text stringlengths 4 1.02M | meta dict |
|---|---|
"""Tests for control_flow_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.training import momentum
from tensorflow.python.util import nest
# Small nested-structure fixtures used by the cond/case data-type tests below.
TestTuple = collections.namedtuple("TestTuple", ["a", "b"])
SingletonTestTuple = collections.namedtuple("SingletonTestTuple", ["a"])
class GroupTestCase(test_util.TensorFlowTestCase):
  """Tests for control_flow_ops.group()."""

  def _StripNode(self, node):
    """Return a copy of `node` with only name, op, input, and device kept."""
    stripped = node_def_pb2.NodeDef(name=node.name, op=node.op,
                                    input=node.input)
    if node.device:
      stripped.device = node.device
    return stripped

  def _StripGraph(self, graph_def):
    """Copy graph_def keeping only node.name/op/input/device per node."""
    return graph_pb2.GraphDef(
        node=[self._StripNode(node) for node in graph_def.node])

  def testGroup_NoDevices(self):
    # Without device placement, group() emits a single NoOp with control
    # inputs on every grouped op.
    with ops.Graph().as_default() as graph:
      const_a = constant_op.constant(0, name="a")
      const_b = constant_op.constant(0, name="b")
      const_c = constant_op.constant(0, name="c")
      control_flow_ops.group(const_a.op, const_b.op, const_c.op, name="root")
    self.assertProtoEquals("""
      node { name: "a" op: "Const"}
      node { name: "b" op: "Const"}
      node { name: "c" op: "Const"}
      node { name: "root" op: "NoOp" input: "^a" input: "^b" input: "^c" }
    """, self._StripGraph(graph.as_graph_def()))

  def testGroup_OneDevice(self):
    # All inputs on one device: the NoOp is colocated with them.
    with ops.Graph().as_default() as graph:
      with graph.device("/task:0"):
        const_a = constant_op.constant(0, name="a")
        const_b = constant_op.constant(0, name="b")
        control_flow_ops.group(const_a.op, const_b.op, name="root")
    self.assertProtoEquals("""
      node { name: "a" op: "Const" device: "/task:0" }
      node { name: "b" op: "Const" device: "/task:0" }
      node { name: "root" op: "NoOp" input: "^a" input: "^b" device: "/task:0" }
    """, self._StripGraph(graph.as_graph_def()))

  def testGroup_MultiDevice(self):
    # Inputs on multiple devices: one intermediate NoOp per device, then a
    # root NoOp on the device active where group() was called.
    with ops.Graph().as_default() as graph:
      with graph.device("/task:0"):
        const_a = constant_op.constant(0, name="a")
        const_b = constant_op.constant(0, name="b")
      with graph.device("/task:1"):
        const_c = constant_op.constant(0, name="c")
        const_d = constant_op.constant(0, name="d")
      with graph.device("/task:2"):
        control_flow_ops.group(const_a.op, const_b.op, const_c.op,
                               const_d.op, name="root")
    self.assertProtoEquals("""
      node { name: "a" op: "Const" device: "/task:0"}
      node { name: "b" op: "Const" device: "/task:0"}
      node { name: "c" op: "Const" device: "/task:1"}
      node { name: "d" op: "Const" device: "/task:1"}
      node { name: "root/NoOp" op: "NoOp" input: "^a" input: "^b"
      device: "/task:0" }
      node { name: "root/NoOp_1" op: "NoOp" input: "^c" input: "^d"
      device: "/task:1" }
      node { name: "root" op: "NoOp" input: "^root/NoOp" input: "^root/NoOp_1"
      device: "/task:2" }
    """, self._StripGraph(graph.as_graph_def()))

  def testPassingList(self):
    # group() also accepts a list of ops instead of varargs.
    with ops.Graph().as_default() as graph:
      const_a = constant_op.constant(0, name="a")
      const_b = constant_op.constant(0, name="b")
      control_flow_ops.group([const_a.op, const_b.op], name="root")
    self.assertProtoEquals("""
      node { name: "a" op: "Const"}
      node { name: "b" op: "Const"}
      node { name: "root" op: "NoOp" input: "^a" input: "^b" }
    """, self._StripGraph(graph.as_graph_def()))

  def testPassingNonTensors(self):
    # Non-tensor, non-op arguments are rejected.
    with ops.Graph().as_default():
      with self.assertRaises(TypeError):
        control_flow_ops.group(1, 2)
class ShapeTestCase(test_util.TensorFlowTestCase):
  """Tests that with_dependencies() preserves static shape information."""

  def testShape(self):
    with ops.Graph().as_default():
      tensor = constant_op.constant([1.0, 2.0])
      # assertEqual, not the deprecated assertEquals alias.
      self.assertEqual([2], tensor.get_shape())
      # with_dependencies() must pass through the shape of its output tensor.
      self.assertEqual([2],
                       control_flow_ops.with_dependencies(
                           [constant_op.constant(1.0)], tensor).get_shape())
class WithDependenciesTestCase(test_util.TensorFlowTestCase):
  """Tests for control_flow_ops.with_dependencies()."""

  def testTupleDependencies(self):
    """Dependencies supplied as a tuple run before the output is produced."""
    with ops.Graph().as_default():
      counter = variable_scope.get_variable(
          "my_counter", shape=[], initializer=init_ops.zeros_initializer())
      increment_counter = state_ops.assign_add(counter, 1)
      const_with_dep = control_flow_ops.with_dependencies(
          (increment_counter, constant_op.constant(42)),
          constant_op.constant(7))
      with self.test_session():
        variables.global_variables_initializer().run()
        # The counter is untouched until the dependent tensor is evaluated.
        self.assertEqual(0, counter.eval())
        self.assertEqual(7, const_with_dep.eval())
        self.assertEqual(1, counter.eval())

  def testListDependencies(self):
    """Dependencies supplied as a list behave the same as a tuple."""
    with ops.Graph().as_default():
      counter = variable_scope.get_variable(
          "my_counter", shape=[], initializer=init_ops.zeros_initializer())
      increment_counter = state_ops.assign_add(counter, 1)
      const_with_dep = control_flow_ops.with_dependencies(
          [increment_counter, constant_op.constant(42)],
          constant_op.constant(7))
      with self.test_session():
        variables.global_variables_initializer().run()
        self.assertEqual(0, counter.eval())
        self.assertEqual(7, const_with_dep.eval())
        self.assertEqual(1, counter.eval())
class SwitchTestCase(test_util.TensorFlowTestCase):
  """Tests for control_flow_ops.switch() and gradients through loops/conds."""

  def testIndexedSlicesWithDenseShape(self):
    """switch() forwards IndexedSlices (values + indices) intact."""
    with self.test_session():
      data = ops.IndexedSlices(
          constant_op.constant([1, 2, 3]),
          constant_op.constant([0, 1]),
          dense_shape=constant_op.constant([3]))
      zero = constant_op.constant(0)
      one = constant_op.constant(1)
      less_op = math_ops.less(zero, one)
      _, switch_true = control_flow_ops.switch(data, less_op)
      self.assertAllEqual([1, 2, 3], switch_true.values.eval())
      self.assertAllEqual([0, 1], switch_true.indices.eval())

  def testIndexedSlicesGradient(self):
    """An optimizer step through an embedding lookup in a while_loop runs."""
    with ops.Graph().as_default():
      embedding_matrix = variable_scope.get_variable(
          "embedding_matrix", [5, 5],
          initializer=init_ops.random_normal_initializer())

      def cond(it, _):
        return it < 5

      def body(it, cost):
        # `+ 0.0` forces a dense read so the gradient is IndexedSlices.
        embedding = embedding_ops.embedding_lookup(embedding_matrix + 0.0, [0])
        cost += math_ops.reduce_sum(embedding)
        return it + 1, cost

      _, cost = control_flow_ops.while_loop(
          cond, body, [constant_op.constant(0),
                       constant_op.constant(0.0)])
      optimizer = momentum.MomentumOptimizer(0.1, 0.9)
      train_op = optimizer.minimize(cost)
      with self.test_session() as sess:
        sess.run(variables.global_variables_initializer())
        for _ in range(10):
          sess.run([train_op])

  def testResourceReadInLoop(self):
    """Reading a resource variable inside a while_loop gives correct values."""
    with ops.Graph().as_default():
      embedding_matrix = variable_scope.get_variable(
          "embedding_matrix",
          initializer=[[2.0], [3.0]],
          use_resource=True)

      def cond(it, _):
        return it < 5

      def body(it, cost):
        embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
        cost += math_ops.reduce_sum(embedding)
        return it + 1, cost

      _, cost = control_flow_ops.while_loop(
          cond, body, [constant_op.constant(0),
                       constant_op.constant(0.0)])
      with self.test_session() as sess:
        sess.run(variables.global_variables_initializer())
        # 5 iterations * 2.0 (row 0 of the initializer).
        self.assertAllEqual(10.0, cost.eval())

  def doTestIndexedSlicesGradientInCondInWhileLoop(self, use_resource=False):
    """Compare loop+cond gradients against an equivalent static unrolling."""
    with ops.Graph().as_default():
      embedding_matrix = variable_scope.get_variable(
          "embedding_matrix", [5, 5],
          initializer=init_ops.random_normal_initializer(),
          use_resource=use_resource)

      def cond(it, _):
        return it < 5

      def body(it, cost):
        embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
        cost = control_flow_ops.cond(
            math_ops.equal(it, 3), lambda: math_ops.square(cost),
            lambda: cost + math_ops.reduce_sum(embedding))
        return it + 1, cost

      _, cost = control_flow_ops.while_loop(
          cond, body, [constant_op.constant(0),
                       constant_op.constant(0.0)])
      dynamic_grads = gradients_impl.gradients(cost, [embedding_matrix])[0]
      dynamic_grads = math_ops.segment_sum(dynamic_grads.values,
                                           dynamic_grads.indices)
      # Statically unrolled equivalent of the 5-iteration loop above.
      embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
      static = math_ops.square(
          math_ops.reduce_sum(embedding) + math_ops.reduce_sum(embedding) +
          math_ops.reduce_sum(embedding)) + math_ops.reduce_sum(embedding)
      static_grads = gradients_impl.gradients(static, [embedding_matrix])[0]
      static_grads = math_ops.segment_sum(static_grads.values,
                                          static_grads.indices)
      with self.test_session() as sess:
        sess.run(variables.global_variables_initializer())
        self.assertAllEqual(*sess.run([static_grads, dynamic_grads]))

  def testIndexedSlicesGradientInCondInWhileLoop(self):
    self.doTestIndexedSlicesGradientInCondInWhileLoop(use_resource=False)

  def testIndexedSlicesGradientInCondInWhileLoopResource(self):
    self.doTestIndexedSlicesGradientInCondInWhileLoop(use_resource=True)

  def testIndexedSlicesWithShapeGradientInWhileLoop(self):
    """Gradient of a statically-shaped gather/TensorArray loop is all ones."""
    for dtype in [dtypes.float32, dtypes.float64]:
      with self.test_session() as sess:
        num_steps = 9
        inputs = array_ops.placeholder(dtype=dtype, shape=[num_steps])
        initial_outputs = tensor_array_ops.TensorArray(
            dtype=dtype, size=num_steps)
        initial_i = constant_op.constant(0, dtype=dtypes.int32)

        def cond(i, _):
          return i < num_steps  # pylint: disable=cell-var-from-loop

        def body(i, outputs):
          x = array_ops.gather(inputs, i)  # pylint: disable=cell-var-from-loop
          outputs = outputs.write(i, x)
          return i + 1, outputs

        _, outputs = control_flow_ops.while_loop(cond, body,
                                                 [initial_i, initial_outputs])
        outputs = math_ops.reduce_sum(outputs.stack())
        r = gradients_impl.gradients([outputs], [inputs])[0]
        grad_wr_inputs = ops.convert_to_tensor(r)
        o, grad = sess.run([outputs, grad_wr_inputs],
                           feed_dict={inputs: [4, 6, 0, 7, 0, 0, 1, 2, 0]})
        self.assertEqual(o, 20)
        self.assertAllEqual(grad, [1] * num_steps)

  def testIndexedSlicesWithDynamicShapeGradientInWhileLoop(self):
    """Same as above but with an unknown-shaped input placeholder."""
    for dtype in [dtypes.float32, dtypes.float64]:
      with self.test_session() as sess:
        inputs = array_ops.placeholder(dtype=dtype)
        initial_outputs = tensor_array_ops.TensorArray(
            dtype=dtype, dynamic_size=True, size=1)
        initial_i = constant_op.constant(0, dtype=dtypes.int32)

        def cond(i, _):
          return i < array_ops.size(inputs)  # pylint: disable=cell-var-from-loop

        def body(i, outputs):
          x = array_ops.gather(inputs, i)  # pylint: disable=cell-var-from-loop
          outputs = outputs.write(i, x)
          return i + 1, outputs

        _, outputs = control_flow_ops.while_loop(cond, body,
                                                 [initial_i, initial_outputs])
        outputs = math_ops.reduce_sum(outputs.stack())
        r = gradients_impl.gradients([outputs], [inputs])[0]
        grad_wr_inputs = ops.convert_to_tensor(r)
        o, grad = sess.run([outputs, grad_wr_inputs],
                           feed_dict={inputs: [1, 3, 2]})
        self.assertEqual(o, 6)
        self.assertAllEqual(grad, [1] * 3)

  def testGradientThroughSingleBranchOutsideOfContext(self):
    """Gradient flows only through the taken switch branch."""
    with self.test_session():
      x = constant_op.constant(2.)
      s = constant_op.constant(True)
      x_false, x_true = control_flow_ops.switch(x, s)
      grad_x_true = gradients_impl.gradients(x_true, x)[0]
      grad_x_false = gradients_impl.gradients(x_false, x)[0]
      self.assertEqual(grad_x_true.eval(), 1.)
      self.assertEqual(grad_x_false.eval(), 0.)
class CondTest(test_util.TensorFlowTestCase):
  """Tests for control_flow_ops.cond()."""

  def testCondTrue(self):
    with ops.Graph().as_default():
      with session.Session():
        x = constant_op.constant(2)
        y = constant_op.constant(5)
        z = control_flow_ops.cond(
            math_ops.less(x, y), lambda: math_ops.multiply(x, 17),
            lambda: math_ops.add(y, 23))
        self.assertEqual(z.eval(), 34)

  def testCondFalse(self):
    with ops.Graph().as_default():
      with session.Session():
        x = constant_op.constant(2)
        y = constant_op.constant(1)
        z = control_flow_ops.cond(
            math_ops.less(x, y), lambda: math_ops.multiply(x, 17),
            lambda: math_ops.add(y, 23))
        self.assertEqual(z.eval(), 24)

  def testCondTrueLegacy(self):
    # The deprecated fn1=/fn2= keyword names still work.
    with ops.Graph().as_default():
      with session.Session():
        x = constant_op.constant(2)
        y = constant_op.constant(5)
        z = control_flow_ops.cond(
            math_ops.less(x, y), fn1=lambda: math_ops.multiply(x, 17),
            fn2=lambda: math_ops.add(y, 23))
        self.assertEqual(z.eval(), 34)

  def testCondFalseLegacy(self):
    with ops.Graph().as_default():
      with session.Session():
        x = constant_op.constant(2)
        y = constant_op.constant(1)
        z = control_flow_ops.cond(
            math_ops.less(x, y), fn1=lambda: math_ops.multiply(x, 17),
            fn2=lambda: math_ops.add(y, 23))
        self.assertEqual(z.eval(), 24)

  def testCondModifyBoolPred(self):
    # This test in particular used to fail only when running in GPU, hence
    # use_gpu=True.
    with ops.Graph().as_default():
      with session.Session() as sess:
        bool_var = variable_scope.get_variable("bool_var", dtype=dtypes.bool,
                                               initializer=True)
        cond_on_bool_var = control_flow_ops.cond(
            pred=bool_var,
            true_fn=lambda: state_ops.assign(bool_var, False),
            false_fn=lambda: True)
        sess.run(bool_var.initializer)
        # First run takes the true branch and flips the predicate variable.
        self.assertEqual(sess.run(cond_on_bool_var), False)
        self.assertEqual(sess.run(cond_on_bool_var), True)

  def testCondMissingArg1(self):
    # Supplying only false_fn is a TypeError.
    with ops.Graph().as_default():
      with session.Session():
        x = constant_op.constant(1)
        with self.assertRaises(TypeError):
          control_flow_ops.cond(True, false_fn=lambda: x)

  def testCondMissingArg2(self):
    # Supplying only true_fn is a TypeError.
    with ops.Graph().as_default():
      with session.Session():
        x = constant_op.constant(1)
        with self.assertRaises(TypeError):
          control_flow_ops.cond(True, lambda: x)

  def testCondDuplicateArg1(self):
    # Mixing positional true_fn with legacy fn1= is a TypeError.
    with ops.Graph().as_default():
      with session.Session():
        x = constant_op.constant(1)
        with self.assertRaises(TypeError):
          control_flow_ops.cond(True, lambda: x, lambda: x, fn1=lambda: x)

  def testCondDuplicateArg2(self):
    # Mixing positional false_fn with legacy fn2= is a TypeError.
    with ops.Graph().as_default():
      with session.Session():
        x = constant_op.constant(1)
        with self.assertRaises(TypeError):
          control_flow_ops.cond(True, lambda: x, lambda: x, fn2=lambda: x)
class ContextTest(test_util.TensorFlowTestCase):
  """Tests control-flow context proto serialization round-trips."""

  def testCondContext(self):
    with self.test_session() as sess:
      x = constant_op.constant(2)
      y = constant_op.constant(5)
      control_flow_ops.cond(
          math_ops.less(x, y), lambda: math_ops.multiply(x, 17),
          lambda: math_ops.add(y, 23))
      for op in sess.graph.get_operations():
        c = op._get_control_flow_context()
        if c:
          # Round-tripping through the proto must be lossless.
          self.assertProtoEquals(
              c.to_proto(),
              control_flow_ops.CondContext.from_proto(c.to_proto()).to_proto())

  def _testWhileContextHelper(self, maximum_iterations=None):
    """Build a while_loop and check WhileContext proto round-trips."""
    with self.test_session() as sess:
      i = constant_op.constant(0)
      c = lambda i: math_ops.less(i, 10)
      b = lambda i: math_ops.add(i, 1)
      control_flow_ops.while_loop(
          c, b, [i], maximum_iterations=maximum_iterations)
      for op in sess.graph.get_operations():
        control_flow_context = op._get_control_flow_context()
        if control_flow_context:
          self.assertProtoEquals(
              control_flow_context.to_proto(),
              control_flow_ops.WhileContext.from_proto(
                  control_flow_context.to_proto()).to_proto())

  def testWhileContext(self):
    self._testWhileContextHelper()

  def testWhileContextWithMaximumIterations(self):
    self._testWhileContextHelper(maximum_iterations=10)

  def testControlContextImportScope(self):
    with self.test_session():
      constant_op.constant(0, name="a")
      constant_op.constant(2, name="test_scope/a")
      b1 = constant_op.constant(1, name="b")
      b2 = constant_op.constant(3, name="test_scope/b")
      c = control_flow_ops.ControlFlowContext()
      c._values = ["a", "b"]
      c._external_values = {"a": b1}
      c_with_scope = control_flow_ops.ControlFlowContext(
          values_def=c._to_values_def(), import_scope="test_scope")
      # _values and _external_values should have the scope prepended.
      self.assertEqual(
          c_with_scope._values, set(["test_scope/a", "test_scope/b"]))
      self.assertEqual(
          c_with_scope._external_values, {"test_scope/a": b2})
      # Calling _to_proto() with export_scope should remove "test_scope".
      self.assertProtoEquals(
          c._to_values_def(),
          c_with_scope._to_values_def(export_scope="test_scope"))
def _get_nested_shape(nested):
  """Map a nested structure of tensors to a matching structure of shapes."""

  def _get_shape(tensor):
    # TensorArrays carry no static shape; the class itself is the sentinel.
    if isinstance(tensor, tensor_array_ops.TensorArray):
      return tensor_array_ops.TensorArray
    if isinstance(tensor, ops.IndexedSlices):
      return tensor.dense_shape
    return tensor.get_shape()

  return nest.map_structure(_get_shape, nested)
def _create_tensor_array(size, shape):
  """Return a TensorArray holding `size` zero tensors of the given `shape`."""
  result = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=size,
                                        clear_after_read=False)
  index = 0
  while index < size:
    result = result.write(index, array_ops.zeros(shape))
    index += 1
  return result
def _raw_nested_shape(nested_shape):
  """Convert TensorShapes in a nested structure to raw dim lists (or None)."""

  def _raw_shape(shape):
    if not isinstance(shape, tensor_shape.TensorShape):
      return None
    if shape.ndims is None:
      return None
    return [dim.value for dim in shape]

  return nest.map_structure(_raw_shape, nested_shape)
# TODO(yori): Add tests for indexed slices.
class DataTypesTest(test_util.TensorFlowTestCase):
  """Checks cond()/case() over many Python/TF structures as branch outputs.

  Each helper builds both a `cond` and a `case` from the same pair of branch
  functions, then compares the inferred static shapes and, where possible,
  the values produced under both predicate outcomes (strict and non-strict).
  """

  def assertAllEqualNested(self, a, b):
    """Recursively compare list/tuple structures element-wise."""
    if isinstance(a, (list, tuple)):
      for entry_a, entry_b in zip(a, b):
        self.assertAllEqualNested(entry_a, entry_b)
    else:
      self.assertAllEqual(a, b)

  def _testShape(self, fn_true, fn_false, expected_shape,
                 strict=False):
    """Assert cond() and case() both infer `expected_shape` for the output."""
    condition = array_ops.placeholder(dtypes.bool)
    output_cond = control_flow_ops.cond(condition, fn_true, fn_false,
                                        strict=strict)
    self.assertEqual(
        _raw_nested_shape(_get_nested_shape(output_cond)),
        _raw_nested_shape(expected_shape))
    output_case = control_flow_ops.case([(condition, fn_true)], fn_false,
                                        strict=strict)
    self.assertEqual(
        _raw_nested_shape(_get_nested_shape(output_case)),
        _raw_nested_shape(expected_shape))

  def _testReturnValues(self, fn_true, fn_false, expected_value_true,
                        expected_value_false, strict=False,
                        check_cond=True, feed_dict=None):
    """Run cond() and case() under both predicate values and check results.

    Args:
      fn_true: branch function for the True case.
      fn_false: branch function for the False case.
      expected_value_true: expected output when the predicate is True.
      expected_value_false: expected output when the predicate is False.
      strict: passed through to cond()/case().
      check_cond: if False, skip checking the case() output.
      feed_dict: extra feeds merged into each session run.
    """
    if feed_dict is None: feed_dict = {}
    condition = array_ops.placeholder(dtypes.bool)
    output_cond = control_flow_ops.cond(condition, fn_true, fn_false,
                                        strict=strict)
    output_case = control_flow_ops.case([(condition, fn_true)], fn_false,
                                        strict=strict)
    with self.test_session() as sess:
      variables.global_variables_initializer().run()
      true_feed_dict = {condition: True}
      true_feed_dict.update(feed_dict)
      result_cond, result_case = sess.run([output_cond, output_case],
                                          feed_dict=true_feed_dict)
      self.assertAllEqualNested(result_cond, expected_value_true)
      if check_cond:
        self.assertAllEqualNested(result_case, expected_value_true)
      false_feed_dict = {condition: False}
      false_feed_dict.update(feed_dict)
      result_cond, result_case = sess.run([output_cond, output_case],
                                          feed_dict=false_feed_dict)
      self.assertAllEqualNested(result_cond, expected_value_false)
      if check_cond:
        self.assertAllEqualNested(result_case, expected_value_false)

  def test_int(self):
    shape = tensor_shape.TensorShape([])
    fn_true = lambda: 1
    fn_false = lambda: 2
    self._testShape(fn_true, fn_false, shape)
    self._testReturnValues(fn_true, fn_false, 1, 2)
    self._testShape(fn_true, fn_false, shape, strict=True)
    self._testReturnValues(fn_true, fn_false, 1, 2, strict=True)

  def test_float(self):
    shape = tensor_shape.TensorShape([])
    fn_true = lambda: 1.0
    fn_false = lambda: 2.0
    self._testShape(fn_true, fn_false, shape)
    self._testReturnValues(fn_true, fn_false, 1.0, 2.0)

  def test_noop(self):
    # no_op branches produce no comparable case() output; skip that check.
    shape = tensor_shape.TensorShape(None)
    self._testShape(control_flow_ops.no_op, control_flow_ops.no_op, shape)
    self._testReturnValues(control_flow_ops.no_op, control_flow_ops.no_op,
                           True, False, check_cond=False)

  def test_string(self):
    # Note the expected values come back as bytes.
    shape = tensor_shape.TensorShape([])
    fn_true = lambda: "abc"
    fn_false = lambda: "xyz"
    self._testShape(fn_true, fn_false, shape)
    self._testReturnValues(fn_true, fn_false, b"abc", b"xyz")

  def test_variable(self):
    shape = tensor_shape.TensorShape([])
    fn_true = lambda: variables.Variable(3.0)
    fn_false = lambda: variables.Variable(4.0)
    self._testShape(fn_true, fn_false, shape)
    self._testReturnValues(fn_true, fn_false, 3.0, 4.0)

  def test_none(self):
    # A branch returning None is rejected.
    fn_none = lambda: None
    fn_tensor = lambda: constant_op.constant(1)
    with self.assertRaises(ValueError):
      control_flow_ops.cond(constant_op.constant(True), fn_none, fn_tensor)
    with self.assertRaises(ValueError):
      control_flow_ops.cond(constant_op.constant(True), fn_tensor, fn_none)

  def test_tensors(self):

    def _build_true_branch(dtype):

      def _build():
        return (array_ops.zeros([2, 2], dtype=dtype),
                array_ops.ones([3, 3], dtype=dtype))

      return _build

    def _build_false_branch(dtype):

      def _build():
        return (array_ops.ones([2, 2], dtype=dtype),
                array_ops.zeros([3, 3], dtype=dtype))

      return _build

    for dtype in (dtypes.float16, dtypes.int8, dtypes.int32, dtypes.uint8):
      shape = (tensor_shape.TensorShape([2, 2]),
               tensor_shape.TensorShape([3, 3]))
      fn_true = _build_true_branch(dtype)
      fn_false = _build_false_branch(dtype)
      self._testShape(fn_true, fn_false, shape)
      self._testReturnValues(fn_true, fn_false,
                             (np.zeros([2, 2]), np.ones([3, 3])),
                             (np.ones([2, 2]), np.zeros([3, 3])))

  def test_tensors_unknown_shape(self):

    def _build_true_branch(dtype):
      # shape=None placeholder: output shape is fully unknown.
      tensor = array_ops.placeholder(dtype=dtype, shape=None)

      def _build():
        return tensor

      return _build, tensor

    def _build_false_branch(dtype):
      tensor = array_ops.placeholder(dtype=dtype, shape=None)

      def _build():
        return tensor

      return _build, tensor

    for dtype in (dtypes.float16, dtypes.int8, dtypes.int32, dtypes.uint8):
      shape = tensor_shape.TensorShape(None)
      fn_true, true_tensor = _build_true_branch(dtype)
      fn_false, false_tensor = _build_false_branch(dtype)
      self._testShape(fn_true, fn_false, shape)
      self._testReturnValues(fn_true, fn_false,
                             np.zeros([2, 2]), np.ones([2, 2]),
                             feed_dict={true_tensor: np.zeros([2, 2]),
                                        false_tensor: np.ones([2, 2])})

  def test_sparse_tensors(self):
    shape = tensor_shape.TensorShape([None, None])

    def true_fn():
      return [sparse_tensor.SparseTensor(indices=[[0, 0], [1, 2]],
                                         values=[1, 2], dense_shape=[3, 4])]

    def false_fn():
      return [sparse_tensor.SparseTensor(indices=[[0, 0], [2, 1]],
                                         values=[3, 4], dense_shape=[3, 4])]

    value1 = sparse_tensor.SparseTensorValue(indices=[[0, 0], [1, 2]],
                                             values=[1, 2], dense_shape=[3, 4])
    value2 = sparse_tensor.SparseTensorValue(indices=[[0, 0], [2, 1]],
                                             values=[3, 4], dense_shape=[3, 4])
    # Non-strict mode unwraps the singleton list; strict mode preserves it.
    self._testShape(true_fn, false_fn, shape)
    self._testReturnValues(true_fn, false_fn, value1, value2)
    self._testShape(true_fn, false_fn, [shape], strict=True)
    self._testReturnValues(true_fn, false_fn, [value1], [value2], strict=True)

  def test_tensors_with_partially_specified_shapes(self):

    def _build_branch(dtype, shape):
      a = array_ops.placeholder(dtype=dtype, shape=shape[0])
      b = array_ops.placeholder(dtype=dtype, shape=shape[1])
      c = array_ops.placeholder(dtype=dtype, shape=shape[2])

      def _build():
        return a, b, c

      return _build, (a, b, c)

    for dtype in (dtypes.float16, dtypes.int8, dtypes.int32, dtypes.uint8):
      shape = (tensor_shape.TensorShape([None, 2]),
               tensor_shape.TensorShape([None]),
               tensor_shape.TensorShape([3, None]))
      fn_true, true_tensors = _build_branch(dtype, shape)
      fn_false, false_tensors = _build_branch(dtype, shape)
      self._testShape(fn_true, fn_false, shape)
      self._testReturnValues(fn_true, fn_false,
                             (np.zeros([2, 2]), np.zeros(5), np.ones([3, 3])),
                             (np.zeros([2, 2]), np.zeros(5), np.ones([3, 3])),
                             feed_dict={true_tensors[0]: np.zeros([2, 2]),
                                        false_tensors[0]: np.zeros([2, 2]),
                                        true_tensors[1]: np.zeros([5]),
                                        false_tensors[1]: np.zeros([5]),
                                        true_tensors[2]: np.ones([3, 3]),
                                        false_tensors[2]: np.ones([3, 3])})

  def test_tensor_arrays(self):
    element_shape = tensor_shape.TensorShape([2])
    ta1 = _create_tensor_array(4, element_shape)
    ta2 = _create_tensor_array(4, element_shape)
    # TensorArray outputs use the class itself as the shape sentinel.
    shape = tensor_array_ops.TensorArray
    fn_true = lambda: ta1
    fn_false = lambda: ta2
    self._testShape(fn_true, fn_false, shape)

  def test_tensor_array_reads(self):
    shape = tensor_shape.TensorShape([2])
    ta = _create_tensor_array(4, shape)
    fn_true = lambda: ta.read(0)
    fn_false = lambda: ta.read(1)
    self._testShape(fn_true, fn_false, shape)

  def test_list(self):
    # Mixed list of tensor, python int, and variable.
    shape = [tensor_shape.TensorShape([]), tensor_shape.TensorShape([]),
             tensor_shape.TensorShape([])]
    fn_true = lambda: [constant_op.constant(1), 2, variables.Variable(3.0)]
    fn_false = lambda: [constant_op.constant(3), 4, variables.Variable(5.0)]
    self._testShape(fn_true, fn_false, shape)
    self._testReturnValues(fn_true, fn_false, [1, 2, 3.0], [3, 4, 5.0])

  def test_non_strict(self):
    # Non-strict mode treats a bare tensor, a singleton list, and a
    # singleton tuple as interchangeable.
    shape = tensor_shape.TensorShape([])
    fn_tensor = lambda: constant_op.constant(1)
    fn_list = lambda: [constant_op.constant(2)]
    fn_tuple = lambda: (constant_op.constant(3),)
    self._testShape(fn_tensor, fn_list, shape)
    self._testShape(fn_tensor, fn_tuple, shape)
    self._testShape(fn_list, fn_tuple, shape)
    self._testReturnValues(fn_tensor, fn_list, 1, 2)
    self._testReturnValues(fn_tensor, fn_tuple, 1, 3)
    self._testReturnValues(fn_list, fn_tuple, 2, 3)

  def test_singleton_strict(self):
    # Strict mode rejects branches whose structures differ.
    fn_tensor = lambda: constant_op.constant(1)
    fn_list = lambda: [constant_op.constant(2)]
    fn_tuple = lambda: (constant_op.constant(3),)
    with self.assertRaises(ValueError):
      control_flow_ops.cond(constant_op.constant(True), fn_tensor, fn_list,
                            strict=True)
    with self.assertRaises(TypeError):
      control_flow_ops.cond(constant_op.constant(True), fn_list, fn_tuple,
                            strict=True)
    with self.assertRaises(ValueError):
      control_flow_ops.case([(constant_op.constant(True), fn_tensor)], fn_list,
                            strict=True)
    with self.assertRaises(TypeError):
      control_flow_ops.case([(constant_op.constant(True), fn_list)], fn_tuple,
                            strict=True)

  def test_singleton_list(self):
    shape = tensor_shape.TensorShape([])
    fn_true = lambda: [constant_op.constant(1)]
    fn_false = lambda: [constant_op.constant(3)]
    self._testShape(fn_true, fn_false, shape)
    self._testReturnValues(fn_true, fn_false, 1, 3)
    self._testShape(fn_true, fn_false, [shape], strict=True)
    self._testReturnValues(fn_true, fn_false, [1], [3], strict=True)

  def test_singleton_tuple(self):
    shape = tensor_shape.TensorShape([])
    fn_true = lambda: (constant_op.constant(1),)
    fn_false = lambda: (constant_op.constant(3),)
    self._testShape(fn_true, fn_false, shape)
    self._testReturnValues(fn_true, fn_false, 1, 3)
    self._testShape(fn_true, fn_false, (shape,), strict=True)
    self._testReturnValues(fn_true, fn_false, (1,), (3,),
                           strict=True)

  def test_singleton_namedtuple(self):
    shape = tensor_shape.TensorShape([])
    fn_true = lambda: SingletonTestTuple(constant_op.constant(1))
    fn_false = lambda: SingletonTestTuple(constant_op.constant(3))
    self._testShape(fn_true, fn_false, shape)
    self._testReturnValues(fn_true, fn_false, 1, 3)
    self._testShape(fn_true, fn_false, SingletonTestTuple(shape),
                    strict=True)
    self._testReturnValues(fn_true, fn_false, SingletonTestTuple(1),
                           SingletonTestTuple(3), strict=True)

  def test_tuple(self):
    shape = (tensor_shape.TensorShape([]), tensor_shape.TensorShape([]))
    fn_true = lambda: (constant_op.constant(1), 2)
    fn_false = lambda: (constant_op.constant(3), 4)
    self._testShape(fn_true, fn_false, shape)
    self._testReturnValues(fn_true, fn_false, (1, 2), (3, 4))

  def test_namedtuple(self):
    shape = TestTuple(tensor_shape.TensorShape([]),
                      tensor_shape.TensorShape([]))
    fn_true = lambda: TestTuple(constant_op.constant(1), 2)
    fn_false = lambda: TestTuple(constant_op.constant(3), 4)
    self._testShape(fn_true, fn_false, shape)
    self._testReturnValues(fn_true, fn_false, TestTuple(1, 2), TestTuple(3, 4))

  def test_nested(self):
    # Deeply nested mix of lists, namedtuples, tensors, and python scalars.
    shape = [tensor_shape.TensorShape([]),
             TestTuple(tensor_shape.TensorShape([]),
                       [tensor_shape.TensorShape([]),
                        tensor_shape.TensorShape([])]),
             tensor_shape.TensorShape([5, 5]),
             tensor_shape.TensorShape([])]

    def true_fn():
      return [constant_op.constant(1),
              TestTuple(constant_op.constant(2), [3, 4]),
              array_ops.zeros([5, 5]), 6]

    def false_fn():
      return [constant_op.constant(11),
              TestTuple(constant_op.constant(12), [13, 14]),
              array_ops.ones([5, 5]), 16]

    self._testShape(true_fn, false_fn, shape)
    self._testReturnValues(
        true_fn, false_fn,
        [1, TestTuple(2, [3, 4]), np.zeros([5, 5]), 6],
        [11, TestTuple(12, [13, 14]),
         np.ones([5, 5]), 16])

  def test_cond_inside_while_loop(self):
    # A cond whose branches return nested namedtuples must compose with
    # while_loop and preserve static shapes of the loop variables.

    def body(i, matrix):
      result_tuple, unused_matrix = control_flow_ops.cond(
          constant_op.constant(True),
          lambda: (TestTuple(matrix * 2, matrix * 4), matrix),
          lambda: (TestTuple(matrix * 4, matrix * 2), matrix))
      return [i+1, result_tuple.a]

    iteration, matrix = control_flow_ops.while_loop(
        lambda i, matrix: i < 10,
        body,
        loop_vars=[constant_op.constant(0),
                   array_ops.ones([2, 2])])
    self.assertEqual(iteration.get_shape(), tensor_shape.TensorShape([]))
    self.assertEqual(matrix.get_shape(), tensor_shape.TensorShape([2, 2]))
class CaseTest(test_util.TensorFlowTestCase):
  """Tests for control_flow_ops.case()."""

  def testCase_withDefault(self):
    x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
    branches = [(math_ops.equal(x, 1), lambda: constant_op.constant(2)),
                (math_ops.equal(x, 2), lambda: constant_op.constant(4))]
    default_fn = lambda: constant_op.constant(6)
    result = control_flow_ops.case(branches, default_fn, exclusive=True)
    with self.test_session() as sess:
      self.assertEqual(sess.run(result, feed_dict={x: 1}), 2)
      self.assertEqual(sess.run(result, feed_dict={x: 2}), 4)
      # No predicate matches: the default branch fires.
      self.assertEqual(sess.run(result, feed_dict={x: 3}), 6)

  def testCase_multiple_matches_exclusive(self):
    x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
    branches = [(math_ops.equal(x, 1), lambda: constant_op.constant(2)),
                (math_ops.equal(x, 2), lambda: constant_op.constant(4)),
                (math_ops.equal(x, 2), lambda: constant_op.constant(6))]
    default_fn = lambda: constant_op.constant(8)
    result = control_flow_ops.case(branches, default_fn, exclusive=True)
    with self.test_session() as sess:
      self.assertEqual(sess.run(result, feed_dict={x: 1}), 2)
      self.assertEqual(sess.run(result, feed_dict={x: 3}), 8)
      # Two predicates match at once: exclusive mode raises at runtime.
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   "Input error:"):
        sess.run(result, feed_dict={x: 2})

  def testCase_multiple_matches_non_exclusive(self):
    x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
    branches = [(math_ops.equal(x, 1), lambda: constant_op.constant(2)),
                (math_ops.equal(x, 2), lambda: constant_op.constant(4)),
                (math_ops.equal(x, 2), lambda: constant_op.constant(6))]
    default_fn = lambda: constant_op.constant(8)
    result = control_flow_ops.case(branches, default_fn, exclusive=False)
    with self.test_session() as sess:
      self.assertEqual(sess.run(result, feed_dict={x: 1}), 2)
      # Non-exclusive mode takes the first matching branch.
      self.assertEqual(sess.run(result, feed_dict={x: 2}), 4)
      self.assertEqual(sess.run(result, feed_dict={x: 3}), 8)

  def testCase_withoutDefault(self):
    x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
    branches = [(math_ops.equal(x, 1), lambda: constant_op.constant(2)),
                (math_ops.equal(x, 2), lambda: constant_op.constant(4)),
                (math_ops.equal(x, 3), lambda: constant_op.constant(6))]
    result = control_flow_ops.case(branches, exclusive=True)
    with self.test_session() as sess:
      self.assertEqual(sess.run(result, feed_dict={x: 1}), 2)
      self.assertEqual(sess.run(result, feed_dict={x: 2}), 4)
      self.assertEqual(sess.run(result, feed_dict={x: 3}), 6)
      # No default and nothing matched: runtime error.
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   "Input error:"):
        sess.run(result, feed_dict={x: 4})

  def testCase_withoutDefault_oneCondition(self):
    x = array_ops.placeholder(dtype=dtypes.int32, shape=[])
    branches = [(math_ops.equal(x, 1), lambda: constant_op.constant(2))]
    result = control_flow_ops.case(branches, exclusive=True)
    with self.test_session() as sess:
      self.assertEqual(sess.run(result, feed_dict={x: 1}), 2)
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   "Input error:"):
        sess.run(result, feed_dict={x: 4})
class WhileLoopTestCase(test_util.TensorFlowTestCase):
  """Tests for control_flow_ops.while_loop()."""

  @test_util.run_in_graph_and_eager_modes
  def testWhileLoopWithSingleVariable(self):
    start = constant_op.constant(0)
    cond_fn = lambda v: math_ops.less(v, 10)
    body_fn = lambda v: math_ops.add(v, 1)
    result = control_flow_ops.while_loop(cond_fn, body_fn, [start])
    self.assertEqual(self.evaluate(result), 10)

  @test_util.run_in_graph_and_eager_modes
  def testEagerWhileLoopWithSingleVariable_bodyReturnsTuple(self):
    start = constant_op.constant(0)
    cond_fn = lambda v: math_ops.less(v, 10)
    body_fn = lambda v: (math_ops.add(v, 1),)
    result = control_flow_ops.while_loop(cond_fn, body_fn, [start])
    # Expect a tuple since that is what the body returns.
    self.assertEqual(self.evaluate(result), (10,))

  def testWhileLoopSameReturnShape_False(self):
    start = constant_op.constant(0)
    cond_fn = lambda v, _: math_ops.less(v, 10)
    # The body returns [tensor, []].
    body_fn = lambda v, _: [math_ops.add(v, 1), []]
    # Without return_same_structure, only the tensor comes back.
    result = control_flow_ops.while_loop(cond_fn, body_fn, [start, []])
    self.assertEqual(self.evaluate(result), 10)

  def testWhileLoopSameReturnShape_True(self):
    start = constant_op.constant(0)
    cond_fn = lambda v, _: math_ops.less(v, 10)
    # The body returns [tensor, []].
    body_fn = lambda v, _: [math_ops.add(v, 1), []]
    # return_same_structure=True keeps the original nested structure.
    result = control_flow_ops.while_loop(cond_fn, body_fn, [start, []],
                                         return_same_structure=True)
    self.assertEqual(self.evaluate(result), [10, []])
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  googletest.main()
| {
"content_hash": "e4bc7fbf7d08e63472b8f1254a426171",
"timestamp": "",
"source": "github",
"line_count": 971,
"max_line_length": 81,
"avg_line_length": 39.46549948506694,
"alnum_prop": 0.617311656793925,
"repo_name": "manipopopo/tensorflow",
"id": "153548ae92cfecfe5c750746b1425abcf3747b1b",
"size": "39010",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/control_flow_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "324704"
},
{
"name": "C#",
"bytes": "8215"
},
{
"name": "C++",
"bytes": "46405377"
},
{
"name": "CMake",
"bytes": "206720"
},
{
"name": "Dockerfile",
"bytes": "6905"
},
{
"name": "Go",
"bytes": "1210133"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "830061"
},
{
"name": "Jupyter Notebook",
"bytes": "2632416"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52525"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99271"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "39882449"
},
{
"name": "Ruby",
"bytes": "551"
},
{
"name": "Shell",
"bytes": "447049"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
""" path.py - An object representing a path to a file or directory.
Original author:
Jason Orendorff <jason.orendorff\x40gmail\x2ecom>
Contributors:
Mikhail Gusarov <dottedmag@dottedmag.net>
Example:
from path import path
d = path('/home/guido/bin')
for f in d.files('*.py'):
f.chmod(0755)
This module requires Python 2.3 or later.
"""
from __future__ import generators
import sys, warnings, os, fnmatch, glob, shutil, codecs, hashlib, errno
__version__ = '2.2.2.990'
__all__ = ['path']
# Platform-specific support for path.owner
if os.name == 'nt':
    try:
        import win32security
    except ImportError:
        # pywin32 not installed; path.owner will raise at call time.
        win32security = None
else:
    try:
        import pwd
    except ImportError:
        # Platform without the POSIX pwd module; path.owner unavailable.
        pwd = None

# Pre-2.3 support.  Are unicode filenames supported?
_base = str
_getcwd = os.getcwd
try:
    if os.path.supports_unicode_filenames:
        # Subclass unicode instead of str so non-ASCII names round-trip.
        _base = unicode
        _getcwd = os.getcwdu
except AttributeError:
    pass

# Pre-2.3 workaround for basestring.
try:
    basestring
except NameError:
    basestring = (str, unicode)

# Universal newline support: prefer 'U' mode, fall back to plain 'r'
# on builds whose file type lacks newline translation.
_textmode = 'U'
if hasattr(__builtins__, 'file') and not hasattr(file, 'newlines'):
    _textmode = 'r'
class TreeWalkWarning(Warning):
    """ Warning emitted by walk()/walkdirs()/walkfiles() when a
    directory entry cannot be accessed and errors='warn' was requested.
    """
    pass
class path(_base):
    """ Represents a filesystem path.

    For documentation on individual methods, consult their
    counterparts in os.path.
    """

    # --- Special Python methods.

    def __repr__(self):
        return 'path(%s)' % _base.__repr__(self)

    # Adding a path and a string yields a path.
    def __add__(self, more):
        try:
            resultStr = _base.__add__(self, more)
        except TypeError:  # Python bug
            resultStr = NotImplemented
        if resultStr is NotImplemented:
            return resultStr
        return self.__class__(resultStr)

    def __radd__(self, other):
        # Only string-likes may be prepended; defer to other types.
        if isinstance(other, basestring):
            return self.__class__(other.__add__(self))
        else:
            return NotImplemented

    # The / operator joins paths.
    def __div__(self, rel):
        """ fp.__div__(rel) == fp / rel == fp.joinpath(rel)

        Join two path components, adding a separator character if
        needed.
        """
        return self.__class__(os.path.join(self, rel))

    # Make the / operator work even when true division is enabled.
    __truediv__ = __div__

    def __enter__(self):
        """ Context manager entry: chdir into this path, remembering
        the previous working directory for __exit__.

        Bug fix: now returns self, so ``with path(d) as p:`` binds the
        path object instead of None.
        """
        self._old_dir = self.getcwd()
        os.chdir(self)
        return self

    def __exit__(self, *_):
        # Restore the working directory saved by __enter__.
        os.chdir(self._old_dir)

    def getcwd(cls):
        """ Return the current working directory as a path object. """
        return cls(_getcwd())
    getcwd = classmethod(getcwd)
    # --- Operations on path strings.

    # Thin wrappers re-wrapping the os.path results as path objects.
    def abspath(self): return self.__class__(os.path.abspath(self))
    def normcase(self): return self.__class__(os.path.normcase(self))
    def normpath(self): return self.__class__(os.path.normpath(self))
    def realpath(self): return self.__class__(os.path.realpath(self))
    def expanduser(self): return self.__class__(os.path.expanduser(self))
    def expandvars(self): return self.__class__(os.path.expandvars(self))
    def dirname(self): return self.__class__(os.path.dirname(self))
    def basename(self): return self.__class__(os.path.basename(self))

    def expand(self):
        """ Clean up a filename by calling expandvars(),
        expanduser(), and normpath() on it.

        This is commonly everything needed to clean up a filename
        read from a configuration file, for example.
        """
        return self.expandvars().expanduser().normpath()

    def _get_namebase(self):
        # Final path component with its last extension stripped.
        base, ext = os.path.splitext(self.name)
        return base

    def _get_ext(self):
        # The last extension, including the leading dot (may be '').
        f, ext = os.path.splitext(_base(self))
        return ext

    def _get_drive(self):
        # Drive specifier ('C:' style); empty where drives don't exist.
        drive, r = os.path.splitdrive(self)
        return self.__class__(drive)

    parent = property(
        dirname, None, None,
        """ This path's parent directory, as a new path object.

        For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
        """)

    name = property(
        basename, None, None,
        """ The name of this file or directory without the full path.

        For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
        """)

    namebase = property(
        _get_namebase, None, None,
        """ The same as path.name, but with one file extension stripped off.

        For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
        but path('/home/guido/python.tar.gz').namebase == 'python.tar'
        """)

    ext = property(
        _get_ext, None, None,
        """ The file extension, for example '.py'. """)

    drive = property(
        _get_drive, None, None,
        """ The drive specifier, for example 'C:'.
        This is always empty on systems that don't use drive specifiers.
        """)
    def splitpath(self):
        """ p.splitpath() -> Return (p.parent, p.name). """
        parent, child = os.path.split(self)
        return self.__class__(parent), child

    def splitdrive(self):
        """ p.splitdrive() -> Return (p.drive, <the rest of p>).

        Split the drive specifier from this path.  If there is
        no drive specifier, p.drive is empty, so the return value
        is simply (path(''), p).  This is always the case on Unix.
        """
        drive, rel = os.path.splitdrive(self)
        return self.__class__(drive), rel

    def splitext(self):
        """ p.splitext() -> Return (p.stripext(), p.ext).

        Split the filename extension from this path and return
        the two parts.  Either part may be empty.

        The extension is everything from '.' to the end of the
        last path segment.  This has the property that if
        (a, b) == p.splitext(), then a + b == p.
        """
        filename, ext = os.path.splitext(self)
        return self.__class__(filename), ext

    def stripext(self):
        """ p.stripext() -> Remove one file extension from the path.

        For example, path('/home/guido/python.tar.gz').stripext()
        returns path('/home/guido/python.tar').
        """
        return self.splitext()[0]

    # UNC helpers exist only on platforms (Windows) whose os.path
    # module provides splitunc().
    if hasattr(os.path, 'splitunc'):
        def splitunc(self):
            unc, rest = os.path.splitunc(self)
            return self.__class__(unc), rest

        def _get_uncshare(self):
            unc, r = os.path.splitunc(self)
            return self.__class__(unc)

        uncshare = property(
            _get_uncshare, None, None,
            """ The UNC mount point for this path.
            This is empty for paths on local drives. """)

    def joinpath(self, *args):
        """ Join two or more path components, adding a separator
        character (os.sep) if needed.  Returns a new path
        object.
        """
        return self.__class__(os.path.join(self, *args))
    def splitall(self):
        r""" Return a list of the path components in this path.

        The first item in the list will be a path.  Its value will be
        either os.curdir, os.pardir, empty, or the root directory of
        this path (for example, '/' or 'C:\\').  The other items in
        the list will be strings.

        path.path.joinpath(*result) will yield the original path.
        """
        parts = []
        loc = self
        while loc != os.curdir and loc != os.pardir:
            prev = loc
            loc, child = prev.splitpath()
            if loc == prev:
                # splitpath() made no progress: we reached the root
                # (or a bare drive), so stop.
                break
            parts.append(child)
        parts.append(loc)
        # Components were collected leaf-first; restore root-first order.
        parts.reverse()
        return parts

    def relpath(self):
        """ Return this path as a relative path,
        based from the current working directory.
        """
        cwd = self.__class__(os.getcwd())
        return cwd.relpathto(self)

    def relpathto(self, dest):
        """ Return a relative path from self to dest.

        If there is no relative path from self to dest, for example if
        they reside on different drives in Windows, then this returns
        dest.abspath().
        """
        origin = self.abspath()
        dest = self.__class__(dest).abspath()

        orig_list = origin.normcase().splitall()
        # Don't normcase dest!  We want to preserve the case.
        dest_list = dest.splitall()

        if orig_list[0] != os.path.normcase(dest_list[0]):
            # Can't get here from there.
            return dest

        # Find the location where the two paths start to differ.
        i = 0
        for start_seg, dest_seg in zip(orig_list, dest_list):
            if start_seg != os.path.normcase(dest_seg):
                break
            i += 1

        # Now i is the point where the two paths diverge.
        # Need a certain number of "os.pardir"s to work up
        # from the origin to the point of divergence.
        segments = [os.pardir] * (len(orig_list) - i)
        # Need to add the diverging part of dest_list.
        segments += dest_list[i:]
        if len(segments) == 0:
            # If they happen to be identical, use os.curdir.
            relpath = os.curdir
        else:
            relpath = os.path.join(*segments)
        return self.__class__(relpath)
    # --- Listing, searching, walking, and matching

    def listdir(self, pattern=None):
        """ D.listdir() -> List of items in this directory.

        Use D.files() or D.dirs() instead if you want a listing
        of just files or just subdirectories.

        The elements of the list are path objects.

        With the optional 'pattern' argument, this only lists
        items whose names match the given pattern.
        """
        names = os.listdir(self)
        if pattern is not None:
            names = fnmatch.filter(names, pattern)
        # Rejoin each name onto this directory via the / operator.
        return [self / child for child in names]

    def dirs(self, pattern=None):
        """ D.dirs() -> List of this directory's subdirectories.

        The elements of the list are path objects.
        This does not walk recursively into subdirectories
        (but see path.walkdirs).

        With the optional 'pattern' argument, this only lists
        directories whose names match the given pattern.  For
        example, d.dirs('build-*').
        """
        return [p for p in self.listdir(pattern) if p.isdir()]

    def files(self, pattern=None):
        """ D.files() -> List of the files in this directory.

        The elements of the list are path objects.
        This does not walk into subdirectories (see path.walkfiles).

        With the optional 'pattern' argument, this only lists files
        whose names match the given pattern.  For example,
        d.files('*.pyc').
        """
        return [p for p in self.listdir(pattern) if p.isfile()]
    def walk(self, pattern=None, errors='strict'):
        """ D.walk() -> iterator over files and subdirs, recursively.

        The iterator yields path objects naming each child item of
        this directory and its descendants.  This requires that
        D.isdir().

        This performs a depth-first traversal of the directory tree.
        Each directory is returned just before all its children.

        The errors= keyword argument controls behavior when an
        error occurs.  The default is 'strict', which causes an
        exception.  The other allowed values are 'warn', which
        reports the error via warnings.warn(), and 'ignore'.
        """
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")

        try:
            childList = self.listdir()
        except Exception:
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise

        for child in childList:
            # Yield the child itself, then (if it is a directory) recurse.
            if pattern is None or child.fnmatch(pattern):
                yield child
            try:
                isdir = child.isdir()
            except Exception:
                if errors == 'ignore':
                    # Treat inaccessible entries as non-directories.
                    isdir = False
                elif errors == 'warn':
                    warnings.warn(
                        "Unable to access '%s': %s"
                        % (child, sys.exc_info()[1]),
                        TreeWalkWarning)
                    isdir = False
                else:
                    raise

            if isdir:
                for item in child.walk(pattern, errors):
                    yield item

    def walkdirs(self, pattern=None, errors='strict'):
        """ D.walkdirs() -> iterator over subdirs, recursively.

        With the optional 'pattern' argument, this yields only
        directories whose names match the given pattern.  For
        example, mydir.walkdirs('*test') yields only directories
        with names ending in 'test'.

        The errors= keyword argument controls behavior when an
        error occurs.  The default is 'strict', which causes an
        exception.  The other allowed values are 'warn', which
        reports the error via warnings.warn(), and 'ignore'.
        """
        if errors not in ('strict', 'warn', 'ignore'):
            raise ValueError("invalid errors parameter")

        try:
            dirs = self.dirs()
        except Exception:
            if errors == 'ignore':
                return
            elif errors == 'warn':
                warnings.warn(
                    "Unable to list directory '%s': %s"
                    % (self, sys.exc_info()[1]),
                    TreeWalkWarning)
                return
            else:
                raise

        for child in dirs:
            # NOTE: the pattern filters which dirs are *yielded*, not
            # which are descended into; recursion always continues.
            if pattern is None or child.fnmatch(pattern):
                yield child
            for subsubdir in child.walkdirs(pattern, errors):
                yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, pattern, limits the results to files
with names that match the pattern. For example,
mydir.walkfiles('*.tmp') yields only files with the .tmp
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
except:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
    def fnmatch(self, pattern):
        """ Return True if self.name matches the given pattern.

        pattern - A filename pattern with wildcards,
            for example '*.py'.
        """
        return fnmatch.fnmatch(self.name, pattern)

    def glob(self, pattern):
        """ Return a list of path objects that match the pattern.

        pattern - a path relative to this directory, with wildcards.

        For example, path('/users').glob('*/bin/*') returns a list
        of all the files users have in their bin directories.
        """
        cls = self.__class__
        # Convert to the base string type so glob sees a plain string.
        return [cls(s) for s in glob.glob(_base(self / pattern))]

    # --- Reading or writing an entire file at once.

    def open(self, mode='r'):
        """ Open this file.  Return a file object. """
        return open(self, mode)

    def bytes(self):
        """ Open this file, read all bytes, return them as a string. """
        f = self.open('rb')
        try:
            return f.read()
        finally:
            f.close()

    def write_bytes(self, bytes, append=False):
        """ Open this file and write the given bytes to it.

        Default behavior is to overwrite any existing file.
        Call p.write_bytes(bytes, append=True) to append instead.
        """
        if append:
            mode = 'ab'
        else:
            mode = 'wb'
        f = self.open(mode)
        try:
            f.write(bytes)
        finally:
            f.close()
    def text(self, encoding=None, errors='strict'):
        r""" Open this file, read it in, return the content as a string.

        This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
        are automatically translated to '\n'.

        Optional arguments:

        encoding - The Unicode encoding (or character set) of
            the file.  If present, the content of the file is
            decoded and returned as a unicode object; otherwise
            it is returned as an 8-bit str.
        errors - How to handle Unicode errors; see help(str.decode)
            for the options.  Default is 'strict'.
        """
        if encoding is None:
            # 8-bit
            f = self.open(_textmode)
            try:
                return f.read()
            finally:
                f.close()
        else:
            # Unicode
            f = codecs.open(self, 'r', encoding, errors)
            # (Note - Can't use 'U' mode here, since codecs.open
            # doesn't support 'U' mode, even in Python 2.3.)
            try:
                t = f.read()
            finally:
                f.close()
            # Normalize every recognized line ending (including the
            # Unicode ones) to '\n', mimicking universal-newline mode.
            return (t.replace(u'\r\n', u'\n')
                     .replace(u'\r\x85', u'\n')
                     .replace(u'\r', u'\n')
                     .replace(u'\x85', u'\n')
                     .replace(u'\u2028', u'\n'))
    def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
        r""" Write the given text to this file.

        The default behavior is to overwrite any existing file;
        to append instead, use the 'append=True' keyword argument.

        There are two differences between path.write_text() and
        path.write_bytes(): newline handling and Unicode handling.
        See below.

        Parameters:

        - text - str/unicode - The text to be written.

        - encoding - str - The Unicode encoding that will be used.
            This is ignored if 'text' isn't a Unicode string.

        - errors - str - How to handle Unicode encoding errors.
            Default is 'strict'.  See help(unicode.encode) for the
            options.  This is ignored if 'text' isn't a Unicode
            string.

        - linesep - keyword argument - str/unicode - The sequence of
            characters to be used to mark end-of-line.  The default is
            os.linesep.  You can also specify None; this means to
            leave all newlines as they are in 'text'.

        - append - keyword argument - bool - Specifies what to do if
            the file already exists (True: append to the end of it;
            False: overwrite it.)  The default is False.

        --- Newline handling.

        write_text() converts all standard end-of-line sequences
        ('\n', '\r', and '\r\n') to your platform's default end-of-line
        sequence (see os.linesep; on Windows, for example, the
        end-of-line marker is '\r\n').

        If you don't like your platform's default, you can override it
        using the 'linesep=' keyword argument.  If you specifically want
        write_text() to preserve the newlines as-is, use 'linesep=None'.

        This applies to Unicode text the same as to 8-bit text, except
        there are three additional standard Unicode end-of-line sequences:
        u'\x85', u'\r\x85', and u'\u2028'.

        (This is slightly different from when you open a file for
        writing with fopen(filename, "w") in C or file(filename, 'w')
        in Python.)

        --- Unicode

        If 'text' isn't Unicode, then apart from newline handling, the
        bytes are written verbatim to the file.  The 'encoding' and
        'errors' arguments are not used and must be omitted.

        If 'text' is Unicode, it is first converted to bytes using the
        specified 'encoding' (or the default encoding if 'encoding'
        isn't specified).  The 'errors' argument applies only to this
        conversion.
        """
        if isinstance(text, unicode):
            if linesep is not None:
                # Convert all standard end-of-line sequences to
                # ordinary newline characters.
                text = (text.replace(u'\r\n', u'\n')
                            .replace(u'\r\x85', u'\n')
                            .replace(u'\r', u'\n')
                            .replace(u'\x85', u'\n')
                            .replace(u'\u2028', u'\n'))
                # Then re-expand every '\n' into the requested linesep.
                text = text.replace(u'\n', linesep)
            if encoding is None:
                encoding = sys.getdefaultencoding()
            bytes = text.encode(encoding, errors)
        else:
            # It is an error to specify an encoding if 'text' is
            # an 8-bit string.
            assert encoding is None

            if linesep is not None:
                text = (text.replace('\r\n', '\n')
                            .replace('\r', '\n'))
                bytes = text.replace('\n', linesep)
        # Delegate the actual file I/O (and the append flag) to write_bytes().
        self.write_bytes(bytes, append)
    def lines(self, encoding=None, errors='strict', retain=True):
        r""" Open this file, read all lines, return them in a list.

        Optional arguments:
            encoding - The Unicode encoding (or character set) of
                the file.  The default is None, meaning the content
                of the file is read as 8-bit characters and returned
                as a list of (non-Unicode) str objects.
            errors - How to handle Unicode errors; see help(str.decode)
                for the options.  Default is 'strict'
            retain - If true, retain newline characters; but all newline
                character combinations ('\r', '\n', '\r\n') are
                translated to '\n'.  If false, newline characters are
                stripped off.  Default is True.

        This uses 'U' mode in Python 2.3 and later.
        """
        if encoding is None and retain:
            # Fast path: universal-newline mode already normalizes.
            f = self.open(_textmode)
            try:
                return f.readlines()
            finally:
                f.close()
        else:
            # Decode via text(), then split (retain doubles as keepends).
            return self.text(encoding, errors).splitlines(retain)

    def write_lines(self, lines, encoding=None, errors='strict',
                    linesep=os.linesep, append=False):
        r""" Write the given lines of text to this file.

        By default this overwrites any existing file at this path.

        This puts a platform-specific newline sequence on every line.
        See 'linesep' below.

        lines - A list of strings.

        encoding - A Unicode encoding to use.  This applies only if
            'lines' contains any Unicode strings.

        errors - How to handle errors in Unicode encoding.  This
            also applies only to Unicode strings.

        linesep - The desired line-ending.  This line-ending is
            applied to every line.  If a line already has any
            standard line ending ('\r', '\n', '\r\n', u'\x85',
            u'\r\x85', u'\u2028'), that will be stripped off and
            this will be used instead.  The default is os.linesep,
            which is platform-dependent ('\r\n' on Windows, '\n' on
            Unix, etc.)  Specify None to write the lines as-is,
            like file.writelines().

        Use the keyword argument append=True to append lines to the
        file.  The default is to overwrite the file.  Warning:
        When you use this with Unicode data, if the encoding of the
        existing data in the file is different from the encoding
        you specify with the encoding= parameter, the result is
        mixed-encoding data, which can really confuse someone trying
        to read the file later.
        """
        if append:
            mode = 'ab'
        else:
            mode = 'wb'
        f = self.open(mode)
        try:
            for line in lines:
                isUnicode = isinstance(line, unicode)
                if linesep is not None:
                    # Strip off any existing line-end and add the
                    # specified linesep string.
                    if isUnicode:
                        if line[-2:] in (u'\r\n', u'\x0d\x85'):
                            line = line[:-2]
                        elif line[-1:] in (u'\r', u'\n',
                                           u'\x85', u'\u2028'):
                            line = line[:-1]
                    else:
                        if line[-2:] == '\r\n':
                            line = line[:-2]
                        elif line[-1:] in ('\r', '\n'):
                            line = line[:-1]
                    line += linesep
                if isUnicode:
                    if encoding is None:
                        encoding = sys.getdefaultencoding()
                    line = line.encode(encoding, errors)
                f.write(line)
        finally:
            f.close()
    def read_md5(self):
        """ Calculate the md5 hash for this file.

        This reads through the entire file.
        """
        return self.read_hash('md5')

    def _hash(self, hash_name):
        # Stream the file in 8 KB chunks so large files are not read
        # into memory all at once.
        f = self.open('rb')
        try:
            m = hashlib.new(hash_name)
            while True:
                d = f.read(8192)
                if not d:
                    break
                m.update(d)
            return m
        finally:
            f.close()

    def read_hash(self, hash_name):
        """ Calculate given hash for this file.

        List of supported hashes can be obtained from hashlib package. This
        reads the entire file.
        """
        return self._hash(hash_name).digest()

    def read_hexhash(self, hash_name):
        """ Calculate given hash for this file, returning hexdigest.

        List of supported hashes can be obtained from hashlib package. This
        reads the entire file.
        """
        return self._hash(hash_name).hexdigest()
# --- Methods for querying the filesystem.
# N.B. On some platforms, the os.path functions may be implemented in C
# (e.g. isdir on Windows, Python 3.2.2), and compiled functions don't get
# bound. Playing it safe and wrapping them all in method calls.
def isabs(self): return os.path.isabs(self)
def exists(self): return os.path.exists(self)
def isdir(self): return os.path.isdir(self)
def isfile(self): return os.path.isfile(self)
def islink(self): return os.path.islink(self)
def ismount(self): return os.path.ismount(self)
if hasattr(os.path, 'samefile'):
def samefile(self): return os.path.samefile(self)
    def getatime(self): return os.path.getatime(self)
    atime = property(
        getatime, None, None,
        """ Last access time of the file. """)

    def getmtime(self): return os.path.getmtime(self)
    mtime = property(
        getmtime, None, None,
        """ Last-modified time of the file. """)

    # ctime exists only where os.path provides getctime().
    if hasattr(os.path, 'getctime'):
        def getctime(self): return os.path.getctime(self)
        ctime = property(
            getctime, None, None,
            """ Creation time of the file. """)

    def getsize(self): return os.path.getsize(self)
    size = property(
        getsize, None, None,
        """ Size of the file, in bytes. """)

    if hasattr(os, 'access'):
        def access(self, mode):
            """ Return true if current user has access to this path.

            mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
            """
            return os.access(self, mode)

    def stat(self):
        """ Perform a stat() system call on this path. """
        return os.stat(self)

    def lstat(self):
        """ Like path.stat(), but do not follow symbolic links. """
        return os.lstat(self)

    def get_owner(self):
        r""" Return the name of the owner of this file or directory.

        This follows symbolic links.

        On Windows, this returns a name of the form ur'DOMAIN\User Name'.
        On Windows, a group can own a file or directory.
        """
        if os.name == 'nt':
            if win32security is None:
                raise Exception("path.owner requires win32all to be installed")
            # Look up the owner SID from the file's security descriptor,
            # then resolve it to a DOMAIN\account name.
            desc = win32security.GetFileSecurity(
                self, win32security.OWNER_SECURITY_INFORMATION)
            sid = desc.GetSecurityDescriptorOwner()
            account, domain, typecode = win32security.LookupAccountSid(None, sid)
            return domain + u'\\' + account
        else:
            if pwd is None:
                raise NotImplementedError("path.owner is not implemented on this platform.")
            # POSIX: map the stat uid to a user name via the pwd database.
            st = self.stat()
            return pwd.getpwuid(st.st_uid).pw_name

    owner = property(
        get_owner, None, None,
        """ Name of the owner of this file or directory. """)

    if hasattr(os, 'statvfs'):
        def statvfs(self):
            """ Perform a statvfs() system call on this path. """
            return os.statvfs(self)

    if hasattr(os, 'pathconf'):
        def pathconf(self, name):
            # Query a POSIX pathname configuration value (see os.pathconf).
            return os.pathconf(self, name)
    # --- Modifying operations on files and directories

    def utime(self, times):
        """ Set the access and modified times of this file. """
        os.utime(self, times)

    def chmod(self, mode):
        # Change the file's mode bits (see os.chmod).
        os.chmod(self, mode)

    if hasattr(os, 'chown'):
        def chown(self, uid, gid):
            os.chown(self, uid, gid)

    def rename(self, new):
        os.rename(self, new)

    def renames(self, new):
        # Like rename(), but creates intermediate directories as needed
        # and prunes emptied source directories (see os.renames).
        os.renames(self, new)

    # --- Create/delete operations on directories
    # NOTE: the 0777 octal literals and `except OSError, e` syntax are
    # Python 2 only.  The *_p variants are idempotent: they swallow
    # "already exists" (EEXIST) and, for removal, "not empty"
    # (ENOTEMPTY) errors.

    def mkdir(self, mode=0777):
        os.mkdir(self, mode)

    def mkdir_p(self, mode=0777):
        try:
            self.mkdir(mode)
        except OSError, e:
            if e.errno != errno.EEXIST:
                raise

    def makedirs(self, mode=0777):
        os.makedirs(self, mode)

    def makedirs_p(self, mode=0777):
        try:
            self.makedirs(mode)
        except OSError, e:
            if e.errno != errno.EEXIST:
                raise

    def rmdir(self):
        os.rmdir(self)

    def rmdir_p(self):
        try:
            self.rmdir()
        except OSError, e:
            # Some platforms report a non-empty directory as EEXIST.
            if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
                raise

    def removedirs(self):
        os.removedirs(self)

    def removedirs_p(self):
        try:
            self.removedirs()
        except OSError, e:
            if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
                raise
    # --- Modifying operations on files

    def touch(self):
        """ Set the access/modified times of this file to the current time.
        Create the file if it does not exist.
        """
        fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
        os.close(fd)
        os.utime(self, None)

    def remove(self):
        os.remove(self)

    def remove_p(self):
        # Idempotent remove: a missing file (ENOENT) is not an error.
        try:
            self.unlink()
        except OSError, e:
            if e.errno != errno.ENOENT:
                raise

    def unlink(self):
        os.unlink(self)

    def unlink_p(self):
        self.remove_p()

    # --- Links

    if hasattr(os, 'link'):
        def link(self, newpath):
            """ Create a hard link at 'newpath', pointing to this file. """
            os.link(self, newpath)

    if hasattr(os, 'symlink'):
        def symlink(self, newlink):
            """ Create a symbolic link at 'newlink', pointing here. """
            os.symlink(self, newlink)

    if hasattr(os, 'readlink'):
        def readlink(self):
            """ Return the path to which this symbolic link points.

            The result may be an absolute or a relative path.
            """
            return self.__class__(os.readlink(self))

        def readlinkabs(self):
            """ Return the path to which this symbolic link points.

            The result is always an absolute path.
            """
            p = self.readlink()
            if p.isabs():
                return p
            else:
                # Relative link targets are resolved against this
                # link's parent directory.
                return (self.parent / p).abspath()

    # --- High-level functions from shutil
    # Bound as plain functions, so 'self' becomes shutil's first
    # (source) argument when these are called as methods.

    copyfile = shutil.copyfile
    copymode = shutil.copymode
    copystat = shutil.copystat
    copy = shutil.copy
    copy2 = shutil.copy2
    copytree = shutil.copytree
    if hasattr(shutil, 'move'):
        move = shutil.move
    rmtree = shutil.rmtree

    # --- Special stuff from os

    if hasattr(os, 'chroot'):
        def chroot(self):
            os.chroot(self)

    if hasattr(os, 'startfile'):
        def startfile(self):
            os.startfile(self)
| {
"content_hash": "20784d5e29dca65a915f522a207e3688",
"timestamp": "",
"source": "github",
"line_count": 1017,
"max_line_length": 97,
"avg_line_length": 33.087512291052114,
"alnum_prop": 0.5507578008915305,
"repo_name": "koll00/Gui_SM",
"id": "8d45e59978ee1889ef42fce6e03c8d018de665b9",
"size": "34749",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "SMlib/utils/external/path.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7548"
},
{
"name": "JavaScript",
"bytes": "143217"
},
{
"name": "Python",
"bytes": "1701295"
}
],
"symlink_target": ""
} |
from mrq.task import Task
from mrq.context import connections, log
import urllib2
class TestIo(Task):
    """Task that performs one kind of blocking I/O, selected by params["test"]."""

    def run(self, params):
        log.info("I/O starting")
        outcome = self._run(params)
        log.info("I/O finished")
        return outcome

    def _run(self, params):
        # Dispatch on the requested I/O scenario; unknown kinds fall
        # through and return None.
        kind = params["test"]

        if kind == "mongodb-insert":
            return connections.mongodb_jobs.tests_inserts.insert(
                {"params": params["params"]}, manipulate=False)

        if kind == "mongodb-find":
            return list(connections.mongodb_jobs.tests_inserts.find({"test": "x"}))

        if kind == "mongodb-count":
            return connections.mongodb_jobs.tests_inserts.count()

        if kind == "redis-llen":
            return connections.redis.llen(params["params"]["key"])

        if kind == "redis-lpush":
            return connections.redis.lpush(params["params"]["key"], "xxx")

        if kind == "urllib2-get":
            return urllib2.urlopen(params["params"]["url"]).read()

        if kind == "urllib2-post":
            return urllib2.urlopen(params["params"]["url"], data="x=x").read()

        if kind == "requests-get":
            import requests
            return requests.get(params["params"]["url"], verify=False).text
| {
"content_hash": "434ba515eb71966ba0e75cf80a63261a",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 112,
"avg_line_length": 25.462962962962962,
"alnum_prop": 0.5781818181818181,
"repo_name": "IAlwaysBeCoding/mrq",
"id": "63d22f94581ecf10c1bae480b2d48878acd9b566",
"size": "1375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tasks/io.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5514"
},
{
"name": "HTML",
"bytes": "57447"
},
{
"name": "JavaScript",
"bytes": "72442"
},
{
"name": "Makefile",
"bytes": "2074"
},
{
"name": "Perl",
"bytes": "1374"
},
{
"name": "Python",
"bytes": "811047"
}
],
"symlink_target": ""
} |
import mock
import pytest
import six
from cmdtree import parser
from cmdtree.exceptions import ArgumentParseError
def mk_obj(property_dict):
    """Build an ad-hoc object exposing each key of `property_dict` as an attribute.

    Used by the vars_() tests to simulate arbitrary attribute layouts.
    """
    class TestObject(object):
        pass

    obj = TestObject()
    # dict.items() works identically on Python 2 and 3; the six.iteritems
    # indirection was unnecessary here.
    for key, value in property_dict.items():
        setattr(obj, key, value)
    return obj
@pytest.fixture()
def aparser():
    """Fixture: a fresh AParser for each test (imported lazily so module
    import order doesn't matter at collection time)."""
    from cmdtree.parser import AParser
    return AParser()
@pytest.fixture()
def test_func():
    """Fixture: a no-argument command callable returning the sentinel
    string "result", so tests can assert that it actually ran."""
    def func():
        return "result"
    return func
@pytest.mark.parametrize(
    "arg_name, expected",
    (
        ("hello_world", "hello_world"),
        ("hello-world", "hello_world"),
    )
)
def test_normalize_arg_name(arg_name, expected):
    """Hyphens in argument names are normalized to underscores."""
    from cmdtree.parser import _normalize_arg_name
    assert _normalize_arg_name(arg_name) == expected
@pytest.mark.parametrize(
    "p_dict, expected",
    (
        ({"_k": "v", "k": "v"}, {"k": "v"}),
        ({"__k": "v", "k": "v"}, {"k": "v"}),
        ({"k1": "v", "k": "v"}, {"k": "v", "k1": "v"}),
    )
)
def test_vars_should_return_right_dict(p_dict, expected):
    """parser.vars_() drops underscore-prefixed (private) attributes."""
    obj = mk_obj(p_dict)
    assert parser.vars_(
        obj
    ) == expected
class TestAParser:

    def test_should_execute_func(self, aparser, test_func):
        """A command registered at the root runs its function."""
        aparser.add_cmd("test", func=test_func)
        outcome = aparser.run(["test"])
        assert outcome == "result"
def test_should_execute_child_cmd(self, aparser, test_func):
parent = aparser.add_cmd("parent")
parent.add_cmd("child", func=test_func)
assert aparser.run(['parent', 'child']) == "result"
@pytest.mark.parametrize(
"cmd_func, exception",
(
(None, ValueError),
(lambda *args, **kwargs: "str", None),
)
)
def test_should_execute_without_func(self, cmd_func, exception, aparser):
parent = aparser.add_cmd("parent")
parent.add_cmd("child", func=cmd_func)
if exception is not None:
with pytest.raises(exception):
aparser.run(['parent', 'child'])
else:
assert aparser.run(['parent', 'child']) == "str"
    @pytest.mark.parametrize(
        "silent_exit, exception",
        (
            (False, ArgumentParseError),
            (True, SystemExit)
        )
    )
    def test_should_parent_cmd_exit_or_raise_error(self, silent_exit, exception, test_func, aparser):
        """Running a bare parent command fails: SystemExit when
        env.silent_exit is set, otherwise ArgumentParseError.

        NOTE(review): mutates the global env.silent_exit without
        restoring it; relies on later tests not needing the default.
        """
        from cmdtree.registry import env
        env.silent_exit = silent_exit
        parent = aparser.add_cmd("parent")
        parent.add_cmd("child", func=test_func)
        with pytest.raises(exception):
            aparser.run(['parent'])
@pytest.mark.parametrize(
"arg_name, exception",
(
('--name', ValueError),
('-name', ValueError),
('name', None),
)
)
def test_should_argument_starts_with_valid_string(self, arg_name, exception, test_func, aparser):
cmd = aparser.add_cmd("execute", func=test_func)
with mock.patch.object(cmd, "add_argument") as mocked_add:
if exception is not None:
with pytest.raises(exception):
cmd.argument(arg_name)
else:
cmd.argument(arg_name)
mocked_add.assert_called_with(arg_name, help=None)
@pytest.mark.parametrize(
"arg_name, expected_name",
(
('--name', '--name'),
('-name', '-name'),
('name', '--name'),
)
)
def test_option_should_starts_with_hyphen(self, arg_name, expected_name, test_func, aparser):
cmd = aparser.add_cmd("execute", func=test_func)
with mock.patch.object(cmd, "add_argument") as mocked_add:
cmd.option(arg_name)
mocked_add.assert_called_with(expected_name, help=None)
@pytest.mark.parametrize(
"is_flag",
(
True,
False,
)
)
def test_option_should_work_with_is_flag(self, is_flag, test_func, aparser):
cmd = aparser.add_cmd("execute", func=test_func)
with mock.patch.object(cmd, "add_argument") as mocked_add:
cmd.option("name", is_flag=is_flag)
if is_flag:
mocked_add.assert_called_with("--name", help=None, action="store_true")
else:
mocked_add.assert_called_with("--name", help=None)
@pytest.mark.parametrize(
"default",
(
None,
1,
)
)
def test_option_should_work_with_default_value(self, default, aparser):
cmd = aparser.add_cmd("execute", func=test_func)
with mock.patch.object(cmd, "add_argument") as mocked_add:
cmd.option("name", default=default)
if default is None:
mocked_add.assert_called_with("--name", help=None)
else:
mocked_add.assert_called_with("--name", help=None, default=default)
@pytest.mark.parametrize(
"type_func, kwargs",
(
(mock.Mock(), {"help": None, "type": int}),
(None, {"help": None}),
)
)
def test_add_argument_work_with_type(
self, type_func, kwargs, aparser
):
if type_func is not None:
type_func.return_value = {"type": int}
with mock.patch.object(aparser, "add_argument") as mocked_add:
aparser.argument("name", type=type_func)
if type_func is not None:
assert type_func.called
mocked_add.assert_called_with("name", **kwargs)
@pytest.mark.parametrize(
"type_func, kwargs",
(
(mock.Mock(), {"help": None, "type": int}),
(None, {"help": None}),
)
)
def test_add_option_work_with_type(
self, type_func, kwargs, aparser
):
if type_func is not None:
type_func.return_value = {"type": int}
with mock.patch.object(aparser, "add_argument") as mocked_add:
aparser.option("name", type=type_func)
if type_func is not None:
assert type_func.called
mocked_add.assert_called_with("--name", **kwargs)
| {
"content_hash": "e86272d1f8e89a0b4084b367631b7c91",
"timestamp": "",
"source": "github",
"line_count": 197,
"max_line_length": 101,
"avg_line_length": 31.401015228426395,
"alnum_prop": 0.5460717749757517,
"repo_name": "winkidney/cmdtree",
"id": "83c499f8c16cda46c67caec67e7804d79177e0e3",
"size": "6186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/cmdtree/tests/unittest/test_parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "307"
},
{
"name": "Python",
"bytes": "48192"
},
{
"name": "Shell",
"bytes": "69"
}
],
"symlink_target": ""
} |
'''
Aggregate fiscal year end schedule for each company in
the ernest_aq_forms index
** Note **
This runs prospectively using the --most-recent argument
'''
import argparse
from modules.compute_fye import COMPUTE_FYE
from generic.logger import LOGGER
def _build_cli():
    # Command-line interface for the FYE aggregation job.
    cli = argparse.ArgumentParser(description='fye_aggregation')
    cli.add_argument('--from-scratch',
                     dest='from_scratch', action="store_true")
    cli.add_argument('--most-recent',
                     dest='most_recent', action="store_true")
    cli.add_argument("--config-path", type=str,
                     action='store', default='../config.json')
    cli.add_argument("--log-file", type=str, action="store")
    return cli


if __name__ == "__main__":
    cli_args = _build_cli().parse_args()
    # The parent logger must exist before COMPUTE_FYE starts emitting records.
    LOGGER('fye_graph', cli_args.log_file).create_parent()
    COMPUTE_FYE(cli_args, 'fye_graph').compute()
| {
"content_hash": "408ad862631bc985f9e3e5fce7e4593c",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 67,
"avg_line_length": 36.04,
"alnum_prop": 0.632630410654828,
"repo_name": "gophronesis/ernest",
"id": "f41620c45a4f85224b79fd91c481593225b1f752",
"size": "924",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "enrich/compute-fye-graph.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "OpenEdge ABL",
"bytes": "23601"
},
{
"name": "Python",
"bytes": "484174"
},
{
"name": "R",
"bytes": "12430"
},
{
"name": "Shell",
"bytes": "21985"
}
],
"symlink_target": ""
} |
"""Reads and then deletes the message from SQS queue"""
from typing import Optional
from airflow.exceptions import AirflowException
from airflow.providers.amazon.aws.hooks.sqs import SQSHook
from airflow.sensors.base import BaseSensorOperator
class SQSSensor(BaseSensorOperator):
    """
    Poll an SQS queue; on success delete the received messages from the queue.

    Retrieved messages are pushed through XCom with the key ``messages`` and
    the sensor returns ``True``. If the batch delete fails an
    AirflowException is raised.

    :param aws_conn_id: AWS connection id
    :type aws_conn_id: str
    :param sqs_queue: The SQS queue url (templated)
    :type sqs_queue: str
    :param max_messages: The maximum number of messages to retrieve for each poke (templated)
    :type max_messages: int
    :param wait_time_seconds: The time in seconds to wait for receiving messages (default: 1 second)
    :type wait_time_seconds: int
    """

    template_fields = ('sqs_queue', 'max_messages')

    def __init__(
        self,
        *,
        sqs_queue,
        aws_conn_id: str = 'aws_default',
        max_messages: int = 5,
        wait_time_seconds: int = 1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.sqs_queue = sqs_queue
        self.aws_conn_id = aws_conn_id
        self.max_messages = max_messages
        self.wait_time_seconds = wait_time_seconds
        # Created lazily by get_hook() so the operator stays cheap to construct.
        self.hook: Optional[SQSHook] = None

    def poke(self, context):
        """
        Check for message on subscribed queue and write to xcom the message with key ``messages``

        :param context: the context object
        :type context: dict
        :return: ``True`` if message is available or ``False``
        """
        conn = self.get_hook().get_conn()
        self.log.info('SQSSensor checking for message on queue: %s', self.sqs_queue)
        response = conn.receive_message(
            QueueUrl=self.sqs_queue,
            MaxNumberOfMessages=self.max_messages,
            WaitTimeSeconds=self.wait_time_seconds,
        )
        self.log.info("received message %s", str(response))

        batch = response.get('Messages')
        if not batch:
            return False

        entries = [
            {'Id': msg['MessageId'], 'ReceiptHandle': msg['ReceiptHandle']}
            for msg in batch
        ]
        delete_result = conn.delete_message_batch(QueueUrl=self.sqs_queue, Entries=entries)
        if 'Successful' not in delete_result:
            raise AirflowException(
                'Delete SQS Messages failed ' + str(delete_result) + ' for messages ' + str(response)
            )
        context['ti'].xcom_push(key='messages', value=response)
        return True

    def get_hook(self) -> SQSHook:
        """Create and return an SQSHook"""
        if self.hook is None:
            self.hook = SQSHook(aws_conn_id=self.aws_conn_id)
        return self.hook
| {
"content_hash": "01081cba53698867f61763b3516f658a",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 100,
"avg_line_length": 34.28735632183908,
"alnum_prop": 0.612470667113644,
"repo_name": "sekikn/incubator-airflow",
"id": "dc6217b5bd0a347959a0e9783e8e6a053f159f18",
"size": "3770",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/providers/amazon/aws/sensors/sqs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
from ctypes import CDLL, byref, CFUNCTYPE, POINTER, Structure, cast, addressof
from ctypes import c_void_p, c_char_p, c_long, c_int, c_bool, py_object, string_at
import threading
# C SDK error codes, indexed by the integer errno value returned through ctypes.
SDKError = ('OK', 'ClusterDown', 'NoSuchKey', 'Timeout', 'LockFail',
            'CleanBinlogFail', 'UserExists', 'PermissionDenied', 'PasswordError',
            'UnknownUser')
# Raft node states, indexed by the integer status field.
NodeStatus = ('Leader', 'Candidate', 'Follower', 'Offline')
# Field names of a cluster-info record (mirrors InsSDK._ClusterNodeInfo).
ClusterInfo = ('server_id', 'status', 'term', 'last_log_index', 'last_log_term',
               'commit_index', 'last_applied')
class WatchParam:
    """Value object delivered to watch callbacks: key/value pair, deletion
    flag, and the user-supplied context object."""

    def __init__(self, key, value, deleted, context):
        # Store the callback payload verbatim.
        self.key, self.value = key, value
        self.deleted, self.context = deleted, context
class InsSDK:
    """ctypes wrapper around the ins C SDK (libins_py.so).

    NOTE(review): ``self._local.errno`` is only created in the thread that
    constructed the instance; other threads touching it would raise
    AttributeError -- confirm each InsSDK is meant to be single-threaded.
    """

    class _ClusterNodeInfo(Structure):
        # Mirrors the C-side cluster node record (see the ClusterInfo tuple).
        _fields_ = [('server_id', c_char_p),
                    ('status', c_int),
                    ('term', c_long),
                    ('last_log_index', c_long),
                    ('last_log_term', c_long),
                    ('commit_index', c_long),
                    ('last_applied', c_long)]

    class _NodeStatInfo(Structure):
        # stats is an 8x2 table: [op][0] = current rate, [op][1] = average.
        _fields_ = [('server_id', c_char_p),
                    ('status', c_int),
                    ('stats', c_long * 2 * 8)]

    class _WatchParam(Structure):
        _fields_ = [('key', c_char_p),
                    ('value', c_char_p),
                    ('deleted', c_bool),
                    ('context', c_void_p)]

    WatchCallback = CFUNCTYPE(None, POINTER(_WatchParam), c_long, c_int)
    SessionTimeoutCallback = CFUNCTYPE(None, c_long, c_void_p)

    # Operation names in the order the C side fills the stats table.
    _STAT_OPS = ('put', 'get', 'delete', 'scan', 'keepalive',
                 'lock', 'unlock', 'watch')

    def __init__(self, members):
        """Create an SDK handle from a member string or an array of members."""
        if isinstance(members, basestring):
            self._sdk = _ins.GetSDK(members)
        else:
            self._sdk = _ins.GetSDKFromArray(members)
        self._local = threading.local()
        self._local.errno = c_int()

    def __del__(self):
        if _ins != None:
            _ins.DeleteSDK(self._sdk)

    def error(self):
        """Return the symbolic name of the last SDK error in this thread."""
        return SDKError[self._local.errno.value]

    def show(self):
        """Return a list of dicts describing every node in the cluster."""
        count = c_int()
        cluster_ptr = _ins.SDKShowCluster(self._sdk, byref(count))
        count = count.value
        ClusterArray = self._ClusterNodeInfo * count
        clusters = ClusterArray.from_address(cluster_ptr)
        cluster_list = []
        for i in xrange(count):
            cluster_list.append({
                'server_id' : str(clusters[i].server_id),
                'status' : NodeStatus[clusters[i].status],
                'term' : clusters[i].term,
                'last_log_index' : clusters[i].last_log_index,
                'last_log_term' : clusters[i].last_log_term,
                'commit_index' : clusters[i].commit_index,
                'last_applied' : clusters[i].last_applied
            })
        _ins.DeleteClusterArray(cluster_ptr)
        return cluster_list

    def stat(self):
        """Return per-node operation statistics (current/average per op)."""
        count = c_int()
        stat_ptr = _ins.SDKShowStatistics(self._sdk, byref(count))
        count = count.value
        StatArray = self._NodeStatInfo * count
        stats = StatArray.from_address(stat_ptr)
        stat_list = []
        for i in xrange(count):
            entry = {
                'server_id' : str(stats[i].server_id),
                'status' : NodeStatus[stats[i].status],
            }
            # Unroll the 8x2 stats table into named {current, average} dicts.
            for op_idx, op_name in enumerate(self._STAT_OPS):
                entry[op_name] = {
                    'current' : int(stats[i].stats[op_idx][0]),
                    'average' : int(stats[i].stats[op_idx][1]),
                }
            stat_list.append(entry)
        _ins.DeleteStatArray(stat_ptr)
        return stat_list

    def get(self, key):
        """Fetch the value stored under ``key``; returns "" on miss/failure."""
        buf = c_char_p()
        buf_len = c_int()
        ok = _ins.SDKGet(self._sdk, key, len(key), byref(buf), byref(buf_len), byref(self._local.errno))
        if ok:
            # BUG FIX: comparing the c_int object itself to 0 was always
            # False; compare its .value so empty results are handled before
            # string_at touches the (possibly NULL) buffer.
            if buf_len.value == 0:
                return ""
            value = string_at(buf, buf_len.value)
            _ins.FreeString(buf)
            return value
        else:
            return ""

    def put(self, key, value):
        """Store ``value`` under ``key``; returns True on success."""
        return _ins.SDKPut(self._sdk, key, len(key), value, len(value), byref(self._local.errno))

    def delete(self, key):
        """Remove ``key``; returns True on success."""
        return _ins.SDKDelete(self._sdk, key, len(key), byref(self._local.errno))

    def scan(self, start_key, end_key):
        """Return a ScanResult iterator over [start_key, end_key)."""
        self._local.errno = c_int(0)
        return ScanResult(_ins.SDKScan(self._sdk, start_key, len(start_key), end_key, len(end_key)))

    def watch(self, key, callback, context):
        """Register ``callback(WatchParam, error_name)`` to fire once when
        ``key`` changes."""
        ctx = py_object(context)
        self._contexts[addressof(ctx)] = ctx
        self._callback[id(callback)] = callback
        def _watch_wrapper(param, cb, error):
            param = param.contents
            context = cast(c_void_p(param.context), POINTER(py_object)).contents
            pm = WatchParam(param.key, param.value, param.deleted, context.value)
            InsSDK._callback[cb](pm, SDKError[error])
            del InsSDK._callback[cb]
            del InsSDK._contexts[addressof(context)]
            InsSDK._cfuncs.pop(cb, None)
        cfunc = self.WatchCallback(_watch_wrapper)
        # BUG FIX: keep the CFUNCTYPE thunk referenced until the callback
        # fires; a temporary would be garbage collected while C still holds
        # the function pointer, crashing on delivery.
        InsSDK._cfuncs[id(callback)] = cfunc
        return _ins.SDKWatch(self._sdk, key, len(key), cfunc, \
                             id(callback), byref(ctx), byref(self._local.errno))

    def lock(self, key):
        """Block until the distributed lock on ``key`` is acquired."""
        return _ins.SDKLock(self._sdk, key, len(key), byref(self._local.errno))

    def trylock(self, key):
        """Non-blocking lock attempt on ``key``."""
        return _ins.SDKTryLock(self._sdk, key, len(key), byref(self._local.errno))

    def unlock(self, key):
        """Release the distributed lock on ``key``."""
        return _ins.SDKUnLock(self._sdk, key, len(key), byref(self._local.errno))

    def login(self, username, password):
        return _ins.SDKLogin(self._sdk, username, password, byref(self._local.errno))

    def logout(self):
        return _ins.SDKLogout(self._sdk, byref(self._local.errno))

    def register(self, username, password):
        return _ins.SDKRegister(self._sdk, username, password, byref(self._local.errno))

    def get_session_id(self):
        return _ins.SDKGetSessionID(self._sdk)

    def get_current_user_id(self):
        return _ins.SDKGetCurrentUserID(self._sdk)

    def is_logged_in(self):
        return _ins.SDKIsLoggedIn(self._sdk)

    def register_session_timeout(self, callback, context):
        """Register ``callback(context)`` to fire when the session times out."""
        ctx = py_object(context)
        # BUG FIX: the original keyed on addressof(context) -- addressof() on
        # a plain Python object raises TypeError; the ctypes wrapper ``ctx``
        # is what must be kept alive and keyed.
        self._contexts[addressof(ctx)] = ctx
        self._callback[id(callback)] = callback
        def _timeout_wrapper(cb, raw_ctx):
            context = cast(raw_ctx, POINTER(py_object)).contents
            InsSDK._callback[cb](context.value)
            del InsSDK._callback[cb]
            del InsSDK._contexts[addressof(context)]
            InsSDK._cfuncs.pop(cb, None)
        cfunc = self.SessionTimeoutCallback(_timeout_wrapper)
        InsSDK._cfuncs[id(callback)] = cfunc
        # BUG FIX: the original passed the undefined name ``wrapper``
        # (NameError); pass the wrapper defined above, kept referenced so the
        # C side can call it later.
        _ins.SDKRegisterSessionTimeout(self._sdk, cfunc, \
                                       id(callback), byref(ctx))

    # Class-level registries keep Python callbacks, their contexts, and the
    # ctypes thunks alive while the C side may still reference them.
    _contexts = {}
    _callback = {}
    _cfuncs = {}
class ScanResult:
    """Iterator over a range scan; wraps the C-side scan cursor."""

    def __init__(self, res_ptr):
        self.scanner = res_ptr

    def __del__(self):
        if _ins != None:
            _ins.DeleteScanResult(self.scanner)

    def __iter__(self):
        return self

    def done(self):
        """True once the cursor is exhausted."""
        return _ins.ScanResultDone(self.scanner)

    def error(self):
        """Symbolic name of the scan's error state."""
        return SDKError[_ins.ScanResultError(self.scanner)]

    def _read_string(self, reader):
        # Shared fetch logic for key()/value(): copy the C buffer, free it,
        # and return "" if the accessor reports failure.
        buf = c_char_p()
        buf_len = c_int()
        if not reader(self.scanner, byref(buf), byref(buf_len)):
            return ""
        data = string_at(buf, buf_len.value)
        _ins.FreeString(buf)
        return data

    def key(self):
        """Key at the cursor position, or "" on failure."""
        return self._read_string(_ins.ScanResultKey)

    def value(self):
        """Value at the cursor position, or "" on failure."""
        return self._read_string(_ins.ScanResultValue)

    def pair(self):
        return self.key(), self.value()

    def next(self):
        """Return the (key, value) at the cursor and advance it."""
        if self.done():
            raise StopIteration()
        else:
            key, value = self.pair()
            _ins.ScanResultNext(self.scanner)
            return key, value

    # Python 3 iterator protocol; harmless alias under Python 2.
    __next__ = next
# Load the shared library from the working directory; every wrapper in this
# module resolves its C entry points through this handle at call time.
_ins = CDLL('./libins_py.so')
def _set_function_sign():
    """Declare argtypes/restype for every C entry point used by this module.

    Must run before any SDK call: without these declarations ctypes would
    default every return value to c_int, truncating 64-bit pointers.
    """
    # Cluster inspection (opaque arrays, freed via Delete*Array).
    _ins.SDKShowCluster.argtypes = [c_void_p, POINTER(c_int)]
    _ins.SDKShowCluster.restype = c_void_p
    _ins.SDKShowStatistics.argtypes = [c_void_p, POINTER(c_int)]
    _ins.SDKShowStatistics.restype = c_void_p
    # Key/value operations.
    _ins.SDKGet.argtypes = [c_void_p, c_char_p, c_int, POINTER(c_char_p), POINTER(c_int), POINTER(c_int)]
    _ins.SDKGet.restype = c_bool
    _ins.SDKPut.argtypes = [c_void_p, c_char_p, c_int, c_char_p, c_int, POINTER(c_int)]
    _ins.SDKPut.restype = c_bool
    _ins.SDKDelete.argtypes = [c_void_p, c_char_p, c_int, POINTER(c_int)]
    _ins.SDKDelete.restype = c_bool
    _ins.SDKScan.argtypes = [c_void_p, c_char_p, c_int, c_char_p, c_int]
    _ins.SDKScan.restype = c_void_p
    _ins.SDKWatch.argtypes = [c_void_p, c_char_p, c_int, InsSDK.WatchCallback, \
                              c_long, c_void_p, c_void_p]
    _ins.SDKWatch.restype = c_bool
    # Distributed locking.
    _ins.SDKLock.argtypes = [c_void_p, c_char_p, c_int, POINTER(c_int)]
    _ins.SDKLock.restype = c_bool
    _ins.SDKTryLock.argtypes = [c_void_p, c_char_p, c_int, POINTER(c_int)]
    _ins.SDKTryLock.restype = c_bool
    _ins.SDKUnLock.argtypes = [c_void_p, c_char_p, c_int, POINTER(c_int)]
    _ins.SDKUnLock.restype = c_bool
    # Authentication / session management.
    _ins.SDKLogin.argtypes = [c_void_p, c_char_p, c_char_p, POINTER(c_int)]
    _ins.SDKLogin.restype = c_bool
    _ins.SDKLogout.argtypes = [c_void_p, POINTER(c_int)]
    _ins.SDKLogout.restype = c_bool
    _ins.SDKRegister.argtypes = [c_void_p, c_char_p, c_char_p, POINTER(c_int)]
    _ins.SDKRegister.restype = c_bool
    _ins.SDKGetSessionID.argtypes = [c_void_p]
    _ins.SDKGetSessionID.restype = c_char_p
    _ins.SDKGetCurrentUserID.argtypes = [c_void_p]
    _ins.SDKGetCurrentUserID.restype = c_char_p
    _ins.SDKIsLoggedIn.argtypes = [c_void_p]
    _ins.SDKIsLoggedIn.restype = c_bool
    # restype intentionally left at the ctypes default (return value unused).
    _ins.SDKRegisterSessionTimeout.argtypes = [c_void_p, InsSDK.SessionTimeoutCallback, \
                                               c_long, c_void_p]
    # Scan cursor accessors.
    _ins.ScanResultDone.argtypes = [c_void_p]
    _ins.ScanResultDone.restype = c_bool
    _ins.ScanResultError.argtypes = [c_void_p]
    _ins.ScanResultError.restype = c_int
    _ins.ScanResultKey.argtypes = [c_void_p, POINTER(c_char_p), POINTER(c_int)]
    _ins.ScanResultKey.restype = c_bool
    _ins.ScanResultValue.argtypes = [c_void_p, POINTER(c_char_p), POINTER(c_int)]
    _ins.ScanResultValue.restype = c_bool
    _ins.FreeString.argtypes = [c_char_p]
    _ins.FreeString.restype = c_void_p
# Apply the signatures at import time, before any SDK object is created.
_set_function_sign()
| {
"content_hash": "fb62f8c73b80bb3ff216be9297e2d9ea",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 105,
"avg_line_length": 38.36092715231788,
"alnum_prop": 0.5507121277514027,
"repo_name": "fxsjy/ins",
"id": "2be7e9610bf7df901703e9af8f22d212865580cc",
"size": "11604",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sdk/ins_sdk.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "273310"
},
{
"name": "Makefile",
"bytes": "5256"
},
{
"name": "Protocol Buffer",
"bytes": "6359"
},
{
"name": "Python",
"bytes": "14820"
},
{
"name": "Shell",
"bytes": "5208"
}
],
"symlink_target": ""
} |
# Agent configuration keys and payload-parameter names for the Python
# cartridge agent. The string values must match the message-broker payloads
# produced by the other components; do not alter spellings unilaterally.
PARAM_FILE_PATH = "param.file.path"
PLUGINS_DIR = "plugins.dir"
EXTENSIONS_DIR = "extensions.dir"
MB_IP = "mb.ip"
MB_PORT = "mb.port"
CARTRIDGE_KEY = "CARTRIDGE_KEY"
APPLICATION_ID = "APPLICATION_ID"
APPLICATION_PATH = "APPLICATION_PATH"
# NOTE(review): "SERIVCE_GROUP" is misspelled, but it appears to mirror the
# payload key used elsewhere (see SERVICE_GROUP_TOPOLOGY_KEY) -- confirm the
# wire format before correcting.
SERVICE_GROUP = "SERIVCE_GROUP"
SERVICE_NAME = "SERVICE_NAME"
CLUSTER_ID = "CLUSTER_ID"
CLUSTER_INSTANCE_ID = "CLUSTER_INSTANCE_ID"
MEMBER_ID = "MEMBER_ID"
INSTANCE_ID = "INSTANCE_ID"
LB_CLUSTER_ID = "LB_CLUSTER_ID"
NETWORK_PARTITION_ID = "NETWORK_PARTITION_ID"
PARTITION_ID = "PARTITION_ID"
TENANT_ID = "TENANT_ID"
REPO_URL = "REPO_URL"
PORTS = "PORTS"
PERSISTENCE_MAPPING = "PERSISTENCE_MAPPING"
DEPENDENCY_CLUSTER_IDS = "DEPENDENCY_CLUSTER_IDS"
EXPORT_METADATA_KEYS = "EXPORT_METADATA_KEYS"
IMPORT_METADATA_KEYS = "IMPORT_METADATA_KEYS"
CARTRIDGE_ALIAS = "CARTRIDGE_ALIAS"
TOKEN = "TOKEN"
LVS_VIRTUAL_IP = "LVS_VIRTUAL_IP"
# stratos.sh environment variables keys
LOG_FILE_PATHS = "log.file.paths"
MEMORY_CONSUMPTION = "memory_consumption"
LOAD_AVERAGE = "load_average"
PORTS_NOT_OPEN = "ports_not_open"
MULTITENANT = "MULTITENANT"
CLUSTERING = "CLUSTERING"
MIN_INSTANCE_COUNT = "MIN_COUNT"
ENABLE_ARTIFACT_UPDATE = "enable.artifact.update"
ARTIFACT_UPDATE_INTERVAL = "artifact.update.interval"
ARTIFACT_CLONE_RETRIES = "artifact.clone.retries"
ARTIFACT_CLONE_INTERVAL = "artifact.clone.interval"
COMMIT_ENABLED = "COMMIT_ENABLED"
AUTO_COMMIT = "auto.commit"
AUTO_CHECKOUT = "auto.checkout"
LISTEN_ADDRESS = "listen.address"
PROVIDER = "PROVIDER"
INTERNAL = "INTERNAL"
LB_PRIVATE_IP = "lb.private.ip"
LB_PUBLIC_IP = "lb.public.ip"
METADATA_SERVICE_URL = "metadata.service.url"
SERVICE_GROUP_TOPOLOGY_KEY = "payload_parameter.SERIVCE_GROUP"
CLUSTERING_TOPOLOGY_KEY = "payload_parameter.CLUSTERING"
CLUSTERING_PRIMARY_KEY = "PRIMARY"
SUPERTENANT_TEMP_PATH = "/tmp/-1234/"
SUPER_TENANT_REPO_PATH = "super.tenant.repository.path"
TENANT_REPO_PATH = "tenant.repository.path"
# topic names to subscribe
INSTANCE_NOTIFIER_TOPIC = "instance/#"
HEALTH_STAT_TOPIC = "health/#"
TOPOLOGY_TOPIC = "topology/#"
TENANT_TOPIC = "tenant/#"
INSTANCE_STATUS_TOPIC = "instance/status/"
APPLICATION_SIGNUP = "application/signup/#"
# Messaging Model
TENANT_RANGE_DELIMITER = "-"
# MB events
ARTIFACT_UPDATED_EVENT = "ArtifactUpdatedEvent"
INSTANCE_STARTED_EVENT = "InstanceStartedEvent"
INSTANCE_ACTIVATED_EVENT = "InstanceActivatedEvent"
INSTANCE_MAINTENANCE_MODE_EVENT = "InstanceMaintenanceModeEvent"
INSTANCE_READY_TO_SHUTDOWN_EVENT = "InstanceReadyToShutdownEvent"
INSTANCE_CLEANUP_CLUSTER_EVENT = "InstanceCleanupClusterEvent"
INSTANCE_CLEANUP_MEMBER_EVENT = "InstanceCleanupMemberEvent"
COMPLETE_TOPOLOGY_EVENT = "CompleteTopologyEvent"
COMPLETE_TENANT_EVENT = "CompleteTenantEvent"
DOMAIN_MAPPING_ADDED_EVENT = "DomainMappingAddedEvent"
DOMAIN_MAPPING_REMOVED_EVENT = "DomainMappingRemovedEvent"
MEMBER_INITIALIZED_EVENT = "MemberInitializedEvent"
MEMBER_ACTIVATED_EVENT = "MemberActivatedEvent"
MEMBER_TERMINATED_EVENT = "MemberTerminatedEvent"
MEMBER_SUSPENDED_EVENT = "MemberSuspendedEvent"
MEMBER_STARTED_EVENT = "MemberStartedEvent"
TENANT_SUBSCRIBED_EVENT = "TenantSubscribedEvent"
APPLICATION_SIGNUP_REMOVAL_EVENT = "ApplicationSignUpRemovedEvent"
CREATE_LVS_DUMMY_INTERFACE = "CreateLVSDummyInterface"
PRIMARY = "PRIMARY"
MIN_COUNT = "MIN_COUNT"
# multi tenant constants
INVALID_TENANT_ID = "-1"
SUPER_TENANT_ID = "-1234"
DATE_FORMAT = "%Y.%m.%d"
PORT_CHECK_TIMEOUT = "port.check.timeout"
CEP_PUBLISHER_ENABLED = "cep.stats.publisher.enabled"
CEP_RECEIVER_IP = "thrift.receiver.ip"
CEP_RECEIVER_PORT = "thrift.receiver.port"
CEP_SERVER_ADMIN_USERNAME = "thrift.server.admin.username"
CEP_SERVER_ADMIN_PASSWORD = "thrift.server.admin.password"
MONITORING_PUBLISHER_ENABLED = "enable.data.publisher"
MONITORING_RECEIVER_IP = "monitoring.server.ip"
MONITORING_RECEIVER_PORT = "monitoring.server.port"
MONITORING_RECEIVER_SECURE_PORT = "monitoring.server.secure.port"
MONITORING_SERVER_ADMIN_USERNAME = "monitoring.server.admin.username"
MONITORING_SERVER_ADMIN_PASSWORD = "monitoring.server.admin.password"
| {
"content_hash": "ebff24d9ea085244ac9e17d4d427fb59",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 69,
"avg_line_length": 34.64957264957265,
"alnum_prop": 0.7760236803157375,
"repo_name": "dinithis/stratos",
"id": "55106599300c0c8ddf85192f7385b86c77eb98f2",
"size": "4838",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "components/org.apache.stratos.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "17184"
},
{
"name": "C",
"bytes": "27195"
},
{
"name": "CSS",
"bytes": "71867"
},
{
"name": "HTML",
"bytes": "7586"
},
{
"name": "Handlebars",
"bytes": "136406"
},
{
"name": "Java",
"bytes": "5946275"
},
{
"name": "JavaScript",
"bytes": "743237"
},
{
"name": "Python",
"bytes": "518762"
},
{
"name": "Ruby",
"bytes": "3546"
},
{
"name": "Shell",
"bytes": "124630"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class StorageAccountUpdateParameters(Model):
    """The parameters to update on the account.

    :param tags: Resource tags
    :type tags: dict
    :param account_type: Gets or sets the account type. Note that StandardZRS
     and PremiumLRS accounts cannot be changed to other account types, and
     other account types cannot be changed to StandardZRS or PremiumLRS.
     Possible values include: 'Standard_LRS', 'Standard_ZRS', 'Standard_GRS',
     'Standard_RAGRS', 'Premium_LRS'
    :type account_type: str or :class:`AccountType <storage.models.AccountType>`
    :param custom_domain: User domain assigned to the storage account. Name is
     the CNAME source. Only one custom domain is supported per storage account
     at this time. To clear the existing custom domain, use an empty string
     for the custom domain name property.
    :type custom_domain: :class:`CustomDomain <storage.models.CustomDomain>`
    """

    # Maps constructor attributes onto the wire-format property paths.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'account_type': {'key': 'properties.accountType', 'type': 'AccountType'},
        'custom_domain': {'key': 'properties.customDomain', 'type': 'CustomDomain'},
    }

    def __init__(self, tags=None, account_type=None, custom_domain=None):
        # Plain attribute storage; serialization is driven by _attribute_map.
        self.custom_domain = custom_domain
        self.account_type = account_type
        self.tags = tags
| {
"content_hash": "e50bfab47e542057371c837a110092d4",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 84,
"avg_line_length": 43.9375,
"alnum_prop": 0.686344238975818,
"repo_name": "lmazuel/autorest",
"id": "c063ee8b044f6746de883b94a3d212f5fa824e64",
"size": "1728",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Samples/2a-validation/Python/storage/models/storage_account_update_parameters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "36"
},
{
"name": "C#",
"bytes": "15043916"
},
{
"name": "CSS",
"bytes": "110"
},
{
"name": "CoffeeScript",
"bytes": "64212"
},
{
"name": "Go",
"bytes": "149926"
},
{
"name": "HTML",
"bytes": "274"
},
{
"name": "Java",
"bytes": "7894733"
},
{
"name": "JavaScript",
"bytes": "6955414"
},
{
"name": "PowerShell",
"bytes": "41223"
},
{
"name": "Python",
"bytes": "2111184"
},
{
"name": "Ruby",
"bytes": "182108"
},
{
"name": "Shell",
"bytes": "196"
},
{
"name": "TypeScript",
"bytes": "465386"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
# ``Literal`` moved between typing and typing_extensions across versions.
if sys.version_info >= (3, 8):
    from typing import Literal  # pylint: disable=no-name-in-module, ungrouped-imports
else:
    from typing_extensions import Literal  # type: ignore  # pylint: disable=ungrouped-imports
T = TypeVar("T")
# Optional response hook: receives the pipeline response, the deserialized
# body, and the response headers.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
    resource_group_name: str,
    account_name: str,
    blob_inventory_policy_name: Union[str, _models.BlobInventoryPolicyName],
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for a storage account's blob inventory policy."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))  # type: Literal["2021-06-01"]
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1, pattern=r"^[-\w\._\(\)]+$"
        ),
        "accountName": _SERIALIZER.url("account_name", account_name, "str", max_length=24, min_length=3),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "blobInventoryPolicyName": _SERIALIZER.url("blob_inventory_policy_name", blob_inventory_policy_name, "str"),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_create_or_update_request(
    resource_group_name: str,
    account_name: str,
    blob_inventory_policy_name: Union[str, _models.BlobInventoryPolicyName],
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the PUT request that creates or replaces a blob inventory policy."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))  # type: Literal["2021-06-01"]
    content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1, pattern=r"^[-\w\._\(\)]+$"
        ),
        "accountName": _SERIALIZER.url("account_name", account_name, "str", max_length=24, min_length=3),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "blobInventoryPolicyName": _SERIALIZER.url("blob_inventory_policy_name", blob_inventory_policy_name, "str"),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    if content_type is not None:
        _headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="PUT", url=_url, params=_params, headers=_headers, **kwargs)
def build_delete_request(
    resource_group_name: str,
    account_name: str,
    blob_inventory_policy_name: Union[str, _models.BlobInventoryPolicyName],
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the DELETE request for a storage account's blob inventory policy."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))  # type: Literal["2021-06-01"]
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1, pattern=r"^[-\w\._\(\)]+$"
        ),
        "accountName": _SERIALIZER.url("account_name", account_name, "str", max_length=24, min_length=3),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "blobInventoryPolicyName": _SERIALIZER.url("blob_inventory_policy_name", blob_inventory_policy_name, "str"),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="DELETE", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_request(resource_group_name: str, account_name: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
    """Build the GET request listing all blob inventory policies of an account."""
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))  # type: Literal["2021-06-01"]
    accept = _headers.pop("Accept", "application/json")
    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1, pattern=r"^[-\w\._\(\)]+$"
        ),
        "accountName": _SERIALIZER.url("account_name", account_name, "str", max_length=24, min_length=3),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
    }
    _url = _format_url_section(_url, **path_format_arguments)
    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class BlobInventoryPoliciesOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.storage.v2021_06_01.StorageManagementClient`'s
        :attr:`blob_inventory_policies` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs):
        # Accepts (client, config, serializer, deserializer) either positionally
        # or as keyword arguments, consumed in that fixed order.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        account_name: str,
        blob_inventory_policy_name: Union[str, _models.BlobInventoryPolicyName],
        **kwargs: Any
    ) -> _models.BlobInventoryPolicy:
        """Gets the blob inventory policy associated with the specified storage account.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param blob_inventory_policy_name: The name of the storage account blob inventory policy. It
         should always be 'default'. "default" Required.
        :type blob_inventory_policy_name: str or
         ~azure.mgmt.storage.v2021_06_01.models.BlobInventoryPolicyName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobInventoryPolicy or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_06_01.models.BlobInventoryPolicy
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        # NOTE(review): headers are left as a plain dict here, while
        # create_or_update wraps them in case_insensitive_dict — confirm whether
        # that asymmetry is intended by the generator.
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))  # type: Literal["2021-06-01"]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.BlobInventoryPolicy]
        # Build the templated GET request, then hand it to the client pipeline.
        request = build_get_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            blob_inventory_policy_name=blob_inventory_policy_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # Only 200 is a success for GET; anything else is surfaced as an ARM error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("BlobInventoryPolicy", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}"}  # type: ignore
    @overload
    def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        blob_inventory_policy_name: Union[str, _models.BlobInventoryPolicyName],
        properties: _models.BlobInventoryPolicy,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.BlobInventoryPolicy:
        """Sets the blob inventory policy to the specified storage account.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param blob_inventory_policy_name: The name of the storage account blob inventory policy. It
         should always be 'default'. "default" Required.
        :type blob_inventory_policy_name: str or
         ~azure.mgmt.storage.v2021_06_01.models.BlobInventoryPolicyName
        :param properties: The blob inventory policy set to a storage account. Required.
        :type properties: ~azure.mgmt.storage.v2021_06_01.models.BlobInventoryPolicy
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobInventoryPolicy or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_06_01.models.BlobInventoryPolicy
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @overload
    def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        blob_inventory_policy_name: Union[str, _models.BlobInventoryPolicyName],
        properties: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.BlobInventoryPolicy:
        """Sets the blob inventory policy to the specified storage account.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param blob_inventory_policy_name: The name of the storage account blob inventory policy. It
         should always be 'default'. "default" Required.
        :type blob_inventory_policy_name: str or
         ~azure.mgmt.storage.v2021_06_01.models.BlobInventoryPolicyName
        :param properties: The blob inventory policy set to a storage account. Required.
        :type properties: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobInventoryPolicy or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_06_01.models.BlobInventoryPolicy
        :raises ~azure.core.exceptions.HttpResponseError:
        """
    @distributed_trace
    def create_or_update(
        self,
        resource_group_name: str,
        account_name: str,
        blob_inventory_policy_name: Union[str, _models.BlobInventoryPolicyName],
        properties: Union[_models.BlobInventoryPolicy, IO],
        **kwargs: Any
    ) -> _models.BlobInventoryPolicy:
        """Sets the blob inventory policy to the specified storage account.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param blob_inventory_policy_name: The name of the storage account blob inventory policy. It
         should always be 'default'. "default" Required.
        :type blob_inventory_policy_name: str or
         ~azure.mgmt.storage.v2021_06_01.models.BlobInventoryPolicyName
        :param properties: The blob inventory policy set to a storage account. Is either a model type
         or a IO type. Required.
        :type properties: ~azure.mgmt.storage.v2021_06_01.models.BlobInventoryPolicy or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: BlobInventoryPolicy or the result of cls(response)
        :rtype: ~azure.mgmt.storage.v2021_06_01.models.BlobInventoryPolicy
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))  # type: Literal["2021-06-01"]
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.BlobInventoryPolicy]
        content_type = content_type or "application/json"
        # A raw stream/bytes body is sent as-is; a model instance is serialized to JSON.
        _json = None
        _content = None
        if isinstance(properties, (IO, bytes)):
            _content = properties
        else:
            _json = self._serialize.body(properties, "BlobInventoryPolicy")
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            blob_inventory_policy_name=blob_inventory_policy_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.create_or_update.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("BlobInventoryPolicy", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}"}  # type: ignore
    @distributed_trace
    def delete(  # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        account_name: str,
        blob_inventory_policy_name: Union[str, _models.BlobInventoryPolicyName],
        **kwargs: Any
    ) -> None:
        """Deletes the blob inventory policy associated with the specified storage account.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :param blob_inventory_policy_name: The name of the storage account blob inventory policy. It
         should always be 'default'. "default" Required.
        :type blob_inventory_policy_name: str or
         ~azure.mgmt.storage.v2021_06_01.models.BlobInventoryPolicyName
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))  # type: Literal["2021-06-01"]
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        request = build_delete_request(
            resource_group_name=resource_group_name,
            account_name=account_name,
            blob_inventory_policy_name=blob_inventory_policy_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.delete.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # DELETE treats both 200 and 204 (already absent) as success.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies/{blobInventoryPolicyName}"}  # type: ignore
    @distributed_trace
    def list(
        self, resource_group_name: str, account_name: str, **kwargs: Any
    ) -> Iterable["_models.BlobInventoryPolicy"]:
        """Gets the blob inventory policy associated with the specified storage account.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive. Required.
        :type resource_group_name: str
        :param account_name: The name of the storage account within the specified resource group.
         Storage account names must be between 3 and 24 characters in length and use numbers and
         lower-case letters only. Required.
        :type account_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either BlobInventoryPolicy or the result of cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_06_01.models.BlobInventoryPolicy]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2021-06-01"))  # type: Literal["2021-06-01"]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.ListBlobInventoryPolicy]
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # First page uses the templated URL; subsequent pages replay next_link.
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize one page; `cls` (if given) transforms the element list.
            deserialized = self._deserialize("ListBlobInventoryPolicy", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # This operation is not server-paged: no continuation token is returned.
            return None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(get_next, extract_data)
    list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/inventoryPolicies"}  # type: ignore
| {
"content_hash": "2e0270bbef1eab9ca949a44141a36bfc",
"timestamp": "",
"source": "github",
"line_count": 597,
"max_line_length": 227,
"avg_line_length": 47.08710217755444,
"alnum_prop": 0.6563267048486358,
"repo_name": "Azure/azure-sdk-for-python",
"id": "54e7b3ab010ae28be19b56e091898ce2d287c527",
"size": "28611",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_06_01/operations/_blob_inventory_policies_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import sys
import os
# Make sure we're using the right python. Assume we're running out of the venv.
INTERP = os.path.join(os.getcwd(), 'bin', 'python')
if sys.executable != INTERP:
    os.execl(INTERP, INTERP, *sys.argv)
sys.path.append(os.getcwd())
import configparser
import importlib
config = configparser.ConfigParser()
config.read("config.ini")
# Map of telegram token -> webhook bot instance, keyed for dispatch below.
bots = {}
for bot in config.sections():
    # BUG FIX: the original tested config[bot]["webhook"] == "1" here, so the
    # "disabled" flag was never actually honored; check the flag's own value.
    if config[bot].get("disabled") == "1":
        print("Bot {0} disabled".format(bot))
        continue
    # Only bots explicitly opted into webhook mode are served by this WSGI app.
    if config[bot].get("webhook") != "1":
        print("Bot {0} not using webhook".format(bot))
        continue
    if "repo_name" not in config[bot]:
        raise RuntimeError("Cannot find repo for bot {0}".format(bot))
    bot_path = os.path.join(os.getcwd(), config[bot]["repo_name"])
    if not os.path.isdir(bot_path):
        raise RuntimeError("Cannot find path {0} for bot {1}".format(bot_path,
                                                                     bot))
    sys.path.append(bot_path)
    # Assume the bot module is the same as the config file
    if "module_name" not in config[bot]:
        raise RuntimeError("Cannot find module for bot {0}".format(bot))
    module = config[bot]["module_name"]
    importlib.import_module(module)
    # Each bot module must expose create_webhook_bot(config_section).
    bots[config[bot]["token"]] = getattr(sys.modules[module],
                                         "create_webhook_bot")(config[bot])
if not bots:
    raise RuntimeError("Not running any bots!")
from flask import Flask, request
import telegram
application = Flask(__name__)


@application.route('/')
def hello():
    """Health-check endpoint; returns an empty body."""
    return ""


@application.route('/telegram/<token>', methods=['POST'])
def webhook(token):
    """Route an incoming Telegram update to the bot owning *token*.

    Unknown tokens are silently acknowledged so Telegram stops retrying.
    """
    update = telegram.update.Update.de_json(request.get_json(force=True))
    if token not in bots:
        return 'OK'
    bots[token].update_queue.put(update)
    return 'OK'


if __name__ == "__main__":
    application.run()
| {
"content_hash": "794febcb48d932d0a4d9d0db4b72ce35",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 79,
"avg_line_length": 31.428571428571427,
"alnum_prop": 0.6202020202020202,
"repo_name": "qdot/np-telegram-bot",
"id": "9b1b615b59000769817f0719b26553960fb4b455",
"size": "1980",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "passenger_wsgi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "36510"
},
{
"name": "Shell",
"bytes": "114"
}
],
"symlink_target": ""
} |
"""Support for OwnTracks."""
from collections import defaultdict
import json
import logging
import re
from aiohttp.web import json_response
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import mqtt
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.setup import async_when_setup
from .config_flow import CONF_SECRET
# Optional dependency used for payload decryption (CONF_SECRET).
REQUIREMENTS = ['PyNaCl==1.3.0']
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'owntracks'
DEPENDENCIES = ['webhook']
# Configuration keys accepted under the `owntracks:` section.
CONF_MAX_GPS_ACCURACY = 'max_gps_accuracy'
CONF_WAYPOINT_IMPORT = 'waypoints'
CONF_WAYPOINT_WHITELIST = 'waypoint_whitelist'
CONF_MQTT_TOPIC = 'mqtt_topic'
CONF_REGION_MAPPING = 'region_mapping'
CONF_EVENTS_ONLY = 'events_only'
# Prefix used for device-tracker entities created from mobile beacons.
BEACON_DEV_ID = 'beacon'
DEFAULT_OWNTRACKS_TOPIC = 'owntracks/#'
# Schema for YAML configuration; extra top-level keys are tolerated.
CONFIG_SCHEMA = vol.Schema({
    vol.Optional(DOMAIN, default={}): {
        vol.Optional(CONF_MAX_GPS_ACCURACY): vol.Coerce(float),
        vol.Optional(CONF_WAYPOINT_IMPORT, default=True): cv.boolean,
        vol.Optional(CONF_EVENTS_ONLY, default=False): cv.boolean,
        vol.Optional(CONF_MQTT_TOPIC, default=DEFAULT_OWNTRACKS_TOPIC):
            mqtt.valid_subscribe_topic,
        vol.Optional(CONF_WAYPOINT_WHITELIST): vol.All(
            cv.ensure_list, [cv.string]),
        # Secret may be a single string or a per-topic mapping of strings.
        vol.Optional(CONF_SECRET): vol.Any(
            vol.Schema({vol.Optional(cv.string): cv.string}),
            cv.string),
        vol.Optional(CONF_REGION_MAPPING, default={}): dict,
        vol.Optional(CONF_WEBHOOK_ID): cv.string,
    }
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, config):
    """Store the YAML config and trigger an import config flow if needed."""
    hass.data[DOMAIN] = {'config': config[DOMAIN]}
    # Start an import flow only when no config entry exists yet.
    if not hass.config_entries.async_entries(DOMAIN):
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN,
                context={'source': config_entries.SOURCE_IMPORT},
                data={},
            )
        )
    return True
async def async_setup_entry(hass, entry):
    """Set up OwnTracks entry."""
    conf = hass.data[DOMAIN]['config']
    # YAML config wins over config-entry data for secret and webhook id.
    secret = conf.get(CONF_SECRET) or entry.data[CONF_SECRET]
    webhook_id = conf.get(CONF_WEBHOOK_ID) or entry.data[CONF_WEBHOOK_ID]
    context = OwnTracksContext(
        hass,
        secret,
        conf.get(CONF_MAX_GPS_ACCURACY),
        conf.get(CONF_WAYPOINT_IMPORT),
        conf.get(CONF_WAYPOINT_WHITELIST),
        conf.get(CONF_REGION_MAPPING),
        conf.get(CONF_EVENTS_ONLY),
        conf.get(CONF_MQTT_TOPIC),
    )
    hass.data[DOMAIN]['context'] = context
    # MQTT subscription is deferred until the mqtt component is ready.
    async_when_setup(hass, 'mqtt', async_connect_mqtt)
    hass.components.webhook.async_register(
        DOMAIN, 'OwnTracks', webhook_id, handle_webhook)
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(entry, 'device_tracker'))
    return True
async def async_connect_mqtt(hass, component):
    """Subscribe to the configured OwnTracks MQTT topic."""
    context = hass.data[DOMAIN]['context']

    async def _on_mqtt_message(topic, payload, qos):
        """Decode one OwnTracks MQTT payload and dispatch it."""
        try:
            message = json.loads(payload)
        except ValueError:
            # Payload was not valid JSON; log and drop it.
            _LOGGER.error("Unable to parse payload as JSON: %s", payload)
            return
        message['topic'] = topic
        hass.helpers.dispatcher.async_dispatcher_send(
            DOMAIN, hass, context, message)

    await hass.components.mqtt.async_subscribe(
        context.mqtt_topic, _on_mqtt_message, 1)
    return True
async def handle_webhook(hass, webhook_id, request):
    """Handle an incoming OwnTracks webhook request.

    iOS sets the "topic" as part of the payload.
    Android does not set a topic but adds headers to the request.
    """
    context = hass.data[DOMAIN]['context']
    try:
        message = await request.json()
    except ValueError:
        _LOGGER.warning('Received invalid JSON from OwnTracks')
        return json_response([])
    if 'topic' not in message:
        # Android: reconstruct the topic from the X-Limit-* headers.
        user = request.headers.get('X-Limit-U')
        device = request.headers.get('X-Limit-D', user)
        if user:
            topic_base = re.sub('/#$', '', context.mqtt_topic)
            message['topic'] = '{}/{}/{}'.format(topic_base, user, device)
        elif message['_type'] != 'encrypted':
            _LOGGER.warning(
                'No topic or user found in message. If on Android,'
                ' set a username in Connection -> Identification')
            # Keep it as a 200 response so the incorrect packet is discarded
            return json_response([])
    hass.helpers.dispatcher.async_dispatcher_send(
        DOMAIN, hass, context, message)
    return json_response([])
class OwnTracksContext:
    """Hold the current OwnTracks context."""
    def __init__(self, hass, secret, max_gps_accuracy, import_waypoints,
                 waypoint_whitelist, region_mapping, events_only, mqtt_topic):
        """Initialize an OwnTracks context."""
        self.hass = hass
        self.secret = secret
        self.max_gps_accuracy = max_gps_accuracy
        # dev_id -> set of beacon names currently considered "with" the device.
        self.mobile_beacons_active = defaultdict(set)
        # dev_id -> ordered list of regions the device has entered.
        self.regions_entered = defaultdict(list)
        self.import_waypoints = import_waypoints
        self.waypoint_whitelist = waypoint_whitelist
        self.region_mapping = region_mapping
        self.events_only = events_only
        self.mqtt_topic = mqtt_topic
    @callback
    def async_valid_accuracy(self, message):
        """Check if we should ignore this message."""
        acc = message.get('acc')
        if acc is None:
            return False
        try:
            acc = float(acc)
        except ValueError:
            return False
        # Zero accuracy is treated as "no GPS fix" and rejected.
        if acc == 0:
            _LOGGER.warning(
                "Ignoring %s update because GPS accuracy is zero: %s",
                message['_type'], message)
            return False
        if self.max_gps_accuracy is not None and \
                acc > self.max_gps_accuracy:
            _LOGGER.info("Ignoring %s update because expected GPS "
                         "accuracy %s is not met: %s",
                         message['_type'], self.max_gps_accuracy,
                         message)
            return False
        return True
    async def async_see(self, **data):
        """Send a see message to the device tracker."""
        # Abstract hook: the device_tracker platform supplies the implementation.
        raise NotImplementedError
    async def async_see_beacons(self, hass, dev_id, kwargs_param):
        """Set active beacons to the current location."""
        kwargs = kwargs_param.copy()
        # Mobile beacons should always be set to the location of the
        # tracking device. I get the device state and make the necessary
        # changes to kwargs.
        device_tracker_state = hass.states.get(
            "device_tracker.{}".format(dev_id))
        if device_tracker_state is not None:
            acc = device_tracker_state.attributes.get("gps_accuracy")
            lat = device_tracker_state.attributes.get("latitude")
            lon = device_tracker_state.attributes.get("longitude")
            kwargs['gps_accuracy'] = acc
            kwargs['gps'] = (lat, lon)
        # the battery state applies to the tracking device, not the beacon
        # kwargs location is the beacon's configured lat/lon
        kwargs.pop('battery', None)
        for beacon in self.mobile_beacons_active[dev_id]:
            kwargs['dev_id'] = "{}_{}".format(BEACON_DEV_ID, beacon)
            kwargs['host_name'] = beacon
            await self.async_see(**kwargs)
| {
"content_hash": "f867e5c14e54b77761d8587c90aff463",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 79,
"avg_line_length": 34.41048034934498,
"alnum_prop": 0.6298223350253808,
"repo_name": "HydrelioxGitHub/home-assistant",
"id": "c0d3d152270a35848d740dfa9d9163367ce19779",
"size": "7880",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/owntracks/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "14330009"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
} |
# Backwards-compatibility shim: re-export everything from the new location
# and warn callers that this module path has moved.
from ray.tune._structure_refactor import warn_structure_refactor
from ray.air.integrations.comet import * # noqa: F401, F403
warn_structure_refactor(__name__, "ray.air.integrations.comet")
| {
"content_hash": "306cb9becc7eabefa2c74a4e75c9fe77",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 64,
"avg_line_length": 47.75,
"alnum_prop": 0.774869109947644,
"repo_name": "ray-project/ray",
"id": "5ef4983186f73c1d987e7927482d7f4c8611266c",
"size": "191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/air/callbacks/comet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
} |
import collections
import inspect
import os
from ansible.plugins import action
from six import StringIO
from oslo_config import iniparser
class OverrideConfigParser(iniparser.BaseParser):
    """INI parser that merges several files, later files overriding earlier ones.

    Each call to parse() collects sections/keys into a per-file dict, then
    folds them into the accumulated result so the last parsed file wins on a
    per-key basis. write() emits the merged result in oslo.config style.
    """
    def __init__(self):
        # Sections seen in the file currently being parsed.
        self._cur_sections = collections.OrderedDict()
        # Merged result across all parsed files.
        self._sections = collections.OrderedDict()
        self._cur_section = None
    def assignment(self, key, value):
        # Hook invoked by BaseParser for each `key = value` line.
        # NOTE(review): assumes new_section() has run first so _cur_section is
        # set — relies on BaseParser's callback order; confirm against oslo.
        cur_value = self._cur_section.get(key)
        # A single empty string means `key =` with no value.
        if len(value) == 1 and value[0] == '':
            value = []
        if not cur_value:
            self._cur_section[key] = [value]
        else:
            self._cur_section[key].append(value)
    def parse(self, lineiter):
        """Parse one file and merge its sections into the accumulated result."""
        self._cur_sections = collections.OrderedDict()
        super(OverrideConfigParser, self).parse(lineiter)
        # merge _cur_sections into _sections
        for section, values in self._cur_sections.items():
            if section not in self._sections:
                self._sections[section] = collections.OrderedDict()
            # Per-key overwrite: a later file replaces earlier values wholesale.
            for key, value in values.items():
                self._sections[section][key] = value
    def new_section(self, section):
        # Hook invoked by BaseParser for each `[section]` header.
        cur_section = self._cur_sections.get(section)
        if not cur_section:
            cur_section = collections.OrderedDict()
            self._cur_sections[section] = cur_section
        self._cur_section = cur_section
        return cur_section
    def write(self, fp):
        """Write the merged configuration to file-like object *fp*."""
        def write_key_value(key, values):
            for v in values:
                if not v:
                    # Empty value list came from a bare `key =` line.
                    fp.write('{} =\n'.format(key))
                for index, value in enumerate(v):
                    if index == 0:
                        fp.write('{} = {}\n'.format(key, value))
                    else:
                        # Continuation lines are aligned under the value.
                        fp.write('{} {}\n'.format(len(key)*' ', value))
        def write_section(section):
            for key, values in section.items():
                write_key_value(key, values)
        for section in self._sections:
            fp.write('[{}]\n'.format(section))
            write_section(self._sections[section])
            fp.write('\n')
class ActionModule(action.ActionBase):
    """Ansible action plugin that merges several INI templates into one file.

    Templates each source with the task's variables, merges them with
    OverrideConfigParser (later sources override earlier ones), then delegates
    the actual file transfer to the builtin `copy` module.
    """
    TRANSFERS_FILES = True
    def read_config(self, source, config):
        # Only use config if present
        if os.access(source, os.R_OK):
            with open(source, 'r') as f:
                template_data = f.read()
            # Render Jinja templating in the source before parsing it as INI.
            result = self._templar.template(template_data)
            fakefile = StringIO(result)
            config.parse(fakefile)
            fakefile.close()
    def run(self, tmp=None, task_vars=None):
        if task_vars is None:
            task_vars = dict()
        result = super(ActionModule, self).run(tmp, task_vars)
        # NOTE(jeffrey4l): Ansible 2.1 add a remote_user param to the
        # _make_tmp_path function. inspect the number of the args here. In
        # this way, ansible 2.0 and ansible 2.1 are both supported
        # NOTE(review): inspect.getargspec is removed in Python 3.11; kept here
        # because the file still supports py2 (six import) — revisit on upgrade.
        make_tmp_path_args = inspect.getargspec(self._make_tmp_path)[0]
        if not tmp and len(make_tmp_path_args) == 1:
            tmp = self._make_tmp_path()
        if not tmp and len(make_tmp_path_args) == 2:
            remote_user = (task_vars.get('ansible_user')
                           or self._play_context.remote_user)
            tmp = self._make_tmp_path(remote_user)
        sources = self._task.args.get('sources', None)
        extra_vars = self._task.args.get('vars', list())
        if not isinstance(sources, list):
            sources = [sources]
        # Template each source with task vars plus the task-local extra vars.
        temp_vars = task_vars.copy()
        temp_vars.update(extra_vars)
        config = OverrideConfigParser()
        old_vars = self._templar._available_variables
        self._templar.set_available_variables(temp_vars)
        for source in sources:
            self.read_config(source, config)
        # Restore the templar's variables after rendering the sources.
        self._templar.set_available_variables(old_vars)
        # Dump configparser to string via an emulated file
        fakefile = StringIO()
        config.write(fakefile)
        # Upload the merged content, then run the `copy` module against it.
        remote_path = self._connection._shell.join_path(tmp, 'src')
        xfered = self._transfer_data(remote_path, fakefile.getvalue())
        fakefile.close()
        new_module_args = self._task.args.copy()
        new_module_args.pop('vars', None)
        new_module_args.pop('sources', None)
        new_module_args.update(
            dict(
                src=xfered
            )
        )
        result.update(self._execute_module(module_name='copy',
                                           module_args=new_module_args,
                                           task_vars=task_vars,
                                           tmp=tmp))
        return result
| {
"content_hash": "be91ac7dd580081879d339d795451877",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 75,
"avg_line_length": 33.88405797101449,
"alnum_prop": 0.5571000855431993,
"repo_name": "gokulpch/OpenContrail-Kolla",
"id": "27fcc532324cdffb4d956142add5402f9672dbe4",
"size": "5300",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kolla-ansible/ansible/action_plugins/merge_configs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "135209"
},
{
"name": "Ruby",
"bytes": "11345"
},
{
"name": "Shell",
"bytes": "70724"
}
],
"symlink_target": ""
} |
from helper import unittest, PillowTestCase, hopper
from PIL import Image
from PIL import ImageFilter
class TestImageFilter(PillowTestCase):
    """Exercise the predefined and parameterised PIL ImageFilter filters."""

    def test_sanity(self):
        """Each filter applied to an "L" hopper image preserves mode and size."""
        # Renamed from `filter` — the original shadowed both the builtin
        # `filter` and, via its parameter, its own name.
        def apply_filter(flt):
            im = hopper("L")
            out = im.filter(flt)
            self.assertEqual(out.mode, im.mode)
            self.assertEqual(out.size, im.size)
        apply_filter(ImageFilter.BLUR)
        apply_filter(ImageFilter.CONTOUR)
        apply_filter(ImageFilter.DETAIL)
        apply_filter(ImageFilter.EDGE_ENHANCE)
        apply_filter(ImageFilter.EDGE_ENHANCE_MORE)
        apply_filter(ImageFilter.EMBOSS)
        apply_filter(ImageFilter.FIND_EDGES)
        apply_filter(ImageFilter.SMOOTH)
        apply_filter(ImageFilter.SMOOTH_MORE)
        apply_filter(ImageFilter.SHARPEN)
        # Rank/mode filters may be passed as classes (default parameters)...
        apply_filter(ImageFilter.MaxFilter)
        apply_filter(ImageFilter.MedianFilter)
        apply_filter(ImageFilter.MinFilter)
        apply_filter(ImageFilter.ModeFilter)
        apply_filter(ImageFilter.Kernel((3, 3), list(range(9))))
        # ...or as instances with explicit parameters.
        apply_filter(ImageFilter.GaussianBlur)
        apply_filter(ImageFilter.GaussianBlur(5))
        apply_filter(ImageFilter.UnsharpMask)
        apply_filter(ImageFilter.UnsharpMask(10))
        # A non-filter argument must raise TypeError.
        self.assertRaises(TypeError, lambda: apply_filter("hello"))

    def test_crash(self):
        # crashes on small images (regression check: images smaller than the
        # 3x3 kernel must not crash the C filter code)
        im = Image.new("RGB", (1, 1))
        im.filter(ImageFilter.SMOOTH)
        im = Image.new("RGB", (2, 2))
        im.filter(ImageFilter.SMOOTH)
        im = Image.new("RGB", (3, 3))
        im.filter(ImageFilter.SMOOTH)

    def test_modefilter(self):
        """ModeFilter picks the most common pixel value in the 3x3 block."""
        def modefilter(mode):
            im = Image.new(mode, (3, 3), None)
            im.putdata(list(range(9)))
            # image is:
            #   0 1 2
            #   3 4 5
            #   6 7 8
            mod = im.filter(ImageFilter.ModeFilter).getpixel((1, 1))
            im.putdata([0, 0, 1, 2, 5, 1, 5, 2, 0])  # mode=0
            mod2 = im.filter(ImageFilter.ModeFilter).getpixel((1, 1))
            return mod, mod2
        self.assertEqual(modefilter("1"), (4, 0))
        self.assertEqual(modefilter("L"), (4, 0))
        self.assertEqual(modefilter("P"), (4, 0))
        self.assertEqual(modefilter("RGB"), ((4, 0, 0), (0, 0, 0)))

    def test_rankfilter(self):
        """Min/Median/Max filters return the ranked pixel of the 3x3 block."""
        def rankfilter(mode):
            im = Image.new(mode, (3, 3), None)
            im.putdata(list(range(9)))
            # image is:
            #   0 1 2
            #   3 4 5
            #   6 7 8
            minimum = im.filter(ImageFilter.MinFilter).getpixel((1, 1))
            med = im.filter(ImageFilter.MedianFilter).getpixel((1, 1))
            maximum = im.filter(ImageFilter.MaxFilter).getpixel((1, 1))
            return minimum, med, maximum
        self.assertEqual(rankfilter("1"), (0, 4, 8))
        self.assertEqual(rankfilter("L"), (0, 4, 8))
        # Palette images have no meaningful pixel ordering to rank.
        self.assertRaises(ValueError, lambda: rankfilter("P"))
        self.assertEqual(rankfilter("RGB"), ((0, 0, 0), (4, 0, 0), (8, 0, 0)))
        self.assertEqual(rankfilter("I"), (0, 4, 8))
        self.assertEqual(rankfilter("F"), (0.0, 4.0, 8.0))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
# End of file
| {
"content_hash": "eb55e8a231b640166f2bf5110327100e",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 78,
"avg_line_length": 32.48421052631579,
"alnum_prop": 0.568697342838626,
"repo_name": "mollstam/UnrealPy",
"id": "6a694b3ca64d5d674fb9a1a46d3aaf0ef06669b1",
"size": "3086",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Pillow-2.9.0/Tests/test_image_filter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "APL",
"bytes": "587"
},
{
"name": "ASP",
"bytes": "2753"
},
{
"name": "ActionScript",
"bytes": "5686"
},
{
"name": "Ada",
"bytes": "94225"
},
{
"name": "Agda",
"bytes": "3154"
},
{
"name": "Alloy",
"bytes": "6579"
},
{
"name": "ApacheConf",
"bytes": "12482"
},
{
"name": "AppleScript",
"bytes": "421"
},
{
"name": "Assembly",
"bytes": "1093261"
},
{
"name": "AutoHotkey",
"bytes": "3733"
},
{
"name": "AutoIt",
"bytes": "667"
},
{
"name": "Awk",
"bytes": "63276"
},
{
"name": "Batchfile",
"bytes": "147828"
},
{
"name": "BlitzBasic",
"bytes": "185102"
},
{
"name": "BlitzMax",
"bytes": "2387"
},
{
"name": "Boo",
"bytes": "1111"
},
{
"name": "Bro",
"bytes": "7337"
},
{
"name": "C",
"bytes": "108397183"
},
{
"name": "C#",
"bytes": "156749"
},
{
"name": "C++",
"bytes": "13535833"
},
{
"name": "CLIPS",
"bytes": "6933"
},
{
"name": "CMake",
"bytes": "12441"
},
{
"name": "COBOL",
"bytes": "114812"
},
{
"name": "CSS",
"bytes": "430375"
},
{
"name": "Ceylon",
"bytes": "1387"
},
{
"name": "Chapel",
"bytes": "4366"
},
{
"name": "Cirru",
"bytes": "2574"
},
{
"name": "Clean",
"bytes": "9679"
},
{
"name": "Clojure",
"bytes": "23871"
},
{
"name": "CoffeeScript",
"bytes": "20149"
},
{
"name": "ColdFusion",
"bytes": "9006"
},
{
"name": "Common Lisp",
"bytes": "49017"
},
{
"name": "Coq",
"bytes": "66"
},
{
"name": "Cucumber",
"bytes": "390"
},
{
"name": "Cuda",
"bytes": "776"
},
{
"name": "D",
"bytes": "7556"
},
{
"name": "DIGITAL Command Language",
"bytes": "425938"
},
{
"name": "DTrace",
"bytes": "6706"
},
{
"name": "Dart",
"bytes": "591"
},
{
"name": "Dylan",
"bytes": "6343"
},
{
"name": "Ecl",
"bytes": "2599"
},
{
"name": "Eiffel",
"bytes": "2145"
},
{
"name": "Elixir",
"bytes": "4340"
},
{
"name": "Emacs Lisp",
"bytes": "18303"
},
{
"name": "Erlang",
"bytes": "5746"
},
{
"name": "F#",
"bytes": "19156"
},
{
"name": "FORTRAN",
"bytes": "38458"
},
{
"name": "Factor",
"bytes": "10194"
},
{
"name": "Fancy",
"bytes": "2581"
},
{
"name": "Fantom",
"bytes": "25331"
},
{
"name": "GAP",
"bytes": "29880"
},
{
"name": "GLSL",
"bytes": "450"
},
{
"name": "Gnuplot",
"bytes": "11501"
},
{
"name": "Go",
"bytes": "5444"
},
{
"name": "Golo",
"bytes": "1649"
},
{
"name": "Gosu",
"bytes": "2853"
},
{
"name": "Groff",
"bytes": "3458639"
},
{
"name": "Groovy",
"bytes": "2586"
},
{
"name": "HTML",
"bytes": "92126540"
},
{
"name": "Haskell",
"bytes": "49593"
},
{
"name": "Haxe",
"bytes": "16812"
},
{
"name": "Hy",
"bytes": "7237"
},
{
"name": "IDL",
"bytes": "2098"
},
{
"name": "Idris",
"bytes": "2771"
},
{
"name": "Inform 7",
"bytes": "1944"
},
{
"name": "Inno Setup",
"bytes": "18796"
},
{
"name": "Ioke",
"bytes": "469"
},
{
"name": "Isabelle",
"bytes": "21392"
},
{
"name": "Jasmin",
"bytes": "9428"
},
{
"name": "Java",
"bytes": "4040623"
},
{
"name": "JavaScript",
"bytes": "223927"
},
{
"name": "Julia",
"bytes": "27687"
},
{
"name": "KiCad",
"bytes": "475"
},
{
"name": "Kotlin",
"bytes": "971"
},
{
"name": "LSL",
"bytes": "160"
},
{
"name": "Lasso",
"bytes": "18650"
},
{
"name": "Lean",
"bytes": "6921"
},
{
"name": "Limbo",
"bytes": "9891"
},
{
"name": "Liquid",
"bytes": "862"
},
{
"name": "LiveScript",
"bytes": "972"
},
{
"name": "Logos",
"bytes": "19509"
},
{
"name": "Logtalk",
"bytes": "7260"
},
{
"name": "Lua",
"bytes": "8677"
},
{
"name": "Makefile",
"bytes": "2053844"
},
{
"name": "Mask",
"bytes": "815"
},
{
"name": "Mathematica",
"bytes": "191"
},
{
"name": "Max",
"bytes": "296"
},
{
"name": "Modelica",
"bytes": "6213"
},
{
"name": "Modula-2",
"bytes": "23838"
},
{
"name": "Module Management System",
"bytes": "14798"
},
{
"name": "Monkey",
"bytes": "2587"
},
{
"name": "Moocode",
"bytes": "3343"
},
{
"name": "MoonScript",
"bytes": "14862"
},
{
"name": "Myghty",
"bytes": "3939"
},
{
"name": "NSIS",
"bytes": "7663"
},
{
"name": "Nemerle",
"bytes": "1517"
},
{
"name": "NewLisp",
"bytes": "42726"
},
{
"name": "Nimrod",
"bytes": "37191"
},
{
"name": "Nit",
"bytes": "55581"
},
{
"name": "Nix",
"bytes": "2448"
},
{
"name": "OCaml",
"bytes": "42416"
},
{
"name": "Objective-C",
"bytes": "104883"
},
{
"name": "Objective-J",
"bytes": "15340"
},
{
"name": "Opa",
"bytes": "172"
},
{
"name": "OpenEdge ABL",
"bytes": "49943"
},
{
"name": "PAWN",
"bytes": "6555"
},
{
"name": "PHP",
"bytes": "68611"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Pan",
"bytes": "1241"
},
{
"name": "Pascal",
"bytes": "349743"
},
{
"name": "Perl",
"bytes": "5931502"
},
{
"name": "Perl6",
"bytes": "113623"
},
{
"name": "PigLatin",
"bytes": "6657"
},
{
"name": "Pike",
"bytes": "8479"
},
{
"name": "PostScript",
"bytes": "18216"
},
{
"name": "PowerShell",
"bytes": "14236"
},
{
"name": "Prolog",
"bytes": "43750"
},
{
"name": "Protocol Buffer",
"bytes": "3401"
},
{
"name": "Puppet",
"bytes": "130"
},
{
"name": "Python",
"bytes": "122886305"
},
{
"name": "QML",
"bytes": "3912"
},
{
"name": "R",
"bytes": "49247"
},
{
"name": "Racket",
"bytes": "11341"
},
{
"name": "Rebol",
"bytes": "17708"
},
{
"name": "Red",
"bytes": "10536"
},
{
"name": "Redcode",
"bytes": "830"
},
{
"name": "Ruby",
"bytes": "91403"
},
{
"name": "Rust",
"bytes": "6788"
},
{
"name": "SAS",
"bytes": "15603"
},
{
"name": "SaltStack",
"bytes": "1040"
},
{
"name": "Scala",
"bytes": "730"
},
{
"name": "Scheme",
"bytes": "50346"
},
{
"name": "Scilab",
"bytes": "943"
},
{
"name": "Shell",
"bytes": "2925518"
},
{
"name": "ShellSession",
"bytes": "320"
},
{
"name": "Smali",
"bytes": "832"
},
{
"name": "Smalltalk",
"bytes": "158636"
},
{
"name": "Smarty",
"bytes": "523"
},
{
"name": "SourcePawn",
"bytes": "130"
},
{
"name": "Standard ML",
"bytes": "36869"
},
{
"name": "Swift",
"bytes": "2035"
},
{
"name": "SystemVerilog",
"bytes": "265"
},
{
"name": "Tcl",
"bytes": "6077233"
},
{
"name": "TeX",
"bytes": "487999"
},
{
"name": "Tea",
"bytes": "391"
},
{
"name": "TypeScript",
"bytes": "535"
},
{
"name": "VHDL",
"bytes": "4446"
},
{
"name": "VimL",
"bytes": "32053"
},
{
"name": "Visual Basic",
"bytes": "19441"
},
{
"name": "XQuery",
"bytes": "4289"
},
{
"name": "XS",
"bytes": "178055"
},
{
"name": "XSLT",
"bytes": "1995174"
},
{
"name": "Xtend",
"bytes": "727"
},
{
"name": "Yacc",
"bytes": "25665"
},
{
"name": "Zephir",
"bytes": "485"
},
{
"name": "eC",
"bytes": "31545"
},
{
"name": "mupad",
"bytes": "2442"
},
{
"name": "nesC",
"bytes": "23697"
},
{
"name": "xBase",
"bytes": "3349"
}
],
"symlink_target": ""
} |
import argparse
import time
from pprint import pprint
import pyeapi
def rm_vlan_func(remove):
if args.name in current_vlans:
print 'VLAN', args.name, 'exists. Deleting now!'
pynet_sw3.config(rm_vlan)
else:
print "VLAN", args.name, "does not exist"
def add_vlan_func(name):
if args.name in current_vlans:
print 'VLAN', args.name, 'already exists'
else:
print "VLAN", args.name, "does not exist"
print "Now adding VLAN", args.name
pynet_sw3.config(add_vlan)
def main():
    """Add the requested VLAN, pause, then remove it, printing the VLAN
    table before and after each step."""
    pprint(vlans)
    add_vlan_func(args.name)
    pprint(vlans)
    time.sleep(3)
    # Bug fix: argparse stores "--name" as args.name; the original
    # args.NAME raised AttributeError at runtime.
    rm_vlan_func(args.name)
    pprint(vlans)
if __name__=='__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--remove", help="VLAN Number")
    parser.add_argument("--name", help="VLAN name")
    args = parser.parse_args()
    # Connect to the switch — presumably named in the local pyeapi config
    # (~/.eapi.conf); verify the connection profile exists before running.
    pynet_sw3 = pyeapi.connect_to("pynet-sw3")
    # enable() returns a list with one result entry per command; we sent one.
    # NOTE(review): variable name typo ("retreive") kept as-is.
    vlans_retreive = pynet_sw3.enable("show vlan")
    vlans_list_strip = vlans_retreive[0]
    #pprint(vlans_list_strip)
    vlans_dict = vlans_list_strip['result']
    vlans = vlans_dict['vlans']
    # Keys of the 'vlans' dict are the existing VLAN ids.
    current_vlans = vlans.keys()
    # Config command lists consumed by rm_vlan_func / add_vlan_func above.
    rm_vlan = ['no', args.remove]
    add_vlan = [args.name]
    pprint(current_vlans)
    main()
| {
"content_hash": "b5876084bd2bcbca0cc1a407cc70620b",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 55,
"avg_line_length": 24.4,
"alnum_prop": 0.6491803278688525,
"repo_name": "bonno800/pynet",
"id": "def86b73586783bfab999ad76a699d5f80944495",
"size": "1220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "class7/exercise2ParserFIXED.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "66890"
}
],
"symlink_target": ""
} |
import argparse
import logging
import sys
from typing import Any
from typing import Callable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Tuple
from typing import Type
from typing import Union
import a_sync
from marathon import MarathonClient
from marathon.models.task import MarathonTask
from mypy_extensions import Arg
from mypy_extensions import NamedArg
from paasta_tools.kubernetes_tools import get_all_nodes
from paasta_tools.kubernetes_tools import get_all_pods
from paasta_tools.kubernetes_tools import KubeClient
from paasta_tools.kubernetes_tools import V1Node
from paasta_tools.kubernetes_tools import V1Pod
from paasta_tools.marathon_tools import get_marathon_clients
from paasta_tools.marathon_tools import get_marathon_servers
from paasta_tools.mesos_tools import get_slaves
from paasta_tools.monitoring_tools import ReplicationChecker
from paasta_tools.paasta_service_config_loader import PaastaServiceConfigLoader
from paasta_tools.smartstack_tools import KubeSmartstackEnvoyReplicationChecker
from paasta_tools.smartstack_tools import MesosSmartstackEnvoyReplicationChecker
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import InstanceConfig_T
from paasta_tools.utils import list_services
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import SPACER
from paasta_tools.utils import SystemPaastaConfig
# Metrics emission is optional: when yelp_meteorite is unavailable it is set
# to None and main() skips emit_cluster_replication_metrics entirely.
try:
    import yelp_meteorite
except ImportError:
    yelp_meteorite = None
log = logging.getLogger(__name__)
# Signature that every per-instance-type replication check callback must
# implement: returns True/False for well/under replicated, or None when the
# instance should not be counted.
CheckServiceReplication = Callable[
    [
        Arg(InstanceConfig_T, "instance_config"),
        Arg(Sequence[Union[MarathonTask, V1Pod]], "all_tasks_or_pods"),
        Arg(Any, "replication_checker"),
        NamedArg(bool, "dry_run"),
    ],
    Optional[bool],
]
def parse_args() -> argparse.Namespace:
    """Build and evaluate the command-line interface for this check.

    Returns the parsed options namespace; see each ``help`` string for
    the meaning of the individual flags.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "-d",
        "--soa-dir",
        dest="soa_dir",
        metavar="SOA_DIR",
        default=DEFAULT_SOA_DIR,
        help="define a different soa config directory",
    )
    arg_parser.add_argument(
        "--crit",
        dest="under_replicated_crit_pct",
        type=float,
        default=10,
        help="The percentage of under replicated service instances past which "
        "the script will return a critical status",
    )
    arg_parser.add_argument(
        "--min-count-critical",
        dest="min_count_critical",
        type=int,
        default=5,
        help="The script will not return a critical status if the number of "
        "under replicated service instances is below this number, even if the "
        "percentage is above the critical percentage.",
    )
    arg_parser.add_argument(
        "service_instance_list",
        nargs="*",
        help="The list of service instances to check",
        metavar="SERVICE%sINSTANCE" % SPACER,
    )
    arg_parser.add_argument(
        "-v", "--verbose", action="store_true", dest="verbose", default=False
    )
    arg_parser.add_argument(
        "--dry-run",
        action="store_true",
        dest="dry_run",
        help="Print Sensu alert events and metrics instead of sending them",
    )
    return arg_parser.parse_args()
def check_services_replication(
    soa_dir: str,
    cluster: str,
    service_instances: Sequence[str],
    instance_type_class: Type[InstanceConfig_T],
    check_service_replication: CheckServiceReplication,
    replication_checker: ReplicationChecker,
    all_tasks_or_pods: Sequence[Union[MarathonTask, V1Pod]],
    dry_run: bool = False,
) -> Tuple[int, int]:
    """Run the replication check over every matching, deployed instance.

    Iterates all services in ``soa_dir``, restricts to ``service_instances``
    when that list is non-empty, skips instances without a docker image, and
    collects the boolean verdicts returned by ``check_service_replication``
    (``None`` verdicts are not counted).

    Returns a ``(num_under_replicated, total_checked)`` tuple.
    """
    wanted = set(service_instances)
    verdicts: List[bool] = []
    for service_name in list_services(soa_dir=soa_dir):
        config_loader = PaastaServiceConfigLoader(service=service_name, soa_dir=soa_dir)
        for instance_config in config_loader.instance_configs(
            cluster=cluster, instance_type_class=instance_type_class
        ):
            job = f"{service_name}{SPACER}{instance_config.instance}"
            # An empty `wanted` set means "check everything".
            if wanted and job not in wanted:
                continue
            if not instance_config.get_docker_image():
                log.debug(
                    "%s is not deployed. Skipping replication monitoring."
                    % instance_config.job_id
                )
                continue
            verdict = check_service_replication(
                instance_config=instance_config,
                all_tasks_or_pods=all_tasks_or_pods,
                replication_checker=replication_checker,
                dry_run=dry_run,
            )
            if verdict is not None:
                verdicts.append(verdict)
    num_under_replicated = sum(1 for ok in verdicts if ok is False)
    return num_under_replicated, len(verdicts)
def emit_cluster_replication_metrics(
    pct_under_replicated: float,
    cluster: str,
    scheduler: str,
    dry_run: bool = False,
) -> None:
    """Publish the cluster-wide under-replication percentage as a gauge.

    In dry-run mode the value is printed instead of being sent to
    yelp_meteorite.
    """
    metric_name = "paasta.pct_services_under_replicated"
    if dry_run:
        print(f"Would've sent value {pct_under_replicated} for metric '{metric_name}'")
        return
    dimensions = {"paasta_cluster": cluster, "scheduler": scheduler}
    yelp_meteorite.create_gauge(metric_name, dimensions).set(pct_under_replicated)
def main(
    instance_type_class: Type[InstanceConfig_T],
    check_service_replication: CheckServiceReplication,
    namespace: str,
    mesos: bool = False,
) -> None:
    """Shared entry point for the per-scheduler replication-check scripts.

    Gathers the scheduler's tasks/pods, runs ``check_service_replication``
    over every configured instance, optionally emits a cluster-wide metric,
    and exits 2 (critical) when the under-replicated fraction crosses both
    the percentage and the absolute-count thresholds, otherwise exits 0.

    Args:
        instance_type_class: config class selecting which instance type to check.
        check_service_replication: per-instance verdict callback.
        namespace: kubernetes namespace to list pods from (ignored for mesos).
        mesos: use Marathon tasks + Mesos slaves instead of kubernetes pods/nodes.
    """
    args = parse_args()
    # Configure logging before any project call so debug output is captured.
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.WARNING)
    system_paasta_config = load_system_paasta_config()
    cluster = system_paasta_config.get_cluster()
    replication_checker: ReplicationChecker
    # Pick the workload source and matching smartstack/envoy checker
    # for the scheduler in use.
    if mesos:
        tasks_or_pods, slaves = get_mesos_tasks_and_slaves(system_paasta_config)
        replication_checker = MesosSmartstackEnvoyReplicationChecker(
            mesos_slaves=slaves,
            system_paasta_config=system_paasta_config,
        )
    else:
        tasks_or_pods, nodes = get_kubernetes_pods_and_nodes(namespace)
        replication_checker = KubeSmartstackEnvoyReplicationChecker(
            nodes=nodes,
            system_paasta_config=system_paasta_config,
        )
    count_under_replicated, total = check_services_replication(
        soa_dir=args.soa_dir,
        cluster=cluster,
        service_instances=args.service_instance_list,
        instance_type_class=instance_type_class,
        check_service_replication=check_service_replication,
        replication_checker=replication_checker,
        all_tasks_or_pods=tasks_or_pods,
        dry_run=args.dry_run,
    )
    # Guard against division by zero when nothing was checked.
    pct_under_replicated = 0 if total == 0 else 100 * count_under_replicated / total
    # yelp_meteorite is None when the optional import at module top failed.
    if yelp_meteorite is not None:
        emit_cluster_replication_metrics(
            pct_under_replicated,
            cluster,
            scheduler="mesos" if mesos else "kubernetes",
            dry_run=args.dry_run,
        )
    # Critical only when BOTH the percentage and the absolute count
    # thresholds are exceeded (avoids paging on tiny clusters).
    if (
        pct_under_replicated >= args.under_replicated_crit_pct
        and count_under_replicated >= args.min_count_critical
    ):
        log.critical(
            f"{pct_under_replicated}% of instances ({count_under_replicated}/{total}) "
            f"are under replicated (past {args.under_replicated_crit_pct} is critical)!"
        )
        sys.exit(2)
    else:
        sys.exit(0)
def get_mesos_tasks_and_slaves(
    system_paasta_config: SystemPaastaConfig,
) -> Tuple[Sequence[MarathonTask], List[Any]]:
    """Collect every Marathon task across all configured Marathon servers,
    together with the list of Mesos slaves."""
    marathon_clients = get_marathon_clients(get_marathon_servers(system_paasta_config))
    tasks: List[MarathonTask] = []
    for marathon_client in marathon_clients.get_all_clients():
        tasks.extend(marathon_client.list_tasks())
    # get_slaves is async; block until it resolves.
    return tasks, a_sync.block(get_slaves)
def get_kubernetes_pods_and_nodes(
    namespace: str,
) -> Tuple[Sequence[V1Pod], Sequence[V1Node]]:
    """Return (pods in *namespace*, all cluster nodes) from the kube API."""
    client = KubeClient()
    return (
        get_all_pods(kube_client=client, namespace=namespace),
        get_all_nodes(client),
    )
| {
"content_hash": "5065763be710fc08ae43756b642785d3",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 88,
"avg_line_length": 34.21632653061224,
"alnum_prop": 0.6669450077537874,
"repo_name": "Yelp/paasta",
"id": "af3c05fd885d50f255cff2e7279aa4b179718654",
"size": "8961",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paasta_tools/check_services_replication_tools.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "19456"
},
{
"name": "Gherkin",
"bytes": "4399"
},
{
"name": "Makefile",
"bytes": "12710"
},
{
"name": "Python",
"bytes": "4745271"
},
{
"name": "Shell",
"bytes": "98025"
}
],
"symlink_target": ""
} |
from google.net.proto import ProtocolBuffer
import array
import _dummy_thread as thread
__pychecker__ = """maxreturns=0 maxbranches=0 no-callinit
unusednames=printElemNumber,debug_strs no-special"""
# Newer ProtocolBuffer runtimes expose ExtendableProtocolMessage (protobuf
# extension support); fall back to the plain ProtocolMessage base otherwise.
if hasattr(ProtocolBuffer, 'ExtendableProtocolMessage'):
  _extension_runtime = True
  _ExtendableProtocolMessage = ProtocolBuffer.ExtendableProtocolMessage
else:
  _extension_runtime = False
  _ExtendableProtocolMessage = ProtocolBuffer.ProtocolMessage
from google.appengine.api.api_base_pb import *
import google.appengine.api.api_base_pb
from google.appengine.api.source_pb import *
import google.appengine.api.source_pb
class LogServiceError(ProtocolBuffer.ProtocolMessage):
  """Auto-generated protobuf message apphosting.LogServiceError.

  Carries no fields; it only defines the ErrorCode enum values returned
  by the LogService RPCs. Do not edit by hand — regenerate instead.
  """
  # ErrorCode enum values.
  OK           =    0
  INVALID_REQUEST =    1
  STORAGE_ERROR =    2

  _ErrorCode_NAMES = {
    0: "OK",
    1: "INVALID_REQUEST",
    2: "STORAGE_ERROR",
  }

  # Map an ErrorCode value to its symbolic name ("" if unknown).
  def ErrorCode_Name(cls, x): return cls._ErrorCode_NAMES.get(x, "")
  ErrorCode_Name = classmethod(ErrorCode_Name)

  def __init__(self, contents=None):
    pass
    if contents is not None: self.MergeFromString(contents)

  def MergeFrom(self, x):
    assert x is not self

  def Equals(self, x):
    if x is self: return 1
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    return initialized

  def ByteSize(self):
    n = 0
    return n

  def ByteSizePartial(self):
    n = 0
    return n

  def Clear(self):
    pass

  def OutputUnchecked(self, out):
    pass

  def OutputPartial(self, out):
    pass

  def TryMerge(self, d):
    # No fields: skip every tag; tag 0 marks a malformed stream.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
  }, 0)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
  }, 0, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.LogServiceError'
class UserAppLogLine(ProtocolBuffer.ProtocolMessage):
  """Auto-generated protobuf message apphosting.UserAppLogLine.

  One application log record: required timestamp_usec (int64), level
  (int64) and message (string), plus an optional SourceLocation
  submessage. Do not edit by hand — regenerate instead.
  """
  # Presence flags and default values for each field.
  has_timestamp_usec_ = 0
  timestamp_usec_ = 0
  has_level_ = 0
  level_ = 0
  has_message_ = 0
  message_ = ""
  has_source_location_ = 0
  source_location_ = None

  def __init__(self, contents=None):
    # Lock guarding lazy construction of the source_location submessage.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  # --- timestamp_usec (required, tag 1) accessors ---
  def timestamp_usec(self): return self.timestamp_usec_

  def set_timestamp_usec(self, x):
    self.has_timestamp_usec_ = 1
    self.timestamp_usec_ = x

  def clear_timestamp_usec(self):
    if self.has_timestamp_usec_:
      self.has_timestamp_usec_ = 0
      self.timestamp_usec_ = 0

  def has_timestamp_usec(self): return self.has_timestamp_usec_

  # --- level (required, tag 2) accessors ---
  def level(self): return self.level_

  def set_level(self, x):
    self.has_level_ = 1
    self.level_ = x

  def clear_level(self):
    if self.has_level_:
      self.has_level_ = 0
      self.level_ = 0

  def has_level(self): return self.has_level_

  # --- message (required, tag 3) accessors ---
  def message(self): return self.message_

  def set_message(self, x):
    self.has_message_ = 1
    self.message_ = x

  def clear_message(self):
    if self.has_message_:
      self.has_message_ = 0
      self.message_ = ""

  def has_message(self): return self.has_message_

  # --- source_location (optional submessage, tag 4) accessors ---
  def source_location(self):
    # Lazily create the submessage under the lock (double-checked).
    if self.source_location_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.source_location_ is None: self.source_location_ = SourceLocation()
      finally:
        self.lazy_init_lock_.release()
    return self.source_location_

  def mutable_source_location(self): self.has_source_location_ = 1; return self.source_location()

  def clear_source_location(self):
    # Warning: this method does not acquire the lock.
    if self.has_source_location_:
      self.has_source_location_ = 0;
      if self.source_location_ is not None: self.source_location_.Clear()

  def has_source_location(self): return self.has_source_location_

  def MergeFrom(self, x):
    assert x is not self
    if (x.has_timestamp_usec()): self.set_timestamp_usec(x.timestamp_usec())
    if (x.has_level()): self.set_level(x.level())
    if (x.has_message()): self.set_message(x.message())
    if (x.has_source_location()): self.mutable_source_location().MergeFrom(x.source_location())

  def Equals(self, x):
    if x is self: return 1
    if self.has_timestamp_usec_ != x.has_timestamp_usec_: return 0
    if self.has_timestamp_usec_ and self.timestamp_usec_ != x.timestamp_usec_: return 0
    if self.has_level_ != x.has_level_: return 0
    if self.has_level_ and self.level_ != x.level_: return 0
    if self.has_message_ != x.has_message_: return 0
    if self.has_message_ and self.message_ != x.message_: return 0
    if self.has_source_location_ != x.has_source_location_: return 0
    if self.has_source_location_ and self.source_location_ != x.source_location_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # All three scalar fields are required; the submessage, if present,
    # must itself be initialized.
    initialized = 1
    if (not self.has_timestamp_usec_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: timestamp_usec not set.')
    if (not self.has_level_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: level not set.')
    if (not self.has_message_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: message not set.')
    if (self.has_source_location_ and not self.source_location_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    # Serialized size assuming all required fields are set; the trailing
    # +3 accounts for the three one-byte field tags.
    n = 0
    n += self.lengthVarInt64(self.timestamp_usec_)
    n += self.lengthVarInt64(self.level_)
    n += self.lengthString(len(self.message_))
    if (self.has_source_location_): n += 1 + self.lengthString(self.source_location_.ByteSize())
    return n + 3

  def ByteSizePartial(self):
    # Like ByteSize but tolerates missing required fields.
    n = 0
    if (self.has_timestamp_usec_):
      n += 1
      n += self.lengthVarInt64(self.timestamp_usec_)
    if (self.has_level_):
      n += 1
      n += self.lengthVarInt64(self.level_)
    if (self.has_message_):
      n += 1
      n += self.lengthString(len(self.message_))
    if (self.has_source_location_): n += 1 + self.lengthString(self.source_location_.ByteSizePartial())
    return n

  def Clear(self):
    self.clear_timestamp_usec()
    self.clear_level()
    self.clear_message()
    self.clear_source_location()

  def OutputUnchecked(self, out):
    out.putVarInt32(8)
    out.putVarInt64(self.timestamp_usec_)
    out.putVarInt32(16)
    out.putVarInt64(self.level_)
    out.putVarInt32(26)
    out.putPrefixedString(self.message_)
    if (self.has_source_location_):
      out.putVarInt32(34)
      out.putVarInt32(self.source_location_.ByteSize())
      self.source_location_.OutputUnchecked(out)

  def OutputPartial(self, out):
    if (self.has_timestamp_usec_):
      out.putVarInt32(8)
      out.putVarInt64(self.timestamp_usec_)
    if (self.has_level_):
      out.putVarInt32(16)
      out.putVarInt64(self.level_)
    if (self.has_message_):
      out.putVarInt32(26)
      out.putPrefixedString(self.message_)
    if (self.has_source_location_):
      out.putVarInt32(34)
      out.putVarInt32(self.source_location_.ByteSizePartial())
      self.source_location_.OutputPartial(out)

  def TryMerge(self, d):
    # Decode fields by wire tag; unknown tags are skipped.
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 8:
        self.set_timestamp_usec(d.getVarInt64())
        continue
      if tt == 16:
        self.set_level(d.getVarInt64())
        continue
      if tt == 26:
        self.set_message(d.getPrefixedString())
        continue
      if tt == 34:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_source_location().TryMerge(tmp)
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_timestamp_usec_: res+=prefix+("timestamp_usec: %s\n" % self.DebugFormatInt64(self.timestamp_usec_))
    if self.has_level_: res+=prefix+("level: %s\n" % self.DebugFormatInt64(self.level_))
    if self.has_message_: res+=prefix+("message: %s\n" % self.DebugFormatString(self.message_))
    if self.has_source_location_:
      res+=prefix+"source_location <\n"
      res+=self.source_location_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  # Field numbers.
  ktimestamp_usec = 1
  klevel = 2
  kmessage = 3
  ksource_location = 4

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "timestamp_usec",
    2: "level",
    3: "message",
    4: "source_location",
  }, 4)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.NUMERIC,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.STRING,
  }, 4, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.UserAppLogLine'
class UserAppLogGroup(ProtocolBuffer.ProtocolMessage):
  """Auto-generated protobuf message apphosting.UserAppLogGroup.

  A batch of UserAppLogLine records (repeated field log_line, tag 2).
  Do not edit by hand — regenerate instead.
  """

  def __init__(self, contents=None):
    self.log_line_ = []
    if contents is not None: self.MergeFromString(contents)

  # --- log_line (repeated submessage, tag 2) accessors ---
  def log_line_size(self): return len(self.log_line_)
  def log_line_list(self): return self.log_line_

  def log_line(self, i):
    return self.log_line_[i]

  def mutable_log_line(self, i):
    return self.log_line_[i]

  def add_log_line(self):
    # Append and return a fresh element for the caller to populate.
    x = UserAppLogLine()
    self.log_line_.append(x)
    return x

  def clear_log_line(self):
    self.log_line_ = []

  def MergeFrom(self, x):
    assert x is not self
    for i in range(x.log_line_size()): self.add_log_line().CopyFrom(x.log_line(i))

  def Equals(self, x):
    if x is self: return 1
    if len(self.log_line_) != len(x.log_line_): return 0
    for e1, e2 in zip(self.log_line_, x.log_line_):
      if e1 != e2: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    # Valid only if every contained log line is itself initialized.
    initialized = 1
    for p in self.log_line_:
      if not p.IsInitialized(debug_strs): initialized=0
    return initialized

  def ByteSize(self):
    n = 0
    n += 1 * len(self.log_line_)
    for i in range(len(self.log_line_)): n += self.lengthString(self.log_line_[i].ByteSize())
    return n

  def ByteSizePartial(self):
    n = 0
    n += 1 * len(self.log_line_)
    for i in range(len(self.log_line_)): n += self.lengthString(self.log_line_[i].ByteSizePartial())
    return n

  def Clear(self):
    self.clear_log_line()

  def OutputUnchecked(self, out):
    for i in range(len(self.log_line_)):
      out.putVarInt32(18)
      out.putVarInt32(self.log_line_[i].ByteSize())
      self.log_line_[i].OutputUnchecked(out)

  def OutputPartial(self, out):
    for i in range(len(self.log_line_)):
      out.putVarInt32(18)
      out.putVarInt32(self.log_line_[i].ByteSizePartial())
      self.log_line_[i].OutputPartial(out)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 18:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_log_line().TryMerge(tmp)
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    cnt=0
    for e in self.log_line_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("log_line%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  # Field number.
  klog_line = 2

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    2: "log_line",
  }, 2)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.STRING,
  }, 2, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.UserAppLogGroup'
class FlushRequest(ProtocolBuffer.ProtocolMessage):
  """Auto-generated protobuf message apphosting.FlushRequest.

  Single optional bytes/string field ``logs`` (tag 1). Do not edit by
  hand — regenerate instead.
  """
  has_logs_ = 0
  logs_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  # --- logs (optional, tag 1) accessors ---
  def logs(self): return self.logs_

  def set_logs(self, x):
    self.has_logs_ = 1
    self.logs_ = x

  def clear_logs(self):
    if self.has_logs_:
      self.has_logs_ = 0
      self.logs_ = ""

  def has_logs(self): return self.has_logs_

  def MergeFrom(self, x):
    assert x is not self
    if (x.has_logs()): self.set_logs(x.logs())

  def Equals(self, x):
    if x is self: return 1
    if self.has_logs_ != x.has_logs_: return 0
    if self.has_logs_ and self.logs_ != x.logs_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    return initialized

  def ByteSize(self):
    n = 0
    if (self.has_logs_): n += 1 + self.lengthString(len(self.logs_))
    return n

  def ByteSizePartial(self):
    n = 0
    if (self.has_logs_): n += 1 + self.lengthString(len(self.logs_))
    return n

  def Clear(self):
    self.clear_logs()

  def OutputUnchecked(self, out):
    if (self.has_logs_):
      out.putVarInt32(10)
      out.putPrefixedString(self.logs_)

  def OutputPartial(self, out):
    if (self.has_logs_):
      out.putVarInt32(10)
      out.putPrefixedString(self.logs_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_logs(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_logs_: res+=prefix+("logs: %s\n" % self.DebugFormatString(self.logs_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  # Field number.
  klogs = 1

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "logs",
  }, 1)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.FlushRequest'
class SetStatusRequest(ProtocolBuffer.ProtocolMessage):
  """Auto-generated protobuf message apphosting.SetStatusRequest.

  Single required string field ``status`` (tag 1). Do not edit by
  hand — regenerate instead.
  """
  has_status_ = 0
  status_ = ""

  def __init__(self, contents=None):
    if contents is not None: self.MergeFromString(contents)

  # --- status (required, tag 1) accessors ---
  def status(self): return self.status_

  def set_status(self, x):
    self.has_status_ = 1
    self.status_ = x

  def clear_status(self):
    if self.has_status_:
      self.has_status_ = 0
      self.status_ = ""

  def has_status(self): return self.has_status_

  def MergeFrom(self, x):
    assert x is not self
    if (x.has_status()): self.set_status(x.status())

  def Equals(self, x):
    if x is self: return 1
    if self.has_status_ != x.has_status_: return 0
    if self.has_status_ and self.status_ != x.status_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    initialized = 1
    if (not self.has_status_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: status not set.')
    return initialized

  def ByteSize(self):
    # +1 for the one-byte field tag of the required field.
    n = 0
    n += self.lengthString(len(self.status_))
    return n + 1

  def ByteSizePartial(self):
    n = 0
    if (self.has_status_):
      n += 1
      n += self.lengthString(len(self.status_))
    return n

  def Clear(self):
    self.clear_status()

  def OutputUnchecked(self, out):
    out.putVarInt32(10)
    out.putPrefixedString(self.status_)

  def OutputPartial(self, out):
    if (self.has_status_):
      out.putVarInt32(10)
      out.putPrefixedString(self.status_)

  def TryMerge(self, d):
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_status(d.getPrefixedString())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)

  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_status_: res+=prefix+("status: %s\n" % self.DebugFormatString(self.status_))
    return res

  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  # Field number.
  kstatus = 1

  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "status",
  }, 1)

  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
  }, 1, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.SetStatusRequest'
class LogOffset(ProtocolBuffer.ProtocolMessage):
  """Generated message with optional string request_id (tag 1) and optional
  bool request_id_set (tag 101).

  Auto-generated protocol buffer code; do not hand-edit the logic.
  """
  has_request_id_ = 0
  request_id_ = ""
  has_request_id_set_ = 0
  request_id_set_ = 0
  def __init__(self, contents=None):
    # Optionally decode from an already-serialized string.
    if contents is not None: self.MergeFromString(contents)
  def request_id(self): return self.request_id_
  def set_request_id(self, x):
    self.has_request_id_ = 1
    self.request_id_ = x
  def clear_request_id(self):
    if self.has_request_id_:
      self.has_request_id_ = 0
      self.request_id_ = ""
  def has_request_id(self): return self.has_request_id_
  def request_id_set(self): return self.request_id_set_
  def set_request_id_set(self, x):
    self.has_request_id_set_ = 1
    self.request_id_set_ = x
  def clear_request_id_set(self):
    if self.has_request_id_set_:
      self.has_request_id_set_ = 0
      self.request_id_set_ = 0
  def has_request_id_set(self): return self.has_request_id_set_
  def MergeFrom(self, x):
    """Merge the set fields of another LogOffset *x* into self."""
    assert x is not self
    if (x.has_request_id()): self.set_request_id(x.request_id())
    if (x.has_request_id_set()): self.set_request_id_set(x.request_id_set())
  def Equals(self, x):
    """Field-wise equality; returns 1/0 in the generated-code style."""
    if x is self: return 1
    if self.has_request_id_ != x.has_request_id_: return 0
    if self.has_request_id_ and self.request_id_ != x.request_id_: return 0
    if self.has_request_id_set_ != x.has_request_id_set_: return 0
    if self.has_request_id_set_ and self.request_id_set_ != x.request_id_set_: return 0
    return 1
  def IsInitialized(self, debug_strs=None):
    # No required fields, so always initialized.
    initialized = 1
    return initialized
  def ByteSize(self):
    n = 0
    if (self.has_request_id_): n += 1 + self.lengthString(len(self.request_id_))
    # 3 == 2-byte tag for field 101 + 1 byte for the bool value.
    if (self.has_request_id_set_): n += 3
    return n
  def ByteSizePartial(self):
    n = 0
    if (self.has_request_id_): n += 1 + self.lengthString(len(self.request_id_))
    if (self.has_request_id_set_): n += 3
    return n
  def Clear(self):
    self.clear_request_id()
    self.clear_request_id_set()
  def OutputUnchecked(self, out):
    if (self.has_request_id_):
      out.putVarInt32(10)
      out.putPrefixedString(self.request_id_)
    if (self.has_request_id_set_):
      # Tag 808 == (field 101 << 3) | wire type 0 (varint).
      out.putVarInt32(808)
      out.putBoolean(self.request_id_set_)
  def OutputPartial(self, out):
    if (self.has_request_id_):
      out.putVarInt32(10)
      out.putPrefixedString(self.request_id_)
    if (self.has_request_id_set_):
      out.putVarInt32(808)
      out.putBoolean(self.request_id_set_)
  def TryMerge(self, d):
    """Decode fields from decoder *d*; unknown tags are skipped."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_request_id(d.getPrefixedString())
        continue
      if tt == 808:
        self.set_request_id_set(d.getBoolean())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_request_id_: res+=prefix+("request_id: %s\n" % self.DebugFormatString(self.request_id_))
    if self.has_request_id_set_: res+=prefix+("request_id_set: %s\n" % self.DebugFormatBool(self.request_id_set_))
    return res
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify the sparse tag map into a tuple indexed by tag number.
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])
  krequest_id = 1
  krequest_id_set = 101
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "request_id",
    101: "request_id_set",
  }, 101)
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    101: ProtocolBuffer.Encoder.NUMERIC,
  }, 101, ProtocolBuffer.Encoder.MAX_TYPE)
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.LogOffset'
class LogLine(ProtocolBuffer.ProtocolMessage):
  """Generated message for a single log line.

  Required fields (per IsInitialized): time (tag 1), level (tag 2),
  log_message (tag 3). Optional submessage source_location (tag 4),
  created lazily under a lock.

  Auto-generated protocol buffer code; do not hand-edit the logic.
  """
  has_time_ = 0
  time_ = 0
  has_level_ = 0
  level_ = 0
  has_log_message_ = 0
  log_message_ = ""
  has_source_location_ = 0
  source_location_ = None
  def __init__(self, contents=None):
    # Lock guards lazy creation of the source_location submessage.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)
  def time(self): return self.time_
  def set_time(self, x):
    self.has_time_ = 1
    self.time_ = x
  def clear_time(self):
    if self.has_time_:
      self.has_time_ = 0
      self.time_ = 0
  def has_time(self): return self.has_time_
  def level(self): return self.level_
  def set_level(self, x):
    self.has_level_ = 1
    self.level_ = x
  def clear_level(self):
    if self.has_level_:
      self.has_level_ = 0
      self.level_ = 0
  def has_level(self): return self.has_level_
  def log_message(self): return self.log_message_
  def set_log_message(self, x):
    self.has_log_message_ = 1
    self.log_message_ = x
  def clear_log_message(self):
    if self.has_log_message_:
      self.has_log_message_ = 0
      self.log_message_ = ""
  def has_log_message(self): return self.has_log_message_
  def source_location(self):
    # Lazily create the submessage; double-checked under the init lock.
    if self.source_location_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.source_location_ is None: self.source_location_ = SourceLocation()
      finally:
        self.lazy_init_lock_.release()
    return self.source_location_
  def mutable_source_location(self): self.has_source_location_ = 1; return self.source_location()
  def clear_source_location(self):
    # Clear in place rather than dropping the lazily-built instance.
    if self.has_source_location_:
      self.has_source_location_ = 0;
      if self.source_location_ is not None: self.source_location_.Clear()
  def has_source_location(self): return self.has_source_location_
  def MergeFrom(self, x):
    """Merge the set fields of another LogLine *x* into self."""
    assert x is not self
    if (x.has_time()): self.set_time(x.time())
    if (x.has_level()): self.set_level(x.level())
    if (x.has_log_message()): self.set_log_message(x.log_message())
    if (x.has_source_location()): self.mutable_source_location().MergeFrom(x.source_location())
  def Equals(self, x):
    """Field-wise equality; returns 1/0 in the generated-code style."""
    if x is self: return 1
    if self.has_time_ != x.has_time_: return 0
    if self.has_time_ and self.time_ != x.time_: return 0
    if self.has_level_ != x.has_level_: return 0
    if self.has_level_ and self.level_ != x.level_: return 0
    if self.has_log_message_ != x.has_log_message_: return 0
    if self.has_log_message_ and self.log_message_ != x.log_message_: return 0
    if self.has_source_location_ != x.has_source_location_: return 0
    if self.has_source_location_ and self.source_location_ != x.source_location_: return 0
    return 1
  def IsInitialized(self, debug_strs=None):
    """Return 1 iff all required fields are set; optionally report why not."""
    initialized = 1
    if (not self.has_time_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: time not set.')
    if (not self.has_level_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: level not set.')
    if (not self.has_log_message_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: log_message not set.')
    if (self.has_source_location_ and not self.source_location_.IsInitialized(debug_strs)): initialized = 0
    return initialized
  def ByteSize(self):
    n = 0
    n += self.lengthVarInt64(self.time_)
    n += self.lengthVarInt64(self.level_)
    n += self.lengthString(len(self.log_message_))
    if (self.has_source_location_): n += 1 + self.lengthString(self.source_location_.ByteSize())
    # + 3 accounts for the tag bytes of the three required fields.
    return n + 3
  def ByteSizePartial(self):
    n = 0
    if (self.has_time_):
      n += 1
      n += self.lengthVarInt64(self.time_)
    if (self.has_level_):
      n += 1
      n += self.lengthVarInt64(self.level_)
    if (self.has_log_message_):
      n += 1
      n += self.lengthString(len(self.log_message_))
    if (self.has_source_location_): n += 1 + self.lengthString(self.source_location_.ByteSizePartial())
    return n
  def Clear(self):
    self.clear_time()
    self.clear_level()
    self.clear_log_message()
    self.clear_source_location()
  def OutputUnchecked(self, out):
    # Tags: 8 = field 1 varint, 16 = field 2 varint,
    # 26 = field 3 length-delimited, 34 = field 4 length-delimited.
    out.putVarInt32(8)
    out.putVarInt64(self.time_)
    out.putVarInt32(16)
    out.putVarInt32(self.level_)
    out.putVarInt32(26)
    out.putPrefixedString(self.log_message_)
    if (self.has_source_location_):
      out.putVarInt32(34)
      out.putVarInt32(self.source_location_.ByteSize())
      self.source_location_.OutputUnchecked(out)
  def OutputPartial(self, out):
    if (self.has_time_):
      out.putVarInt32(8)
      out.putVarInt64(self.time_)
    if (self.has_level_):
      out.putVarInt32(16)
      out.putVarInt32(self.level_)
    if (self.has_log_message_):
      out.putVarInt32(26)
      out.putPrefixedString(self.log_message_)
    if (self.has_source_location_):
      out.putVarInt32(34)
      out.putVarInt32(self.source_location_.ByteSizePartial())
      self.source_location_.OutputPartial(out)
  def TryMerge(self, d):
    """Decode fields from decoder *d*; unknown tags are skipped."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 8:
        self.set_time(d.getVarInt64())
        continue
      if tt == 16:
        self.set_level(d.getVarInt32())
        continue
      if tt == 26:
        self.set_log_message(d.getPrefixedString())
        continue
      if tt == 34:
        # Decode the submessage from a bounded sub-decoder.
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_source_location().TryMerge(tmp)
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    res=""
    if self.has_time_: res+=prefix+("time: %s\n" % self.DebugFormatInt64(self.time_))
    if self.has_level_: res+=prefix+("level: %s\n" % self.DebugFormatInt32(self.level_))
    if self.has_log_message_: res+=prefix+("log_message: %s\n" % self.DebugFormatString(self.log_message_))
    if self.has_source_location_:
      res+=prefix+"source_location <\n"
      res+=self.source_location_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    return res
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    # Densify the sparse tag map into a tuple indexed by tag number.
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])
  ktime = 1
  klevel = 2
  klog_message = 3
  ksource_location = 4
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "time",
    2: "level",
    3: "log_message",
    4: "source_location",
  }, 4)
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.NUMERIC,
    2: ProtocolBuffer.Encoder.NUMERIC,
    3: ProtocolBuffer.Encoder.STRING,
    4: ProtocolBuffer.Encoder.STRING,
  }, 4, ProtocolBuffer.Encoder.MAX_TYPE)
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.LogLine'
class RequestLog(ProtocolBuffer.ProtocolMessage):
has_app_id_ = 0
app_id_ = ""
has_module_id_ = 0
module_id_ = "default"
has_version_id_ = 0
version_id_ = ""
has_request_id_ = 0
request_id_ = ""
has_offset_ = 0
offset_ = None
has_ip_ = 0
ip_ = ""
has_nickname_ = 0
nickname_ = ""
has_start_time_ = 0
start_time_ = 0
has_end_time_ = 0
end_time_ = 0
has_latency_ = 0
latency_ = 0
has_mcycles_ = 0
mcycles_ = 0
has_method_ = 0
method_ = ""
has_resource_ = 0
resource_ = ""
has_http_version_ = 0
http_version_ = ""
has_status_ = 0
status_ = 0
has_response_size_ = 0
response_size_ = 0
has_referrer_ = 0
referrer_ = ""
has_user_agent_ = 0
user_agent_ = ""
has_url_map_entry_ = 0
url_map_entry_ = ""
has_combined_ = 0
combined_ = ""
has_api_mcycles_ = 0
api_mcycles_ = 0
has_host_ = 0
host_ = ""
has_cost_ = 0
cost_ = 0.0
has_task_queue_name_ = 0
task_queue_name_ = ""
has_task_name_ = 0
task_name_ = ""
has_was_loading_request_ = 0
was_loading_request_ = 0
has_pending_time_ = 0
pending_time_ = 0
has_replica_index_ = 0
replica_index_ = -1
has_finished_ = 0
finished_ = 1
has_clone_key_ = 0
clone_key_ = ""
has_lines_incomplete_ = 0
lines_incomplete_ = 0
has_app_engine_release_ = 0
app_engine_release_ = ""
has_trace_id_ = 0
trace_id_ = ""
has_exit_reason_ = 0
exit_reason_ = 0
has_was_throttled_for_time_ = 0
was_throttled_for_time_ = 0
has_was_throttled_for_requests_ = 0
was_throttled_for_requests_ = 0
has_throttled_time_ = 0
throttled_time_ = 0
has_server_name_ = 0
server_name_ = ""
  def __init__(self, contents=None):
    """Optionally decode from the serialized string *contents*."""
    # Repeated LogLine field; instance-level so it is not shared.
    self.line_ = []
    # Lock guards lazy creation of the optional offset submessage.
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)
  # Generated presence-tracked accessors: app_id, module_id, version_id,
  # request_id, and the lazily-created offset submessage.
  def app_id(self): return self.app_id_
  def set_app_id(self, x):
    self.has_app_id_ = 1
    self.app_id_ = x
  def clear_app_id(self):
    if self.has_app_id_:
      self.has_app_id_ = 0
      self.app_id_ = ""
  def has_app_id(self): return self.has_app_id_
  def module_id(self): return self.module_id_
  def set_module_id(self, x):
    self.has_module_id_ = 1
    self.module_id_ = x
  def clear_module_id(self):
    if self.has_module_id_:
      self.has_module_id_ = 0
      self.module_id_ = "default"
  def has_module_id(self): return self.has_module_id_
  def version_id(self): return self.version_id_
  def set_version_id(self, x):
    self.has_version_id_ = 1
    self.version_id_ = x
  def clear_version_id(self):
    if self.has_version_id_:
      self.has_version_id_ = 0
      self.version_id_ = ""
  def has_version_id(self): return self.has_version_id_
  def request_id(self): return self.request_id_
  def set_request_id(self, x):
    self.has_request_id_ = 1
    self.request_id_ = x
  def clear_request_id(self):
    if self.has_request_id_:
      self.has_request_id_ = 0
      self.request_id_ = ""
  def has_request_id(self): return self.has_request_id_
  def offset(self):
    # Lazily create the LogOffset submessage; double-checked under the lock.
    if self.offset_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.offset_ is None: self.offset_ = LogOffset()
      finally:
        self.lazy_init_lock_.release()
    return self.offset_
  def mutable_offset(self): self.has_offset_ = 1; return self.offset()
  def clear_offset(self):
    # Clear in place rather than dropping the lazily-built instance.
    if self.has_offset_:
      self.has_offset_ = 0;
      if self.offset_ is not None: self.offset_.Clear()
  def has_offset(self): return self.has_offset_
  # Generated presence-tracked accessors: ip, nickname, start_time,
  # end_time, latency, mcycles, method, resource.
  def ip(self): return self.ip_
  def set_ip(self, x):
    self.has_ip_ = 1
    self.ip_ = x
  def clear_ip(self):
    if self.has_ip_:
      self.has_ip_ = 0
      self.ip_ = ""
  def has_ip(self): return self.has_ip_
  def nickname(self): return self.nickname_
  def set_nickname(self, x):
    self.has_nickname_ = 1
    self.nickname_ = x
  def clear_nickname(self):
    if self.has_nickname_:
      self.has_nickname_ = 0
      self.nickname_ = ""
  def has_nickname(self): return self.has_nickname_
  def start_time(self): return self.start_time_
  def set_start_time(self, x):
    self.has_start_time_ = 1
    self.start_time_ = x
  def clear_start_time(self):
    if self.has_start_time_:
      self.has_start_time_ = 0
      self.start_time_ = 0
  def has_start_time(self): return self.has_start_time_
  def end_time(self): return self.end_time_
  def set_end_time(self, x):
    self.has_end_time_ = 1
    self.end_time_ = x
  def clear_end_time(self):
    if self.has_end_time_:
      self.has_end_time_ = 0
      self.end_time_ = 0
  def has_end_time(self): return self.has_end_time_
  def latency(self): return self.latency_
  def set_latency(self, x):
    self.has_latency_ = 1
    self.latency_ = x
  def clear_latency(self):
    if self.has_latency_:
      self.has_latency_ = 0
      self.latency_ = 0
  def has_latency(self): return self.has_latency_
  def mcycles(self): return self.mcycles_
  def set_mcycles(self, x):
    self.has_mcycles_ = 1
    self.mcycles_ = x
  def clear_mcycles(self):
    if self.has_mcycles_:
      self.has_mcycles_ = 0
      self.mcycles_ = 0
  def has_mcycles(self): return self.has_mcycles_
  def method(self): return self.method_
  def set_method(self, x):
    self.has_method_ = 1
    self.method_ = x
  def clear_method(self):
    if self.has_method_:
      self.has_method_ = 0
      self.method_ = ""
  def has_method(self): return self.has_method_
  def resource(self): return self.resource_
  def set_resource(self, x):
    self.has_resource_ = 1
    self.resource_ = x
  def clear_resource(self):
    if self.has_resource_:
      self.has_resource_ = 0
      self.resource_ = ""
  def has_resource(self): return self.has_resource_
  # Generated presence-tracked accessors: http_version, status,
  # response_size, referrer, user_agent, url_map_entry, combined,
  # api_mcycles, host.
  def http_version(self): return self.http_version_
  def set_http_version(self, x):
    self.has_http_version_ = 1
    self.http_version_ = x
  def clear_http_version(self):
    if self.has_http_version_:
      self.has_http_version_ = 0
      self.http_version_ = ""
  def has_http_version(self): return self.has_http_version_
  def status(self): return self.status_
  def set_status(self, x):
    self.has_status_ = 1
    self.status_ = x
  def clear_status(self):
    if self.has_status_:
      self.has_status_ = 0
      self.status_ = 0
  def has_status(self): return self.has_status_
  def response_size(self): return self.response_size_
  def set_response_size(self, x):
    self.has_response_size_ = 1
    self.response_size_ = x
  def clear_response_size(self):
    if self.has_response_size_:
      self.has_response_size_ = 0
      self.response_size_ = 0
  def has_response_size(self): return self.has_response_size_
  def referrer(self): return self.referrer_
  def set_referrer(self, x):
    self.has_referrer_ = 1
    self.referrer_ = x
  def clear_referrer(self):
    if self.has_referrer_:
      self.has_referrer_ = 0
      self.referrer_ = ""
  def has_referrer(self): return self.has_referrer_
  def user_agent(self): return self.user_agent_
  def set_user_agent(self, x):
    self.has_user_agent_ = 1
    self.user_agent_ = x
  def clear_user_agent(self):
    if self.has_user_agent_:
      self.has_user_agent_ = 0
      self.user_agent_ = ""
  def has_user_agent(self): return self.has_user_agent_
  def url_map_entry(self): return self.url_map_entry_
  def set_url_map_entry(self, x):
    self.has_url_map_entry_ = 1
    self.url_map_entry_ = x
  def clear_url_map_entry(self):
    if self.has_url_map_entry_:
      self.has_url_map_entry_ = 0
      self.url_map_entry_ = ""
  def has_url_map_entry(self): return self.has_url_map_entry_
  def combined(self): return self.combined_
  def set_combined(self, x):
    self.has_combined_ = 1
    self.combined_ = x
  def clear_combined(self):
    if self.has_combined_:
      self.has_combined_ = 0
      self.combined_ = ""
  def has_combined(self): return self.has_combined_
  def api_mcycles(self): return self.api_mcycles_
  def set_api_mcycles(self, x):
    self.has_api_mcycles_ = 1
    self.api_mcycles_ = x
  def clear_api_mcycles(self):
    if self.has_api_mcycles_:
      self.has_api_mcycles_ = 0
      self.api_mcycles_ = 0
  def has_api_mcycles(self): return self.has_api_mcycles_
  def host(self): return self.host_
  def set_host(self, x):
    self.has_host_ = 1
    self.host_ = x
  def clear_host(self):
    if self.has_host_:
      self.has_host_ = 0
      self.host_ = ""
  def has_host(self): return self.has_host_
  # Generated presence-tracked accessors: cost, task_queue_name, task_name,
  # was_loading_request, pending_time, replica_index, finished, clone_key,
  # plus the repeated LogLine field `line`.
  def cost(self): return self.cost_
  def set_cost(self, x):
    self.has_cost_ = 1
    self.cost_ = x
  def clear_cost(self):
    if self.has_cost_:
      self.has_cost_ = 0
      self.cost_ = 0.0
  def has_cost(self): return self.has_cost_
  def task_queue_name(self): return self.task_queue_name_
  def set_task_queue_name(self, x):
    self.has_task_queue_name_ = 1
    self.task_queue_name_ = x
  def clear_task_queue_name(self):
    if self.has_task_queue_name_:
      self.has_task_queue_name_ = 0
      self.task_queue_name_ = ""
  def has_task_queue_name(self): return self.has_task_queue_name_
  def task_name(self): return self.task_name_
  def set_task_name(self, x):
    self.has_task_name_ = 1
    self.task_name_ = x
  def clear_task_name(self):
    if self.has_task_name_:
      self.has_task_name_ = 0
      self.task_name_ = ""
  def has_task_name(self): return self.has_task_name_
  def was_loading_request(self): return self.was_loading_request_
  def set_was_loading_request(self, x):
    self.has_was_loading_request_ = 1
    self.was_loading_request_ = x
  def clear_was_loading_request(self):
    if self.has_was_loading_request_:
      self.has_was_loading_request_ = 0
      self.was_loading_request_ = 0
  def has_was_loading_request(self): return self.has_was_loading_request_
  def pending_time(self): return self.pending_time_
  def set_pending_time(self, x):
    self.has_pending_time_ = 1
    self.pending_time_ = x
  def clear_pending_time(self):
    if self.has_pending_time_:
      self.has_pending_time_ = 0
      self.pending_time_ = 0
  def has_pending_time(self): return self.has_pending_time_
  def replica_index(self): return self.replica_index_
  def set_replica_index(self, x):
    self.has_replica_index_ = 1
    self.replica_index_ = x
  def clear_replica_index(self):
    if self.has_replica_index_:
      self.has_replica_index_ = 0
      self.replica_index_ = -1
  def has_replica_index(self): return self.has_replica_index_
  def finished(self): return self.finished_
  def set_finished(self, x):
    self.has_finished_ = 1
    self.finished_ = x
  def clear_finished(self):
    if self.has_finished_:
      self.has_finished_ = 0
      self.finished_ = 1
  def has_finished(self): return self.has_finished_
  def clone_key(self): return self.clone_key_
  def set_clone_key(self, x):
    self.has_clone_key_ = 1
    self.clone_key_ = x
  def clear_clone_key(self):
    if self.has_clone_key_:
      self.has_clone_key_ = 0
      self.clone_key_ = ""
  def has_clone_key(self): return self.has_clone_key_
  def line_size(self): return len(self.line_)
  def line_list(self): return self.line_
  def line(self, i):
    return self.line_[i]
  def mutable_line(self, i):
    return self.line_[i]
  def add_line(self):
    # Append a fresh empty LogLine and return it for in-place filling.
    x = LogLine()
    self.line_.append(x)
    return x
  def clear_line(self):
    self.line_ = []
  # Generated presence-tracked accessors: lines_incomplete,
  # app_engine_release, trace_id, exit_reason, was_throttled_for_time,
  # was_throttled_for_requests, throttled_time, server_name.
  def lines_incomplete(self): return self.lines_incomplete_
  def set_lines_incomplete(self, x):
    self.has_lines_incomplete_ = 1
    self.lines_incomplete_ = x
  def clear_lines_incomplete(self):
    if self.has_lines_incomplete_:
      self.has_lines_incomplete_ = 0
      self.lines_incomplete_ = 0
  def has_lines_incomplete(self): return self.has_lines_incomplete_
  def app_engine_release(self): return self.app_engine_release_
  def set_app_engine_release(self, x):
    self.has_app_engine_release_ = 1
    self.app_engine_release_ = x
  def clear_app_engine_release(self):
    if self.has_app_engine_release_:
      self.has_app_engine_release_ = 0
      self.app_engine_release_ = ""
  def has_app_engine_release(self): return self.has_app_engine_release_
  def trace_id(self): return self.trace_id_
  def set_trace_id(self, x):
    self.has_trace_id_ = 1
    self.trace_id_ = x
  def clear_trace_id(self):
    if self.has_trace_id_:
      self.has_trace_id_ = 0
      self.trace_id_ = ""
  def has_trace_id(self): return self.has_trace_id_
  def exit_reason(self): return self.exit_reason_
  def set_exit_reason(self, x):
    self.has_exit_reason_ = 1
    self.exit_reason_ = x
  def clear_exit_reason(self):
    if self.has_exit_reason_:
      self.has_exit_reason_ = 0
      self.exit_reason_ = 0
  def has_exit_reason(self): return self.has_exit_reason_
  def was_throttled_for_time(self): return self.was_throttled_for_time_
  def set_was_throttled_for_time(self, x):
    self.has_was_throttled_for_time_ = 1
    self.was_throttled_for_time_ = x
  def clear_was_throttled_for_time(self):
    if self.has_was_throttled_for_time_:
      self.has_was_throttled_for_time_ = 0
      self.was_throttled_for_time_ = 0
  def has_was_throttled_for_time(self): return self.has_was_throttled_for_time_
  def was_throttled_for_requests(self): return self.was_throttled_for_requests_
  def set_was_throttled_for_requests(self, x):
    self.has_was_throttled_for_requests_ = 1
    self.was_throttled_for_requests_ = x
  def clear_was_throttled_for_requests(self):
    if self.has_was_throttled_for_requests_:
      self.has_was_throttled_for_requests_ = 0
      self.was_throttled_for_requests_ = 0
  def has_was_throttled_for_requests(self): return self.has_was_throttled_for_requests_
  def throttled_time(self): return self.throttled_time_
  def set_throttled_time(self, x):
    self.has_throttled_time_ = 1
    self.throttled_time_ = x
  def clear_throttled_time(self):
    if self.has_throttled_time_:
      self.has_throttled_time_ = 0
      self.throttled_time_ = 0
  def has_throttled_time(self): return self.has_throttled_time_
  def server_name(self): return self.server_name_
  def set_server_name(self, x):
    self.has_server_name_ = 1
    self.server_name_ = x
  def clear_server_name(self):
    if self.has_server_name_:
      self.has_server_name_ = 0
      self.server_name_ = ""
  def has_server_name(self): return self.has_server_name_
  def MergeFrom(self, x):
    """Merge every set field of another RequestLog *x* into self.

    Scalar fields are overwritten; the offset submessage is merged
    recursively; repeated `line` entries are deep-copied and appended.
    """
    assert x is not self
    if (x.has_app_id()): self.set_app_id(x.app_id())
    if (x.has_module_id()): self.set_module_id(x.module_id())
    if (x.has_version_id()): self.set_version_id(x.version_id())
    if (x.has_request_id()): self.set_request_id(x.request_id())
    if (x.has_offset()): self.mutable_offset().MergeFrom(x.offset())
    if (x.has_ip()): self.set_ip(x.ip())
    if (x.has_nickname()): self.set_nickname(x.nickname())
    if (x.has_start_time()): self.set_start_time(x.start_time())
    if (x.has_end_time()): self.set_end_time(x.end_time())
    if (x.has_latency()): self.set_latency(x.latency())
    if (x.has_mcycles()): self.set_mcycles(x.mcycles())
    if (x.has_method()): self.set_method(x.method())
    if (x.has_resource()): self.set_resource(x.resource())
    if (x.has_http_version()): self.set_http_version(x.http_version())
    if (x.has_status()): self.set_status(x.status())
    if (x.has_response_size()): self.set_response_size(x.response_size())
    if (x.has_referrer()): self.set_referrer(x.referrer())
    if (x.has_user_agent()): self.set_user_agent(x.user_agent())
    if (x.has_url_map_entry()): self.set_url_map_entry(x.url_map_entry())
    if (x.has_combined()): self.set_combined(x.combined())
    if (x.has_api_mcycles()): self.set_api_mcycles(x.api_mcycles())
    if (x.has_host()): self.set_host(x.host())
    if (x.has_cost()): self.set_cost(x.cost())
    if (x.has_task_queue_name()): self.set_task_queue_name(x.task_queue_name())
    if (x.has_task_name()): self.set_task_name(x.task_name())
    if (x.has_was_loading_request()): self.set_was_loading_request(x.was_loading_request())
    if (x.has_pending_time()): self.set_pending_time(x.pending_time())
    if (x.has_replica_index()): self.set_replica_index(x.replica_index())
    if (x.has_finished()): self.set_finished(x.finished())
    if (x.has_clone_key()): self.set_clone_key(x.clone_key())
    # Repeated field: append copies rather than sharing LogLine instances.
    for i in range(x.line_size()): self.add_line().CopyFrom(x.line(i))
    if (x.has_lines_incomplete()): self.set_lines_incomplete(x.lines_incomplete())
    if (x.has_app_engine_release()): self.set_app_engine_release(x.app_engine_release())
    if (x.has_trace_id()): self.set_trace_id(x.trace_id())
    if (x.has_exit_reason()): self.set_exit_reason(x.exit_reason())
    if (x.has_was_throttled_for_time()): self.set_was_throttled_for_time(x.was_throttled_for_time())
    if (x.has_was_throttled_for_requests()): self.set_was_throttled_for_requests(x.was_throttled_for_requests())
    if (x.has_throttled_time()): self.set_throttled_time(x.throttled_time())
    if (x.has_server_name()): self.set_server_name(x.server_name())
  def Equals(self, x):
    """Field-wise equality against another RequestLog; returns 1/0.

    Each field compares its presence flag first, then the value only when
    set; the repeated `line` list is compared element-wise.
    """
    if x is self: return 1
    if self.has_app_id_ != x.has_app_id_: return 0
    if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
    if self.has_module_id_ != x.has_module_id_: return 0
    if self.has_module_id_ and self.module_id_ != x.module_id_: return 0
    if self.has_version_id_ != x.has_version_id_: return 0
    if self.has_version_id_ and self.version_id_ != x.version_id_: return 0
    if self.has_request_id_ != x.has_request_id_: return 0
    if self.has_request_id_ and self.request_id_ != x.request_id_: return 0
    if self.has_offset_ != x.has_offset_: return 0
    if self.has_offset_ and self.offset_ != x.offset_: return 0
    if self.has_ip_ != x.has_ip_: return 0
    if self.has_ip_ and self.ip_ != x.ip_: return 0
    if self.has_nickname_ != x.has_nickname_: return 0
    if self.has_nickname_ and self.nickname_ != x.nickname_: return 0
    if self.has_start_time_ != x.has_start_time_: return 0
    if self.has_start_time_ and self.start_time_ != x.start_time_: return 0
    if self.has_end_time_ != x.has_end_time_: return 0
    if self.has_end_time_ and self.end_time_ != x.end_time_: return 0
    if self.has_latency_ != x.has_latency_: return 0
    if self.has_latency_ and self.latency_ != x.latency_: return 0
    if self.has_mcycles_ != x.has_mcycles_: return 0
    if self.has_mcycles_ and self.mcycles_ != x.mcycles_: return 0
    if self.has_method_ != x.has_method_: return 0
    if self.has_method_ and self.method_ != x.method_: return 0
    if self.has_resource_ != x.has_resource_: return 0
    if self.has_resource_ and self.resource_ != x.resource_: return 0
    if self.has_http_version_ != x.has_http_version_: return 0
    if self.has_http_version_ and self.http_version_ != x.http_version_: return 0
    if self.has_status_ != x.has_status_: return 0
    if self.has_status_ and self.status_ != x.status_: return 0
    if self.has_response_size_ != x.has_response_size_: return 0
    if self.has_response_size_ and self.response_size_ != x.response_size_: return 0
    if self.has_referrer_ != x.has_referrer_: return 0
    if self.has_referrer_ and self.referrer_ != x.referrer_: return 0
    if self.has_user_agent_ != x.has_user_agent_: return 0
    if self.has_user_agent_ and self.user_agent_ != x.user_agent_: return 0
    if self.has_url_map_entry_ != x.has_url_map_entry_: return 0
    if self.has_url_map_entry_ and self.url_map_entry_ != x.url_map_entry_: return 0
    if self.has_combined_ != x.has_combined_: return 0
    if self.has_combined_ and self.combined_ != x.combined_: return 0
    if self.has_api_mcycles_ != x.has_api_mcycles_: return 0
    if self.has_api_mcycles_ and self.api_mcycles_ != x.api_mcycles_: return 0
    if self.has_host_ != x.has_host_: return 0
    if self.has_host_ and self.host_ != x.host_: return 0
    if self.has_cost_ != x.has_cost_: return 0
    if self.has_cost_ and self.cost_ != x.cost_: return 0
    if self.has_task_queue_name_ != x.has_task_queue_name_: return 0
    if self.has_task_queue_name_ and self.task_queue_name_ != x.task_queue_name_: return 0
    if self.has_task_name_ != x.has_task_name_: return 0
    if self.has_task_name_ and self.task_name_ != x.task_name_: return 0
    if self.has_was_loading_request_ != x.has_was_loading_request_: return 0
    if self.has_was_loading_request_ and self.was_loading_request_ != x.was_loading_request_: return 0
    if self.has_pending_time_ != x.has_pending_time_: return 0
    if self.has_pending_time_ and self.pending_time_ != x.pending_time_: return 0
    if self.has_replica_index_ != x.has_replica_index_: return 0
    if self.has_replica_index_ and self.replica_index_ != x.replica_index_: return 0
    if self.has_finished_ != x.has_finished_: return 0
    if self.has_finished_ and self.finished_ != x.finished_: return 0
    if self.has_clone_key_ != x.has_clone_key_: return 0
    if self.has_clone_key_ and self.clone_key_ != x.clone_key_: return 0
    if len(self.line_) != len(x.line_): return 0
    for e1, e2 in zip(self.line_, x.line_):
      if e1 != e2: return 0
    if self.has_lines_incomplete_ != x.has_lines_incomplete_: return 0
    if self.has_lines_incomplete_ and self.lines_incomplete_ != x.lines_incomplete_: return 0
    if self.has_app_engine_release_ != x.has_app_engine_release_: return 0
    if self.has_app_engine_release_ and self.app_engine_release_ != x.app_engine_release_: return 0
    if self.has_trace_id_ != x.has_trace_id_: return 0
    if self.has_trace_id_ and self.trace_id_ != x.trace_id_: return 0
    if self.has_exit_reason_ != x.has_exit_reason_: return 0
    if self.has_exit_reason_ and self.exit_reason_ != x.exit_reason_: return 0
    if self.has_was_throttled_for_time_ != x.has_was_throttled_for_time_: return 0
    if self.has_was_throttled_for_time_ and self.was_throttled_for_time_ != x.was_throttled_for_time_: return 0
    if self.has_was_throttled_for_requests_ != x.has_was_throttled_for_requests_: return 0
    if self.has_was_throttled_for_requests_ and self.was_throttled_for_requests_ != x.was_throttled_for_requests_: return 0
    if self.has_throttled_time_ != x.has_throttled_time_: return 0
    if self.has_throttled_time_ and self.throttled_time_ != x.throttled_time_: return 0
    if self.has_server_name_ != x.has_server_name_: return 0
    if self.has_server_name_ and self.server_name_ != x.server_name_: return 0
    return 1
def IsInitialized(self, debug_strs=None):
  """Return 1 if every required field (and nested message) is set, else 0.

  Args:
    debug_strs: optional list; a human-readable note is appended for each
      missing required field.
  """
  initialized = 1
  # Required fields, listed in the exact order the original checks ran in
  # (so appended debug messages come out in the same order).
  required = (
    (self.has_app_id_, 'app_id'),
    (self.has_version_id_, 'version_id'),
    (self.has_request_id_, 'request_id'),
    (self.has_ip_, 'ip'),
    (self.has_start_time_, 'start_time'),
    (self.has_end_time_, 'end_time'),
    (self.has_latency_, 'latency'),
    (self.has_mcycles_, 'mcycles'),
    (self.has_method_, 'method'),
    (self.has_resource_, 'resource'),
    (self.has_http_version_, 'http_version'),
    (self.has_status_, 'status'),
    (self.has_response_size_, 'response_size'),
    (self.has_url_map_entry_, 'url_map_entry'),
    (self.has_combined_, 'combined'),
  )
  for present, name in required[:3]:
    if not present:
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: %s not set.' % name)
  # The optional offset sub-message is validated between request_id and ip,
  # matching the generated check order.
  if self.has_offset_ and not self.offset_.IsInitialized(debug_strs):
    initialized = 0
  for present, name in required[3:]:
    if not present:
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: %s not set.' % name)
  # Every repeated line entry must itself be initialized.
  for entry in self.line_:
    if not entry.IsInitialized(debug_strs):
      initialized = 0
  return initialized
def ByteSize(self):
  """Return the exact serialized size of this message, in bytes.

  Assumes the message is fully initialized (required fields set).
  lengthString/lengthVarInt64 count a field's payload only; the small
  integer constants added for optional fields are the encoded size of
  that field's tag (plus the payload for fixed-size fields such as
  booleans and the 8-byte double `cost`).
  """
  n = 0
  n += self.lengthString(len(self.app_id_))
  if (self.has_module_id_): n += 2 + self.lengthString(len(self.module_id_))
  n += self.lengthString(len(self.version_id_))
  n += self.lengthString(len(self.request_id_))
  if (self.has_offset_): n += 2 + self.lengthString(self.offset_.ByteSize())
  n += self.lengthString(len(self.ip_))
  if (self.has_nickname_): n += 1 + self.lengthString(len(self.nickname_))
  n += self.lengthVarInt64(self.start_time_)
  n += self.lengthVarInt64(self.end_time_)
  n += self.lengthVarInt64(self.latency_)
  n += self.lengthVarInt64(self.mcycles_)
  n += self.lengthString(len(self.method_))
  n += self.lengthString(len(self.resource_))
  n += self.lengthString(len(self.http_version_))
  n += self.lengthVarInt64(self.status_)
  n += self.lengthVarInt64(self.response_size_)
  if (self.has_referrer_): n += 1 + self.lengthString(len(self.referrer_))
  if (self.has_user_agent_): n += 2 + self.lengthString(len(self.user_agent_))
  n += self.lengthString(len(self.url_map_entry_))
  n += self.lengthString(len(self.combined_))
  if (self.has_api_mcycles_): n += 2 + self.lengthVarInt64(self.api_mcycles_)
  if (self.has_host_): n += 2 + self.lengthString(len(self.host_))
  if (self.has_cost_): n += 10
  if (self.has_task_queue_name_): n += 2 + self.lengthString(len(self.task_queue_name_))
  if (self.has_task_name_): n += 2 + self.lengthString(len(self.task_name_))
  if (self.has_was_loading_request_): n += 3
  if (self.has_pending_time_): n += 2 + self.lengthVarInt64(self.pending_time_)
  if (self.has_replica_index_): n += 2 + self.lengthVarInt64(self.replica_index_)
  if (self.has_finished_): n += 3
  if (self.has_clone_key_): n += 2 + self.lengthString(len(self.clone_key_))
  # Each repeated `line` entry carries a 2-byte tag plus a length-prefixed payload.
  n += 2 * len(self.line_)
  for i in range(len(self.line_)): n += self.lengthString(self.line_[i].ByteSize())
  if (self.has_lines_incomplete_): n += 3
  if (self.has_app_engine_release_): n += 2 + self.lengthString(len(self.app_engine_release_))
  if (self.has_trace_id_): n += 2 + self.lengthString(len(self.trace_id_))
  if (self.has_exit_reason_): n += 2 + self.lengthVarInt64(self.exit_reason_)
  if (self.has_was_throttled_for_time_): n += 3
  if (self.has_was_throttled_for_requests_): n += 3
  if (self.has_throttled_time_): n += 2 + self.lengthVarInt64(self.throttled_time_)
  if (self.has_server_name_): n += 2 + self.lengthString(len(self.server_name_))
  # 17 = tag bytes of the required fields counted above: thirteen 1-byte
  # tags (fields 1-14, excluding optional nickname) plus two 2-byte tags
  # (url_map_entry, combined).
  return n + 17
def ByteSizePartial(self):
  """Return the serialized size counting only the fields that are set.

  Unlike ByteSize(), required fields are skipped when unset (their tag
  bytes are added explicitly here instead of in a final constant), so
  this is safe on a partially-initialized message.
  """
  n = 0
  if (self.has_app_id_):
    n += 1
    n += self.lengthString(len(self.app_id_))
  if (self.has_module_id_): n += 2 + self.lengthString(len(self.module_id_))
  if (self.has_version_id_):
    n += 1
    n += self.lengthString(len(self.version_id_))
  if (self.has_request_id_):
    n += 1
    n += self.lengthString(len(self.request_id_))
  if (self.has_offset_): n += 2 + self.lengthString(self.offset_.ByteSizePartial())
  if (self.has_ip_):
    n += 1
    n += self.lengthString(len(self.ip_))
  if (self.has_nickname_): n += 1 + self.lengthString(len(self.nickname_))
  if (self.has_start_time_):
    n += 1
    n += self.lengthVarInt64(self.start_time_)
  if (self.has_end_time_):
    n += 1
    n += self.lengthVarInt64(self.end_time_)
  if (self.has_latency_):
    n += 1
    n += self.lengthVarInt64(self.latency_)
  if (self.has_mcycles_):
    n += 1
    n += self.lengthVarInt64(self.mcycles_)
  if (self.has_method_):
    n += 1
    n += self.lengthString(len(self.method_))
  if (self.has_resource_):
    n += 1
    n += self.lengthString(len(self.resource_))
  if (self.has_http_version_):
    n += 1
    n += self.lengthString(len(self.http_version_))
  if (self.has_status_):
    n += 1
    n += self.lengthVarInt64(self.status_)
  if (self.has_response_size_):
    n += 1
    n += self.lengthVarInt64(self.response_size_)
  if (self.has_referrer_): n += 1 + self.lengthString(len(self.referrer_))
  if (self.has_user_agent_): n += 2 + self.lengthString(len(self.user_agent_))
  if (self.has_url_map_entry_):
    n += 2
    n += self.lengthString(len(self.url_map_entry_))
  if (self.has_combined_):
    n += 2
    n += self.lengthString(len(self.combined_))
  if (self.has_api_mcycles_): n += 2 + self.lengthVarInt64(self.api_mcycles_)
  if (self.has_host_): n += 2 + self.lengthString(len(self.host_))
  if (self.has_cost_): n += 10
  if (self.has_task_queue_name_): n += 2 + self.lengthString(len(self.task_queue_name_))
  if (self.has_task_name_): n += 2 + self.lengthString(len(self.task_name_))
  if (self.has_was_loading_request_): n += 3
  if (self.has_pending_time_): n += 2 + self.lengthVarInt64(self.pending_time_)
  if (self.has_replica_index_): n += 2 + self.lengthVarInt64(self.replica_index_)
  if (self.has_finished_): n += 3
  if (self.has_clone_key_): n += 2 + self.lengthString(len(self.clone_key_))
  # Each repeated `line` entry: 2-byte tag + length-prefixed partial payload.
  n += 2 * len(self.line_)
  for i in range(len(self.line_)): n += self.lengthString(self.line_[i].ByteSizePartial())
  if (self.has_lines_incomplete_): n += 3
  if (self.has_app_engine_release_): n += 2 + self.lengthString(len(self.app_engine_release_))
  if (self.has_trace_id_): n += 2 + self.lengthString(len(self.trace_id_))
  if (self.has_exit_reason_): n += 2 + self.lengthVarInt64(self.exit_reason_)
  if (self.has_was_throttled_for_time_): n += 3
  if (self.has_was_throttled_for_requests_): n += 3
  if (self.has_throttled_time_): n += 2 + self.lengthVarInt64(self.throttled_time_)
  if (self.has_server_name_): n += 2 + self.lengthString(len(self.server_name_))
  return n
def Clear(self):
  """Reset every field of this message to its default, unset state."""
  # Invoke each field's clear_* accessor, preserving declaration order.
  for reset in (
      self.clear_app_id, self.clear_module_id, self.clear_version_id,
      self.clear_request_id, self.clear_offset, self.clear_ip,
      self.clear_nickname, self.clear_start_time, self.clear_end_time,
      self.clear_latency, self.clear_mcycles, self.clear_method,
      self.clear_resource, self.clear_http_version, self.clear_status,
      self.clear_response_size, self.clear_referrer, self.clear_user_agent,
      self.clear_url_map_entry, self.clear_combined, self.clear_api_mcycles,
      self.clear_host, self.clear_cost, self.clear_task_queue_name,
      self.clear_task_name, self.clear_was_loading_request,
      self.clear_pending_time, self.clear_replica_index, self.clear_finished,
      self.clear_clone_key, self.clear_line, self.clear_lines_incomplete,
      self.clear_app_engine_release, self.clear_trace_id,
      self.clear_exit_reason, self.clear_was_throttled_for_time,
      self.clear_was_throttled_for_requests, self.clear_throttled_time,
      self.clear_server_name):
    reset()
def OutputUnchecked(self, out):
  """Serialize this message to `out` without presence checks on required fields.

  Required fields are written unconditionally (callers are expected to
  have verified IsInitialized()); optional fields are written only when
  set.  The putVarInt32 constants are the precomputed wire tags
  (field_number << 3 | wire_type); fields are emitted in ascending tag
  order, which is why late-numbered fields like offset (35) and
  module_id (37) appear near the end.
  """
  out.putVarInt32(10)
  out.putPrefixedString(self.app_id_)
  out.putVarInt32(18)
  out.putPrefixedString(self.version_id_)
  out.putVarInt32(26)
  out.putPrefixedString(self.request_id_)
  out.putVarInt32(34)
  out.putPrefixedString(self.ip_)
  if (self.has_nickname_):
    out.putVarInt32(42)
    out.putPrefixedString(self.nickname_)
  out.putVarInt32(48)
  out.putVarInt64(self.start_time_)
  out.putVarInt32(56)
  out.putVarInt64(self.end_time_)
  out.putVarInt32(64)
  out.putVarInt64(self.latency_)
  out.putVarInt32(72)
  out.putVarInt64(self.mcycles_)
  out.putVarInt32(82)
  out.putPrefixedString(self.method_)
  out.putVarInt32(90)
  out.putPrefixedString(self.resource_)
  out.putVarInt32(98)
  out.putPrefixedString(self.http_version_)
  out.putVarInt32(104)
  out.putVarInt32(self.status_)
  out.putVarInt32(112)
  out.putVarInt64(self.response_size_)
  if (self.has_referrer_):
    out.putVarInt32(122)
    out.putPrefixedString(self.referrer_)
  if (self.has_user_agent_):
    out.putVarInt32(130)
    out.putPrefixedString(self.user_agent_)
  out.putVarInt32(138)
  out.putPrefixedString(self.url_map_entry_)
  out.putVarInt32(146)
  out.putPrefixedString(self.combined_)
  if (self.has_api_mcycles_):
    out.putVarInt32(152)
    out.putVarInt64(self.api_mcycles_)
  if (self.has_host_):
    out.putVarInt32(162)
    out.putPrefixedString(self.host_)
  if (self.has_cost_):
    out.putVarInt32(169)
    out.putDouble(self.cost_)
  if (self.has_task_queue_name_):
    out.putVarInt32(178)
    out.putPrefixedString(self.task_queue_name_)
  if (self.has_task_name_):
    out.putVarInt32(186)
    out.putPrefixedString(self.task_name_)
  if (self.has_was_loading_request_):
    out.putVarInt32(192)
    out.putBoolean(self.was_loading_request_)
  if (self.has_pending_time_):
    out.putVarInt32(200)
    out.putVarInt64(self.pending_time_)
  if (self.has_replica_index_):
    out.putVarInt32(208)
    out.putVarInt32(self.replica_index_)
  if (self.has_finished_):
    out.putVarInt32(216)
    out.putBoolean(self.finished_)
  if (self.has_clone_key_):
    out.putVarInt32(226)
    out.putPrefixedString(self.clone_key_)
  # Repeated sub-messages: tag, then length prefix, then the payload.
  for i in range(len(self.line_)):
    out.putVarInt32(234)
    out.putVarInt32(self.line_[i].ByteSize())
    self.line_[i].OutputUnchecked(out)
  if (self.has_exit_reason_):
    out.putVarInt32(240)
    out.putVarInt32(self.exit_reason_)
  if (self.has_was_throttled_for_time_):
    out.putVarInt32(248)
    out.putBoolean(self.was_throttled_for_time_)
  if (self.has_was_throttled_for_requests_):
    out.putVarInt32(256)
    out.putBoolean(self.was_throttled_for_requests_)
  if (self.has_throttled_time_):
    out.putVarInt32(264)
    out.putVarInt64(self.throttled_time_)
  if (self.has_server_name_):
    out.putVarInt32(274)
    out.putPrefixedString(self.server_name_)
  if (self.has_offset_):
    out.putVarInt32(282)
    out.putVarInt32(self.offset_.ByteSize())
    self.offset_.OutputUnchecked(out)
  if (self.has_lines_incomplete_):
    out.putVarInt32(288)
    out.putBoolean(self.lines_incomplete_)
  if (self.has_module_id_):
    out.putVarInt32(298)
    out.putPrefixedString(self.module_id_)
  if (self.has_app_engine_release_):
    out.putVarInt32(306)
    out.putPrefixedString(self.app_engine_release_)
  if (self.has_trace_id_):
    out.putVarInt32(314)
    out.putPrefixedString(self.trace_id_)
def OutputPartial(self, out):
  """Serialize only the fields that are set to `out`.

  Identical field/tag ordering to OutputUnchecked(), but every field —
  including required ones — is guarded by its presence flag, and nested
  messages use ByteSizePartial()/OutputPartial(), so this is safe on a
  partially-initialized message.
  """
  if (self.has_app_id_):
    out.putVarInt32(10)
    out.putPrefixedString(self.app_id_)
  if (self.has_version_id_):
    out.putVarInt32(18)
    out.putPrefixedString(self.version_id_)
  if (self.has_request_id_):
    out.putVarInt32(26)
    out.putPrefixedString(self.request_id_)
  if (self.has_ip_):
    out.putVarInt32(34)
    out.putPrefixedString(self.ip_)
  if (self.has_nickname_):
    out.putVarInt32(42)
    out.putPrefixedString(self.nickname_)
  if (self.has_start_time_):
    out.putVarInt32(48)
    out.putVarInt64(self.start_time_)
  if (self.has_end_time_):
    out.putVarInt32(56)
    out.putVarInt64(self.end_time_)
  if (self.has_latency_):
    out.putVarInt32(64)
    out.putVarInt64(self.latency_)
  if (self.has_mcycles_):
    out.putVarInt32(72)
    out.putVarInt64(self.mcycles_)
  if (self.has_method_):
    out.putVarInt32(82)
    out.putPrefixedString(self.method_)
  if (self.has_resource_):
    out.putVarInt32(90)
    out.putPrefixedString(self.resource_)
  if (self.has_http_version_):
    out.putVarInt32(98)
    out.putPrefixedString(self.http_version_)
  if (self.has_status_):
    out.putVarInt32(104)
    out.putVarInt32(self.status_)
  if (self.has_response_size_):
    out.putVarInt32(112)
    out.putVarInt64(self.response_size_)
  if (self.has_referrer_):
    out.putVarInt32(122)
    out.putPrefixedString(self.referrer_)
  if (self.has_user_agent_):
    out.putVarInt32(130)
    out.putPrefixedString(self.user_agent_)
  if (self.has_url_map_entry_):
    out.putVarInt32(138)
    out.putPrefixedString(self.url_map_entry_)
  if (self.has_combined_):
    out.putVarInt32(146)
    out.putPrefixedString(self.combined_)
  if (self.has_api_mcycles_):
    out.putVarInt32(152)
    out.putVarInt64(self.api_mcycles_)
  if (self.has_host_):
    out.putVarInt32(162)
    out.putPrefixedString(self.host_)
  if (self.has_cost_):
    out.putVarInt32(169)
    out.putDouble(self.cost_)
  if (self.has_task_queue_name_):
    out.putVarInt32(178)
    out.putPrefixedString(self.task_queue_name_)
  if (self.has_task_name_):
    out.putVarInt32(186)
    out.putPrefixedString(self.task_name_)
  if (self.has_was_loading_request_):
    out.putVarInt32(192)
    out.putBoolean(self.was_loading_request_)
  if (self.has_pending_time_):
    out.putVarInt32(200)
    out.putVarInt64(self.pending_time_)
  if (self.has_replica_index_):
    out.putVarInt32(208)
    out.putVarInt32(self.replica_index_)
  if (self.has_finished_):
    out.putVarInt32(216)
    out.putBoolean(self.finished_)
  if (self.has_clone_key_):
    out.putVarInt32(226)
    out.putPrefixedString(self.clone_key_)
  # Repeated sub-messages: tag, partial length prefix, partial payload.
  for i in range(len(self.line_)):
    out.putVarInt32(234)
    out.putVarInt32(self.line_[i].ByteSizePartial())
    self.line_[i].OutputPartial(out)
  if (self.has_exit_reason_):
    out.putVarInt32(240)
    out.putVarInt32(self.exit_reason_)
  if (self.has_was_throttled_for_time_):
    out.putVarInt32(248)
    out.putBoolean(self.was_throttled_for_time_)
  if (self.has_was_throttled_for_requests_):
    out.putVarInt32(256)
    out.putBoolean(self.was_throttled_for_requests_)
  if (self.has_throttled_time_):
    out.putVarInt32(264)
    out.putVarInt64(self.throttled_time_)
  if (self.has_server_name_):
    out.putVarInt32(274)
    out.putPrefixedString(self.server_name_)
  if (self.has_offset_):
    out.putVarInt32(282)
    out.putVarInt32(self.offset_.ByteSizePartial())
    self.offset_.OutputPartial(out)
  if (self.has_lines_incomplete_):
    out.putVarInt32(288)
    out.putBoolean(self.lines_incomplete_)
  if (self.has_module_id_):
    out.putVarInt32(298)
    out.putPrefixedString(self.module_id_)
  if (self.has_app_engine_release_):
    out.putVarInt32(306)
    out.putPrefixedString(self.app_engine_release_)
  if (self.has_trace_id_):
    out.putVarInt32(314)
    out.putPrefixedString(self.trace_id_)
def TryMerge(self, d):
  """Merge fields decoded from `d` (a ProtocolBuffer.Decoder) into self.

  Reads tag/value pairs until the decoder is exhausted.  Each `tt`
  constant is a precomputed wire tag (field_number << 3 | wire_type).
  Unknown tags are skipped; a zero tag is malformed input and raises
  ProtocolBufferDecodeError.
  """
  while d.avail() > 0:
    tt = d.getVarInt32()
    if tt == 10:
      self.set_app_id(d.getPrefixedString())
      continue
    if tt == 18:
      self.set_version_id(d.getPrefixedString())
      continue
    if tt == 26:
      self.set_request_id(d.getPrefixedString())
      continue
    if tt == 34:
      self.set_ip(d.getPrefixedString())
      continue
    if tt == 42:
      self.set_nickname(d.getPrefixedString())
      continue
    if tt == 48:
      self.set_start_time(d.getVarInt64())
      continue
    if tt == 56:
      self.set_end_time(d.getVarInt64())
      continue
    if tt == 64:
      self.set_latency(d.getVarInt64())
      continue
    if tt == 72:
      self.set_mcycles(d.getVarInt64())
      continue
    if tt == 82:
      self.set_method(d.getPrefixedString())
      continue
    if tt == 90:
      self.set_resource(d.getPrefixedString())
      continue
    if tt == 98:
      self.set_http_version(d.getPrefixedString())
      continue
    if tt == 104:
      self.set_status(d.getVarInt32())
      continue
    if tt == 112:
      self.set_response_size(d.getVarInt64())
      continue
    if tt == 122:
      self.set_referrer(d.getPrefixedString())
      continue
    if tt == 130:
      self.set_user_agent(d.getPrefixedString())
      continue
    if tt == 138:
      self.set_url_map_entry(d.getPrefixedString())
      continue
    if tt == 146:
      self.set_combined(d.getPrefixedString())
      continue
    if tt == 152:
      self.set_api_mcycles(d.getVarInt64())
      continue
    if tt == 162:
      self.set_host(d.getPrefixedString())
      continue
    if tt == 169:
      self.set_cost(d.getDouble())
      continue
    if tt == 178:
      self.set_task_queue_name(d.getPrefixedString())
      continue
    if tt == 186:
      self.set_task_name(d.getPrefixedString())
      continue
    if tt == 192:
      self.set_was_loading_request(d.getBoolean())
      continue
    if tt == 200:
      self.set_pending_time(d.getVarInt64())
      continue
    if tt == 208:
      self.set_replica_index(d.getVarInt32())
      continue
    if tt == 216:
      self.set_finished(d.getBoolean())
      continue
    if tt == 226:
      self.set_clone_key(d.getPrefixedString())
      continue
    if tt == 234:
      # Repeated `line` sub-message: decode its length-delimited payload
      # with a bounded sub-decoder, then advance past it.
      length = d.getVarInt32()
      tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
      d.skip(length)
      self.add_line().TryMerge(tmp)
      continue
    if tt == 240:
      self.set_exit_reason(d.getVarInt32())
      continue
    if tt == 248:
      self.set_was_throttled_for_time(d.getBoolean())
      continue
    if tt == 256:
      self.set_was_throttled_for_requests(d.getBoolean())
      continue
    if tt == 264:
      self.set_throttled_time(d.getVarInt64())
      continue
    if tt == 274:
      self.set_server_name(d.getPrefixedString())
      continue
    if tt == 282:
      # Optional `offset` sub-message, decoded the same way as `line`.
      length = d.getVarInt32()
      tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
      d.skip(length)
      self.mutable_offset().TryMerge(tmp)
      continue
    if tt == 288:
      self.set_lines_incomplete(d.getBoolean())
      continue
    if tt == 298:
      self.set_module_id(d.getPrefixedString())
      continue
    if tt == 306:
      self.set_app_engine_release(d.getPrefixedString())
      continue
    if tt == 314:
      self.set_trace_id(d.getPrefixedString())
      continue
    # Tag 0 is never valid; anything else unknown is skipped.
    if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
    d.skipData(tt)
def __str__(self, prefix="", printElemNumber=0):
  """Render the message as indented text, one "name: value" line per set field.

  Args:
    prefix: string prepended to every output line (used to indent
      nested messages).
    printElemNumber: if true, repeated fields get an element index,
      e.g. "line(0) <".
  """
  res=""
  if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
  if self.has_module_id_: res+=prefix+("module_id: %s\n" % self.DebugFormatString(self.module_id_))
  if self.has_version_id_: res+=prefix+("version_id: %s\n" % self.DebugFormatString(self.version_id_))
  if self.has_request_id_: res+=prefix+("request_id: %s\n" % self.DebugFormatString(self.request_id_))
  if self.has_offset_:
    # Nested message: recurse with a deeper indent.
    res+=prefix+"offset <\n"
    res+=self.offset_.__str__(prefix + "  ", printElemNumber)
    res+=prefix+">\n"
  if self.has_ip_: res+=prefix+("ip: %s\n" % self.DebugFormatString(self.ip_))
  if self.has_nickname_: res+=prefix+("nickname: %s\n" % self.DebugFormatString(self.nickname_))
  if self.has_start_time_: res+=prefix+("start_time: %s\n" % self.DebugFormatInt64(self.start_time_))
  if self.has_end_time_: res+=prefix+("end_time: %s\n" % self.DebugFormatInt64(self.end_time_))
  if self.has_latency_: res+=prefix+("latency: %s\n" % self.DebugFormatInt64(self.latency_))
  if self.has_mcycles_: res+=prefix+("mcycles: %s\n" % self.DebugFormatInt64(self.mcycles_))
  if self.has_method_: res+=prefix+("method: %s\n" % self.DebugFormatString(self.method_))
  if self.has_resource_: res+=prefix+("resource: %s\n" % self.DebugFormatString(self.resource_))
  if self.has_http_version_: res+=prefix+("http_version: %s\n" % self.DebugFormatString(self.http_version_))
  if self.has_status_: res+=prefix+("status: %s\n" % self.DebugFormatInt32(self.status_))
  if self.has_response_size_: res+=prefix+("response_size: %s\n" % self.DebugFormatInt64(self.response_size_))
  if self.has_referrer_: res+=prefix+("referrer: %s\n" % self.DebugFormatString(self.referrer_))
  if self.has_user_agent_: res+=prefix+("user_agent: %s\n" % self.DebugFormatString(self.user_agent_))
  if self.has_url_map_entry_: res+=prefix+("url_map_entry: %s\n" % self.DebugFormatString(self.url_map_entry_))
  if self.has_combined_: res+=prefix+("combined: %s\n" % self.DebugFormatString(self.combined_))
  if self.has_api_mcycles_: res+=prefix+("api_mcycles: %s\n" % self.DebugFormatInt64(self.api_mcycles_))
  if self.has_host_: res+=prefix+("host: %s\n" % self.DebugFormatString(self.host_))
  if self.has_cost_: res+=prefix+("cost: %s\n" % self.DebugFormat(self.cost_))
  if self.has_task_queue_name_: res+=prefix+("task_queue_name: %s\n" % self.DebugFormatString(self.task_queue_name_))
  if self.has_task_name_: res+=prefix+("task_name: %s\n" % self.DebugFormatString(self.task_name_))
  if self.has_was_loading_request_: res+=prefix+("was_loading_request: %s\n" % self.DebugFormatBool(self.was_loading_request_))
  if self.has_pending_time_: res+=prefix+("pending_time: %s\n" % self.DebugFormatInt64(self.pending_time_))
  if self.has_replica_index_: res+=prefix+("replica_index: %s\n" % self.DebugFormatInt32(self.replica_index_))
  if self.has_finished_: res+=prefix+("finished: %s\n" % self.DebugFormatBool(self.finished_))
  if self.has_clone_key_: res+=prefix+("clone_key: %s\n" % self.DebugFormatString(self.clone_key_))
  # Repeated `line` entries, optionally numbered.
  cnt=0
  for e in self.line_:
    elm=""
    if printElemNumber: elm="(%d)" % cnt
    res+=prefix+("line%s <\n" % elm)
    res+=e.__str__(prefix + "  ", printElemNumber)
    res+=prefix+">\n"
    cnt+=1
  if self.has_lines_incomplete_: res+=prefix+("lines_incomplete: %s\n" % self.DebugFormatBool(self.lines_incomplete_))
  if self.has_app_engine_release_: res+=prefix+("app_engine_release: %s\n" % self.DebugFormatString(self.app_engine_release_))
  if self.has_trace_id_: res+=prefix+("trace_id: %s\n" % self.DebugFormatString(self.trace_id_))
  if self.has_exit_reason_: res+=prefix+("exit_reason: %s\n" % self.DebugFormatInt32(self.exit_reason_))
  if self.has_was_throttled_for_time_: res+=prefix+("was_throttled_for_time: %s\n" % self.DebugFormatBool(self.was_throttled_for_time_))
  if self.has_was_throttled_for_requests_: res+=prefix+("was_throttled_for_requests: %s\n" % self.DebugFormatBool(self.was_throttled_for_requests_))
  if self.has_throttled_time_: res+=prefix+("throttled_time: %s\n" % self.DebugFormatInt64(self.throttled_time_))
  if self.has_server_name_: res+=prefix+("server_name: %s\n" % self.DebugFormatString(self.server_name_))
  return res
def _BuildTagLookupTable(sparse, maxtag, default=None):
return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])
# Proto field numbers (tags) for each field of this message.  Numbering
# is not contiguous in declaration order: later additions (offset=35,
# lines_incomplete=36, module_id=37, app_engine_release=38, trace_id=39)
# reuse declaration positions but carry higher tags.
kapp_id = 1
kmodule_id = 37
kversion_id = 2
krequest_id = 3
koffset = 35
kip = 4
knickname = 5
kstart_time = 6
kend_time = 7
klatency = 8
kmcycles = 9
kmethod = 10
kresource = 11
khttp_version = 12
kstatus = 13
kresponse_size = 14
kreferrer = 15
kuser_agent = 16
kurl_map_entry = 17
kcombined = 18
kapi_mcycles = 19
khost = 20
kcost = 21
ktask_queue_name = 22
ktask_name = 23
kwas_loading_request = 24
kpending_time = 25
kreplica_index = 26
kfinished = 27
kclone_key = 28
kline = 29
klines_incomplete = 36
kapp_engine_release = 38
ktrace_id = 39
kexit_reason = 30
kwas_throttled_for_time = 31
kwas_throttled_for_requests = 32
kthrottled_time = 33
kserver_name = 34
# Dense tag -> field-name table used by the generic text formatter.
_TEXT = _BuildTagLookupTable({
  0: "ErrorCode",
  1: "app_id",
  2: "version_id",
  3: "request_id",
  4: "ip",
  5: "nickname",
  6: "start_time",
  7: "end_time",
  8: "latency",
  9: "mcycles",
  10: "method",
  11: "resource",
  12: "http_version",
  13: "status",
  14: "response_size",
  15: "referrer",
  16: "user_agent",
  17: "url_map_entry",
  18: "combined",
  19: "api_mcycles",
  20: "host",
  21: "cost",
  22: "task_queue_name",
  23: "task_name",
  24: "was_loading_request",
  25: "pending_time",
  26: "replica_index",
  27: "finished",
  28: "clone_key",
  29: "line",
  30: "exit_reason",
  31: "was_throttled_for_time",
  32: "was_throttled_for_requests",
  33: "throttled_time",
  34: "server_name",
  35: "offset",
  36: "lines_incomplete",
  37: "module_id",
  38: "app_engine_release",
  39: "trace_id",
}, 39)
# Dense tag -> wire-type table for the generic encoder/decoder; unused
# tags fall back to MAX_TYPE.
_TYPES = _BuildTagLookupTable({
  0: ProtocolBuffer.Encoder.NUMERIC,
  1: ProtocolBuffer.Encoder.STRING,
  2: ProtocolBuffer.Encoder.STRING,
  3: ProtocolBuffer.Encoder.STRING,
  4: ProtocolBuffer.Encoder.STRING,
  5: ProtocolBuffer.Encoder.STRING,
  6: ProtocolBuffer.Encoder.NUMERIC,
  7: ProtocolBuffer.Encoder.NUMERIC,
  8: ProtocolBuffer.Encoder.NUMERIC,
  9: ProtocolBuffer.Encoder.NUMERIC,
  10: ProtocolBuffer.Encoder.STRING,
  11: ProtocolBuffer.Encoder.STRING,
  12: ProtocolBuffer.Encoder.STRING,
  13: ProtocolBuffer.Encoder.NUMERIC,
  14: ProtocolBuffer.Encoder.NUMERIC,
  15: ProtocolBuffer.Encoder.STRING,
  16: ProtocolBuffer.Encoder.STRING,
  17: ProtocolBuffer.Encoder.STRING,
  18: ProtocolBuffer.Encoder.STRING,
  19: ProtocolBuffer.Encoder.NUMERIC,
  20: ProtocolBuffer.Encoder.STRING,
  21: ProtocolBuffer.Encoder.DOUBLE,
  22: ProtocolBuffer.Encoder.STRING,
  23: ProtocolBuffer.Encoder.STRING,
  24: ProtocolBuffer.Encoder.NUMERIC,
  25: ProtocolBuffer.Encoder.NUMERIC,
  26: ProtocolBuffer.Encoder.NUMERIC,
  27: ProtocolBuffer.Encoder.NUMERIC,
  28: ProtocolBuffer.Encoder.STRING,
  29: ProtocolBuffer.Encoder.STRING,
  30: ProtocolBuffer.Encoder.NUMERIC,
  31: ProtocolBuffer.Encoder.NUMERIC,
  32: ProtocolBuffer.Encoder.NUMERIC,
  33: ProtocolBuffer.Encoder.NUMERIC,
  34: ProtocolBuffer.Encoder.STRING,
  35: ProtocolBuffer.Encoder.STRING,
  36: ProtocolBuffer.Encoder.NUMERIC,
  37: ProtocolBuffer.Encoder.STRING,
  38: ProtocolBuffer.Encoder.STRING,
  39: ProtocolBuffer.Encoder.STRING,
}, 39, ProtocolBuffer.Encoder.MAX_TYPE)
# Style hooks are unused for this message.
_STYLE = """"""
_STYLE_CONTENT_TYPE = """"""
_PROTO_DESCRIPTOR_NAME = 'apphosting.RequestLog'
class LogModuleVersion(ProtocolBuffer.ProtocolMessage):
  """Generated message apphosting.LogModuleVersion.

  Holds two optional string fields: module_id (default "default",
  tag 1) and version_id (default "", tag 2), each with a companion
  *_set boolean (tags 101/102) recording whether the field was set
  explicitly.
  """
  # Presence flags and class-level defaults for each field.
  has_module_id_ = 0
  module_id_ = "default"
  has_module_id_set_ = 0
  module_id_set_ = 0
  has_version_id_ = 0
  version_id_ = ""
  has_version_id_set_ = 0
  version_id_set_ = 0
  def __init__(self, contents=None):
    """Optionally deserialize from a wire-format string."""
    if contents is not None: self.MergeFromString(contents)
  def module_id(self): return self.module_id_
  def set_module_id(self, x):
    self.has_module_id_ = 1
    self.module_id_ = x
  def clear_module_id(self):
    if self.has_module_id_:
      self.has_module_id_ = 0
      self.module_id_ = "default"
  def has_module_id(self): return self.has_module_id_
  def module_id_set(self): return self.module_id_set_
  def set_module_id_set(self, x):
    self.has_module_id_set_ = 1
    self.module_id_set_ = x
  def clear_module_id_set(self):
    if self.has_module_id_set_:
      self.has_module_id_set_ = 0
      self.module_id_set_ = 0
  def has_module_id_set(self): return self.has_module_id_set_
  def version_id(self): return self.version_id_
  def set_version_id(self, x):
    self.has_version_id_ = 1
    self.version_id_ = x
  def clear_version_id(self):
    if self.has_version_id_:
      self.has_version_id_ = 0
      self.version_id_ = ""
  def has_version_id(self): return self.has_version_id_
  def version_id_set(self): return self.version_id_set_
  def set_version_id_set(self, x):
    self.has_version_id_set_ = 1
    self.version_id_set_ = x
  def clear_version_id_set(self):
    if self.has_version_id_set_:
      self.has_version_id_set_ = 0
      self.version_id_set_ = 0
  def has_version_id_set(self): return self.has_version_id_set_
  def MergeFrom(self, x):
    """Copy every set field from another LogModuleVersion into self."""
    assert x is not self
    if (x.has_module_id()): self.set_module_id(x.module_id())
    if (x.has_module_id_set()): self.set_module_id_set(x.module_id_set())
    if (x.has_version_id()): self.set_version_id(x.version_id())
    if (x.has_version_id_set()): self.set_version_id_set(x.version_id_set())
  def Equals(self, x):
    """Field-by-field equality: presence flags and values must both match."""
    if x is self: return 1
    if self.has_module_id_ != x.has_module_id_: return 0
    if self.has_module_id_ and self.module_id_ != x.module_id_: return 0
    if self.has_module_id_set_ != x.has_module_id_set_: return 0
    if self.has_module_id_set_ and self.module_id_set_ != x.module_id_set_: return 0
    if self.has_version_id_ != x.has_version_id_: return 0
    if self.has_version_id_ and self.version_id_ != x.version_id_: return 0
    if self.has_version_id_set_ != x.has_version_id_set_: return 0
    if self.has_version_id_set_ and self.version_id_set_ != x.version_id_set_: return 0
    return 1
  def IsInitialized(self, debug_strs=None):
    """Always 1: this message has no required fields."""
    initialized = 1
    return initialized
  def ByteSize(self):
    """Serialized size; constants are tag bytes (+1 payload byte for booleans)."""
    n = 0
    if (self.has_module_id_): n += 1 + self.lengthString(len(self.module_id_))
    if (self.has_module_id_set_): n += 3
    if (self.has_version_id_): n += 1 + self.lengthString(len(self.version_id_))
    if (self.has_version_id_set_): n += 3
    return n
  def ByteSizePartial(self):
    """Identical to ByteSize(): with no required fields there is no difference."""
    n = 0
    if (self.has_module_id_): n += 1 + self.lengthString(len(self.module_id_))
    if (self.has_module_id_set_): n += 3
    if (self.has_version_id_): n += 1 + self.lengthString(len(self.version_id_))
    if (self.has_version_id_set_): n += 3
    return n
  def Clear(self):
    """Reset all fields to their defaults."""
    self.clear_module_id()
    self.clear_module_id_set()
    self.clear_version_id()
    self.clear_version_id_set()
  def OutputUnchecked(self, out):
    """Serialize set fields; tags 10/18 are fields 1/2, 808/816 are 101/102."""
    if (self.has_module_id_):
      out.putVarInt32(10)
      out.putPrefixedString(self.module_id_)
    if (self.has_version_id_):
      out.putVarInt32(18)
      out.putPrefixedString(self.version_id_)
    if (self.has_module_id_set_):
      out.putVarInt32(808)
      out.putBoolean(self.module_id_set_)
    if (self.has_version_id_set_):
      out.putVarInt32(816)
      out.putBoolean(self.version_id_set_)
  def OutputPartial(self, out):
    """Same as OutputUnchecked(): every field is already presence-guarded."""
    if (self.has_module_id_):
      out.putVarInt32(10)
      out.putPrefixedString(self.module_id_)
    if (self.has_version_id_):
      out.putVarInt32(18)
      out.putPrefixedString(self.version_id_)
    if (self.has_module_id_set_):
      out.putVarInt32(808)
      out.putBoolean(self.module_id_set_)
    if (self.has_version_id_set_):
      out.putVarInt32(816)
      out.putBoolean(self.version_id_set_)
  def TryMerge(self, d):
    """Decode tag/value pairs from decoder `d`; skip unknown tags, error on tag 0."""
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_module_id(d.getPrefixedString())
        continue
      if tt == 18:
        self.set_version_id(d.getPrefixedString())
        continue
      if tt == 808:
        self.set_module_id_set(d.getBoolean())
        continue
      if tt == 816:
        self.set_version_id_set(d.getBoolean())
        continue
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)
  def __str__(self, prefix="", printElemNumber=0):
    """Text rendering: one "name: value" line per set field, prefixed."""
    res=""
    if self.has_module_id_: res+=prefix+("module_id: %s\n" % self.DebugFormatString(self.module_id_))
    if self.has_module_id_set_: res+=prefix+("module_id_set: %s\n" % self.DebugFormatBool(self.module_id_set_))
    if self.has_version_id_: res+=prefix+("version_id: %s\n" % self.DebugFormatString(self.version_id_))
    if self.has_version_id_set_: res+=prefix+("version_id_set: %s\n" % self.DebugFormatBool(self.version_id_set_))
    return res
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    """Expand a sparse {tag: value} dict into a dense tuple indexed 0..maxtag."""
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])
  # Proto field numbers (tags).
  kmodule_id = 1
  kmodule_id_set = 101
  kversion_id = 2
  kversion_id_set = 102
  # Dense tag -> field-name and tag -> wire-type lookup tables.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "module_id",
    2: "version_id",
    101: "module_id_set",
    102: "version_id_set",
  }, 102)
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    101: ProtocolBuffer.Encoder.NUMERIC,
    102: ProtocolBuffer.Encoder.NUMERIC,
  }, 102, ProtocolBuffer.Encoder.MAX_TYPE)
  # Style hooks are unused for this message.
  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.LogModuleVersion'
class LogReadRequest(ProtocolBuffer.ProtocolMessage):
  """Hand-rolled protobuf message for 'apphosting.LogReadRequest'.

  Auto-generated serialization stub (old App Engine ProtocolBuffer style,
  not google.protobuf).  Carries the parameters of a log-read request:
  the required app_id, version/module filters, a start/end time window,
  a paging offset, a minimum log level, result-count limits, regex
  filters, and a set of include_* booleans.  Optional scalar fields are
  represented as paired class attributes has_<field>_ (presence flag)
  and <field>_ (value); setters shadow them per instance.  Repeated and
  message-valued fields are created per instance in __init__.
  """

  # Presence flags and default values for every optional scalar field.
  # These are class attributes; instance setters shadow them.
  has_app_id_ = 0
  app_id_ = ""
  has_start_time_ = 0
  start_time_ = 0
  has_start_time_set_ = 0
  start_time_set_ = 0
  has_end_time_ = 0
  end_time_ = 0
  has_end_time_set_ = 0
  end_time_set_ = 0
  has_offset_ = 0
  offset_ = None
  has_minimum_log_level_ = 0
  minimum_log_level_ = 0
  has_minimum_log_level_set_ = 0
  minimum_log_level_set_ = 0
  has_include_incomplete_ = 0
  include_incomplete_ = 0
  has_count_ = 0
  count_ = 0
  has_count_set_ = 0
  count_set_ = 0
  has_combined_log_regex_ = 0
  combined_log_regex_ = ""
  has_combined_log_regex_set_ = 0
  combined_log_regex_set_ = 0
  has_host_regex_ = 0
  host_regex_ = ""
  has_host_regex_set_ = 0
  host_regex_set_ = 0
  has_replica_index_ = 0
  replica_index_ = 0
  has_replica_index_set_ = 0
  replica_index_set_ = 0
  has_include_app_logs_ = 0
  include_app_logs_ = 0
  has_app_logs_per_request_ = 0
  app_logs_per_request_ = 0
  has_app_logs_per_request_set_ = 0
  app_logs_per_request_set_ = 0
  has_include_host_ = 0
  include_host_ = 0
  has_include_all_ = 0
  include_all_ = 0
  has_cache_iterator_ = 0
  cache_iterator_ = 0
  has_num_shards_ = 0
  num_shards_ = 0
  has_num_shards_set_ = 0
  num_shards_set_ = 0

  def __init__(self, contents=None):
    """Create an empty message; optionally parse `contents` (wire bytes).

    The lock guards the lazy construction of the optional `offset`
    submessage in offset().
    """
    self.version_id_ = []
    self.module_version_ = []
    self.request_id_ = []
    self.lazy_init_lock_ = thread.allocate_lock()
    if contents is not None: self.MergeFromString(contents)

  # Standard generated accessors follow, one group per field:
  #   <field>() / set_<field>(x) / clear_<field>() / has_<field>()
  # Repeated fields (version_id, module_version, request_id) instead get
  #   <field>_size() / <field>_list() / <field>(i) / add_<field>() /
  #   clear_<field>(), plus mutable_<field>(i) for message-typed entries.
  def app_id(self): return self.app_id_

  def set_app_id(self, x):
    self.has_app_id_ = 1
    self.app_id_ = x

  def clear_app_id(self):
    if self.has_app_id_:
      self.has_app_id_ = 0
      self.app_id_ = ""

  def has_app_id(self): return self.has_app_id_

  def version_id_size(self): return len(self.version_id_)
  def version_id_list(self): return self.version_id_

  def version_id(self, i):
    return self.version_id_[i]

  def set_version_id(self, i, x):
    self.version_id_[i] = x

  def add_version_id(self, x):
    self.version_id_.append(x)

  def clear_version_id(self):
    self.version_id_ = []

  def module_version_size(self): return len(self.module_version_)
  def module_version_list(self): return self.module_version_

  def module_version(self, i):
    return self.module_version_[i]

  def mutable_module_version(self, i):
    return self.module_version_[i]

  def add_module_version(self):
    x = LogModuleVersion()
    self.module_version_.append(x)
    return x

  def clear_module_version(self):
    self.module_version_ = []

  def start_time(self): return self.start_time_

  def set_start_time(self, x):
    self.has_start_time_ = 1
    self.start_time_ = x

  def clear_start_time(self):
    if self.has_start_time_:
      self.has_start_time_ = 0
      self.start_time_ = 0

  def has_start_time(self): return self.has_start_time_

  def start_time_set(self): return self.start_time_set_

  def set_start_time_set(self, x):
    self.has_start_time_set_ = 1
    self.start_time_set_ = x

  def clear_start_time_set(self):
    if self.has_start_time_set_:
      self.has_start_time_set_ = 0
      self.start_time_set_ = 0

  def has_start_time_set(self): return self.has_start_time_set_

  def end_time(self): return self.end_time_

  def set_end_time(self, x):
    self.has_end_time_ = 1
    self.end_time_ = x

  def clear_end_time(self):
    if self.has_end_time_:
      self.has_end_time_ = 0
      self.end_time_ = 0

  def has_end_time(self): return self.has_end_time_

  def end_time_set(self): return self.end_time_set_

  def set_end_time_set(self, x):
    self.has_end_time_set_ = 1
    self.end_time_set_ = x

  def clear_end_time_set(self):
    if self.has_end_time_set_:
      self.has_end_time_set_ = 0
      self.end_time_set_ = 0

  def has_end_time_set(self): return self.has_end_time_set_

  def offset(self):
    """Return the LogOffset submessage, lazily creating it.

    Double-checked under lazy_init_lock_ so concurrent readers build the
    submessage only once.  Calling offset() alone does NOT set the
    presence bit; use mutable_offset() for that.
    """
    if self.offset_ is None:
      self.lazy_init_lock_.acquire()
      try:
        if self.offset_ is None: self.offset_ = LogOffset()
      finally:
        self.lazy_init_lock_.release()
    return self.offset_

  def mutable_offset(self): self.has_offset_ = 1; return self.offset()

  def clear_offset(self):
    # The lazily-built submessage is kept (and reset) rather than dropped,
    # to avoid re-acquiring the lazy-init lock later.
    if self.has_offset_:
      self.has_offset_ = 0;
      if self.offset_ is not None: self.offset_.Clear()

  def has_offset(self): return self.has_offset_

  def request_id_size(self): return len(self.request_id_)
  def request_id_list(self): return self.request_id_

  def request_id(self, i):
    return self.request_id_[i]

  def set_request_id(self, i, x):
    self.request_id_[i] = x

  def add_request_id(self, x):
    self.request_id_.append(x)

  def clear_request_id(self):
    self.request_id_ = []

  def minimum_log_level(self): return self.minimum_log_level_

  def set_minimum_log_level(self, x):
    self.has_minimum_log_level_ = 1
    self.minimum_log_level_ = x

  def clear_minimum_log_level(self):
    if self.has_minimum_log_level_:
      self.has_minimum_log_level_ = 0
      self.minimum_log_level_ = 0

  def has_minimum_log_level(self): return self.has_minimum_log_level_

  def minimum_log_level_set(self): return self.minimum_log_level_set_

  def set_minimum_log_level_set(self, x):
    self.has_minimum_log_level_set_ = 1
    self.minimum_log_level_set_ = x

  def clear_minimum_log_level_set(self):
    if self.has_minimum_log_level_set_:
      self.has_minimum_log_level_set_ = 0
      self.minimum_log_level_set_ = 0

  def has_minimum_log_level_set(self): return self.has_minimum_log_level_set_

  def include_incomplete(self): return self.include_incomplete_

  def set_include_incomplete(self, x):
    self.has_include_incomplete_ = 1
    self.include_incomplete_ = x

  def clear_include_incomplete(self):
    if self.has_include_incomplete_:
      self.has_include_incomplete_ = 0
      self.include_incomplete_ = 0

  def has_include_incomplete(self): return self.has_include_incomplete_

  def count(self): return self.count_

  def set_count(self, x):
    self.has_count_ = 1
    self.count_ = x

  def clear_count(self):
    if self.has_count_:
      self.has_count_ = 0
      self.count_ = 0

  def has_count(self): return self.has_count_

  def count_set(self): return self.count_set_

  def set_count_set(self, x):
    self.has_count_set_ = 1
    self.count_set_ = x

  def clear_count_set(self):
    if self.has_count_set_:
      self.has_count_set_ = 0
      self.count_set_ = 0

  def has_count_set(self): return self.has_count_set_

  def combined_log_regex(self): return self.combined_log_regex_

  def set_combined_log_regex(self, x):
    self.has_combined_log_regex_ = 1
    self.combined_log_regex_ = x

  def clear_combined_log_regex(self):
    if self.has_combined_log_regex_:
      self.has_combined_log_regex_ = 0
      self.combined_log_regex_ = ""

  def has_combined_log_regex(self): return self.has_combined_log_regex_

  def combined_log_regex_set(self): return self.combined_log_regex_set_

  def set_combined_log_regex_set(self, x):
    self.has_combined_log_regex_set_ = 1
    self.combined_log_regex_set_ = x

  def clear_combined_log_regex_set(self):
    if self.has_combined_log_regex_set_:
      self.has_combined_log_regex_set_ = 0
      self.combined_log_regex_set_ = 0

  def has_combined_log_regex_set(self): return self.has_combined_log_regex_set_

  def host_regex(self): return self.host_regex_

  def set_host_regex(self, x):
    self.has_host_regex_ = 1
    self.host_regex_ = x

  def clear_host_regex(self):
    if self.has_host_regex_:
      self.has_host_regex_ = 0
      self.host_regex_ = ""

  def has_host_regex(self): return self.has_host_regex_

  def host_regex_set(self): return self.host_regex_set_

  def set_host_regex_set(self, x):
    self.has_host_regex_set_ = 1
    self.host_regex_set_ = x

  def clear_host_regex_set(self):
    if self.has_host_regex_set_:
      self.has_host_regex_set_ = 0
      self.host_regex_set_ = 0

  def has_host_regex_set(self): return self.has_host_regex_set_

  def replica_index(self): return self.replica_index_

  def set_replica_index(self, x):
    self.has_replica_index_ = 1
    self.replica_index_ = x

  def clear_replica_index(self):
    if self.has_replica_index_:
      self.has_replica_index_ = 0
      self.replica_index_ = 0

  def has_replica_index(self): return self.has_replica_index_

  def replica_index_set(self): return self.replica_index_set_

  def set_replica_index_set(self, x):
    self.has_replica_index_set_ = 1
    self.replica_index_set_ = x

  def clear_replica_index_set(self):
    if self.has_replica_index_set_:
      self.has_replica_index_set_ = 0
      self.replica_index_set_ = 0

  def has_replica_index_set(self): return self.has_replica_index_set_

  def include_app_logs(self): return self.include_app_logs_

  def set_include_app_logs(self, x):
    self.has_include_app_logs_ = 1
    self.include_app_logs_ = x

  def clear_include_app_logs(self):
    if self.has_include_app_logs_:
      self.has_include_app_logs_ = 0
      self.include_app_logs_ = 0

  def has_include_app_logs(self): return self.has_include_app_logs_

  def app_logs_per_request(self): return self.app_logs_per_request_

  def set_app_logs_per_request(self, x):
    self.has_app_logs_per_request_ = 1
    self.app_logs_per_request_ = x

  def clear_app_logs_per_request(self):
    if self.has_app_logs_per_request_:
      self.has_app_logs_per_request_ = 0
      self.app_logs_per_request_ = 0

  def has_app_logs_per_request(self): return self.has_app_logs_per_request_

  def app_logs_per_request_set(self): return self.app_logs_per_request_set_

  def set_app_logs_per_request_set(self, x):
    self.has_app_logs_per_request_set_ = 1
    self.app_logs_per_request_set_ = x

  def clear_app_logs_per_request_set(self):
    if self.has_app_logs_per_request_set_:
      self.has_app_logs_per_request_set_ = 0
      self.app_logs_per_request_set_ = 0

  def has_app_logs_per_request_set(self): return self.has_app_logs_per_request_set_

  def include_host(self): return self.include_host_

  def set_include_host(self, x):
    self.has_include_host_ = 1
    self.include_host_ = x

  def clear_include_host(self):
    if self.has_include_host_:
      self.has_include_host_ = 0
      self.include_host_ = 0

  def has_include_host(self): return self.has_include_host_

  def include_all(self): return self.include_all_

  def set_include_all(self, x):
    self.has_include_all_ = 1
    self.include_all_ = x

  def clear_include_all(self):
    if self.has_include_all_:
      self.has_include_all_ = 0
      self.include_all_ = 0

  def has_include_all(self): return self.has_include_all_

  def cache_iterator(self): return self.cache_iterator_

  def set_cache_iterator(self, x):
    self.has_cache_iterator_ = 1
    self.cache_iterator_ = x

  def clear_cache_iterator(self):
    if self.has_cache_iterator_:
      self.has_cache_iterator_ = 0
      self.cache_iterator_ = 0

  def has_cache_iterator(self): return self.has_cache_iterator_

  def num_shards(self): return self.num_shards_

  def set_num_shards(self, x):
    self.has_num_shards_ = 1
    self.num_shards_ = x

  def clear_num_shards(self):
    if self.has_num_shards_:
      self.has_num_shards_ = 0
      self.num_shards_ = 0

  def has_num_shards(self): return self.has_num_shards_

  def num_shards_set(self): return self.num_shards_set_

  def set_num_shards_set(self, x):
    self.has_num_shards_set_ = 1
    self.num_shards_set_ = x

  def clear_num_shards_set(self):
    if self.has_num_shards_set_:
      self.has_num_shards_set_ = 0
      self.num_shards_set_ = 0

  def has_num_shards_set(self): return self.has_num_shards_set_

  def MergeFrom(self, x):
    """Merge every present field of message `x` into self.

    Repeated fields are appended; set scalars overwrite; the offset
    submessage is merged recursively.
    """
    assert x is not self
    if (x.has_app_id()): self.set_app_id(x.app_id())
    for i in range(x.version_id_size()): self.add_version_id(x.version_id(i))
    for i in range(x.module_version_size()): self.add_module_version().CopyFrom(x.module_version(i))
    if (x.has_start_time()): self.set_start_time(x.start_time())
    if (x.has_start_time_set()): self.set_start_time_set(x.start_time_set())
    if (x.has_end_time()): self.set_end_time(x.end_time())
    if (x.has_end_time_set()): self.set_end_time_set(x.end_time_set())
    if (x.has_offset()): self.mutable_offset().MergeFrom(x.offset())
    for i in range(x.request_id_size()): self.add_request_id(x.request_id(i))
    if (x.has_minimum_log_level()): self.set_minimum_log_level(x.minimum_log_level())
    if (x.has_minimum_log_level_set()): self.set_minimum_log_level_set(x.minimum_log_level_set())
    if (x.has_include_incomplete()): self.set_include_incomplete(x.include_incomplete())
    if (x.has_count()): self.set_count(x.count())
    if (x.has_count_set()): self.set_count_set(x.count_set())
    if (x.has_combined_log_regex()): self.set_combined_log_regex(x.combined_log_regex())
    if (x.has_combined_log_regex_set()): self.set_combined_log_regex_set(x.combined_log_regex_set())
    if (x.has_host_regex()): self.set_host_regex(x.host_regex())
    if (x.has_host_regex_set()): self.set_host_regex_set(x.host_regex_set())
    if (x.has_replica_index()): self.set_replica_index(x.replica_index())
    if (x.has_replica_index_set()): self.set_replica_index_set(x.replica_index_set())
    if (x.has_include_app_logs()): self.set_include_app_logs(x.include_app_logs())
    if (x.has_app_logs_per_request()): self.set_app_logs_per_request(x.app_logs_per_request())
    if (x.has_app_logs_per_request_set()): self.set_app_logs_per_request_set(x.app_logs_per_request_set())
    if (x.has_include_host()): self.set_include_host(x.include_host())
    if (x.has_include_all()): self.set_include_all(x.include_all())
    if (x.has_cache_iterator()): self.set_cache_iterator(x.cache_iterator())
    if (x.has_num_shards()): self.set_num_shards(x.num_shards())
    if (x.has_num_shards_set()): self.set_num_shards_set(x.num_shards_set())

  def Equals(self, x):
    """Field-by-field equality; returns 1 if equal, 0 otherwise.

    Presence bits must match as well as values (an unset field differs
    from a field explicitly set to its default).
    """
    if x is self: return 1
    if self.has_app_id_ != x.has_app_id_: return 0
    if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
    if len(self.version_id_) != len(x.version_id_): return 0
    for e1, e2 in zip(self.version_id_, x.version_id_):
      if e1 != e2: return 0
    if len(self.module_version_) != len(x.module_version_): return 0
    for e1, e2 in zip(self.module_version_, x.module_version_):
      if e1 != e2: return 0
    if self.has_start_time_ != x.has_start_time_: return 0
    if self.has_start_time_ and self.start_time_ != x.start_time_: return 0
    if self.has_start_time_set_ != x.has_start_time_set_: return 0
    if self.has_start_time_set_ and self.start_time_set_ != x.start_time_set_: return 0
    if self.has_end_time_ != x.has_end_time_: return 0
    if self.has_end_time_ and self.end_time_ != x.end_time_: return 0
    if self.has_end_time_set_ != x.has_end_time_set_: return 0
    if self.has_end_time_set_ and self.end_time_set_ != x.end_time_set_: return 0
    if self.has_offset_ != x.has_offset_: return 0
    if self.has_offset_ and self.offset_ != x.offset_: return 0
    if len(self.request_id_) != len(x.request_id_): return 0
    for e1, e2 in zip(self.request_id_, x.request_id_):
      if e1 != e2: return 0
    if self.has_minimum_log_level_ != x.has_minimum_log_level_: return 0
    if self.has_minimum_log_level_ and self.minimum_log_level_ != x.minimum_log_level_: return 0
    if self.has_minimum_log_level_set_ != x.has_minimum_log_level_set_: return 0
    if self.has_minimum_log_level_set_ and self.minimum_log_level_set_ != x.minimum_log_level_set_: return 0
    if self.has_include_incomplete_ != x.has_include_incomplete_: return 0
    if self.has_include_incomplete_ and self.include_incomplete_ != x.include_incomplete_: return 0
    if self.has_count_ != x.has_count_: return 0
    if self.has_count_ and self.count_ != x.count_: return 0
    if self.has_count_set_ != x.has_count_set_: return 0
    if self.has_count_set_ and self.count_set_ != x.count_set_: return 0
    if self.has_combined_log_regex_ != x.has_combined_log_regex_: return 0
    if self.has_combined_log_regex_ and self.combined_log_regex_ != x.combined_log_regex_: return 0
    if self.has_combined_log_regex_set_ != x.has_combined_log_regex_set_: return 0
    if self.has_combined_log_regex_set_ and self.combined_log_regex_set_ != x.combined_log_regex_set_: return 0
    if self.has_host_regex_ != x.has_host_regex_: return 0
    if self.has_host_regex_ and self.host_regex_ != x.host_regex_: return 0
    if self.has_host_regex_set_ != x.has_host_regex_set_: return 0
    if self.has_host_regex_set_ and self.host_regex_set_ != x.host_regex_set_: return 0
    if self.has_replica_index_ != x.has_replica_index_: return 0
    if self.has_replica_index_ and self.replica_index_ != x.replica_index_: return 0
    if self.has_replica_index_set_ != x.has_replica_index_set_: return 0
    if self.has_replica_index_set_ and self.replica_index_set_ != x.replica_index_set_: return 0
    if self.has_include_app_logs_ != x.has_include_app_logs_: return 0
    if self.has_include_app_logs_ and self.include_app_logs_ != x.include_app_logs_: return 0
    if self.has_app_logs_per_request_ != x.has_app_logs_per_request_: return 0
    if self.has_app_logs_per_request_ and self.app_logs_per_request_ != x.app_logs_per_request_: return 0
    if self.has_app_logs_per_request_set_ != x.has_app_logs_per_request_set_: return 0
    if self.has_app_logs_per_request_set_ and self.app_logs_per_request_set_ != x.app_logs_per_request_set_: return 0
    if self.has_include_host_ != x.has_include_host_: return 0
    if self.has_include_host_ and self.include_host_ != x.include_host_: return 0
    if self.has_include_all_ != x.has_include_all_: return 0
    if self.has_include_all_ and self.include_all_ != x.include_all_: return 0
    if self.has_cache_iterator_ != x.has_cache_iterator_: return 0
    if self.has_cache_iterator_ and self.cache_iterator_ != x.cache_iterator_: return 0
    if self.has_num_shards_ != x.has_num_shards_: return 0
    if self.has_num_shards_ and self.num_shards_ != x.num_shards_: return 0
    if self.has_num_shards_set_ != x.has_num_shards_set_: return 0
    if self.has_num_shards_set_ and self.num_shards_set_ != x.num_shards_set_: return 0
    return 1

  def IsInitialized(self, debug_strs=None):
    """Return 1 iff all required fields are set, recursively.

    app_id is the only required field on this message; module_version
    entries and the offset submessage are checked recursively.  Missing
    fields are described in `debug_strs` when a list is supplied.
    """
    initialized = 1
    if (not self.has_app_id_):
      initialized = 0
      if debug_strs is not None:
        debug_strs.append('Required field: app_id not set.')
    for p in self.module_version_:
      if not p.IsInitialized(debug_strs): initialized=0
    if (self.has_offset_ and not self.offset_.IsInitialized(debug_strs)): initialized = 0
    return initialized

  def ByteSize(self):
    """Serialized size in bytes, assuming the message is initialized.

    Per-field constants are tag-byte counts: fields 1-15 have 1-byte
    tags, 16-19 have 2-byte tags, and the 100-range '*_set' fields have
    3-byte varint tags.  The trailing '+ 1' is the tag byte of the
    required app_id field, which OutputUnchecked always writes.
    """
    n = 0
    n += self.lengthString(len(self.app_id_))
    n += 1 * len(self.version_id_)
    for i in range(len(self.version_id_)): n += self.lengthString(len(self.version_id_[i]))
    n += 2 * len(self.module_version_)
    for i in range(len(self.module_version_)): n += self.lengthString(self.module_version_[i].ByteSize())
    if (self.has_start_time_): n += 1 + self.lengthVarInt64(self.start_time_)
    if (self.has_start_time_set_): n += 3
    if (self.has_end_time_): n += 1 + self.lengthVarInt64(self.end_time_)
    if (self.has_end_time_set_): n += 3
    if (self.has_offset_): n += 1 + self.lengthString(self.offset_.ByteSize())
    n += 1 * len(self.request_id_)
    for i in range(len(self.request_id_)): n += self.lengthString(len(self.request_id_[i]))
    if (self.has_minimum_log_level_): n += 1 + self.lengthVarInt64(self.minimum_log_level_)
    if (self.has_minimum_log_level_set_): n += 3
    if (self.has_include_incomplete_): n += 2
    if (self.has_count_): n += 1 + self.lengthVarInt64(self.count_)
    if (self.has_count_set_): n += 3
    if (self.has_combined_log_regex_): n += 1 + self.lengthString(len(self.combined_log_regex_))
    if (self.has_combined_log_regex_set_): n += 3
    if (self.has_host_regex_): n += 1 + self.lengthString(len(self.host_regex_))
    if (self.has_host_regex_set_): n += 3
    if (self.has_replica_index_): n += 2 + self.lengthVarInt64(self.replica_index_)
    if (self.has_replica_index_set_): n += 3
    if (self.has_include_app_logs_): n += 2
    if (self.has_app_logs_per_request_): n += 2 + self.lengthVarInt64(self.app_logs_per_request_)
    if (self.has_app_logs_per_request_set_): n += 3
    if (self.has_include_host_): n += 2
    if (self.has_include_all_): n += 2
    if (self.has_cache_iterator_): n += 2
    if (self.has_num_shards_): n += 2 + self.lengthVarInt64(self.num_shards_)
    if (self.has_num_shards_set_): n += 3
    return n + 1

  def ByteSizePartial(self):
    """Like ByteSize(), but tolerates an unset required app_id.

    The app_id tag byte is counted only when the field is present, and
    submessages are sized with their own *Partial variants.
    """
    n = 0
    if (self.has_app_id_):
      n += 1
      n += self.lengthString(len(self.app_id_))
    n += 1 * len(self.version_id_)
    for i in range(len(self.version_id_)): n += self.lengthString(len(self.version_id_[i]))
    n += 2 * len(self.module_version_)
    for i in range(len(self.module_version_)): n += self.lengthString(self.module_version_[i].ByteSizePartial())
    if (self.has_start_time_): n += 1 + self.lengthVarInt64(self.start_time_)
    if (self.has_start_time_set_): n += 3
    if (self.has_end_time_): n += 1 + self.lengthVarInt64(self.end_time_)
    if (self.has_end_time_set_): n += 3
    if (self.has_offset_): n += 1 + self.lengthString(self.offset_.ByteSizePartial())
    n += 1 * len(self.request_id_)
    for i in range(len(self.request_id_)): n += self.lengthString(len(self.request_id_[i]))
    if (self.has_minimum_log_level_): n += 1 + self.lengthVarInt64(self.minimum_log_level_)
    if (self.has_minimum_log_level_set_): n += 3
    if (self.has_include_incomplete_): n += 2
    if (self.has_count_): n += 1 + self.lengthVarInt64(self.count_)
    if (self.has_count_set_): n += 3
    if (self.has_combined_log_regex_): n += 1 + self.lengthString(len(self.combined_log_regex_))
    if (self.has_combined_log_regex_set_): n += 3
    if (self.has_host_regex_): n += 1 + self.lengthString(len(self.host_regex_))
    if (self.has_host_regex_set_): n += 3
    if (self.has_replica_index_): n += 2 + self.lengthVarInt64(self.replica_index_)
    if (self.has_replica_index_set_): n += 3
    if (self.has_include_app_logs_): n += 2
    if (self.has_app_logs_per_request_): n += 2 + self.lengthVarInt64(self.app_logs_per_request_)
    if (self.has_app_logs_per_request_set_): n += 3
    if (self.has_include_host_): n += 2
    if (self.has_include_all_): n += 2
    if (self.has_cache_iterator_): n += 2
    if (self.has_num_shards_): n += 2 + self.lengthVarInt64(self.num_shards_)
    if (self.has_num_shards_set_): n += 3
    return n

  def Clear(self):
    """Reset every field to its unset/default state."""
    self.clear_app_id()
    self.clear_version_id()
    self.clear_module_version()
    self.clear_start_time()
    self.clear_start_time_set()
    self.clear_end_time()
    self.clear_end_time_set()
    self.clear_offset()
    self.clear_request_id()
    self.clear_minimum_log_level()
    self.clear_minimum_log_level_set()
    self.clear_include_incomplete()
    self.clear_count()
    self.clear_count_set()
    self.clear_combined_log_regex()
    self.clear_combined_log_regex_set()
    self.clear_host_regex()
    self.clear_host_regex_set()
    self.clear_replica_index()
    self.clear_replica_index_set()
    self.clear_include_app_logs()
    self.clear_app_logs_per_request()
    self.clear_app_logs_per_request_set()
    self.clear_include_host()
    self.clear_include_all()
    self.clear_cache_iterator()
    self.clear_num_shards()
    self.clear_num_shards_set()

  def OutputUnchecked(self, out):
    """Serialize to encoder `out` without checking IsInitialized().

    The literal putVarInt32 arguments are precomputed wire tags
    (field_number << 3 | wire_type); app_id (tag 10) is written
    unconditionally because the field is required.
    """
    out.putVarInt32(10)
    out.putPrefixedString(self.app_id_)
    for i in range(len(self.version_id_)):
      out.putVarInt32(18)
      out.putPrefixedString(self.version_id_[i])
    if (self.has_start_time_):
      out.putVarInt32(24)
      out.putVarInt64(self.start_time_)
    if (self.has_end_time_):
      out.putVarInt32(32)
      out.putVarInt64(self.end_time_)
    if (self.has_offset_):
      out.putVarInt32(42)
      out.putVarInt32(self.offset_.ByteSize())
      self.offset_.OutputUnchecked(out)
    for i in range(len(self.request_id_)):
      out.putVarInt32(50)
      out.putPrefixedString(self.request_id_[i])
    if (self.has_minimum_log_level_):
      out.putVarInt32(56)
      out.putVarInt32(self.minimum_log_level_)
    if (self.has_include_incomplete_):
      out.putVarInt32(64)
      out.putBoolean(self.include_incomplete_)
    if (self.has_count_):
      out.putVarInt32(72)
      out.putVarInt64(self.count_)
    if (self.has_include_app_logs_):
      out.putVarInt32(80)
      out.putBoolean(self.include_app_logs_)
    if (self.has_include_host_):
      out.putVarInt32(88)
      out.putBoolean(self.include_host_)
    if (self.has_include_all_):
      out.putVarInt32(96)
      out.putBoolean(self.include_all_)
    if (self.has_cache_iterator_):
      out.putVarInt32(104)
      out.putBoolean(self.cache_iterator_)
    if (self.has_combined_log_regex_):
      out.putVarInt32(114)
      out.putPrefixedString(self.combined_log_regex_)
    if (self.has_host_regex_):
      out.putVarInt32(122)
      out.putPrefixedString(self.host_regex_)
    if (self.has_replica_index_):
      out.putVarInt32(128)
      out.putVarInt32(self.replica_index_)
    if (self.has_app_logs_per_request_):
      out.putVarInt32(136)
      out.putVarInt32(self.app_logs_per_request_)
    if (self.has_num_shards_):
      out.putVarInt32(144)
      out.putVarInt32(self.num_shards_)
    for i in range(len(self.module_version_)):
      out.putVarInt32(154)
      out.putVarInt32(self.module_version_[i].ByteSize())
      self.module_version_[i].OutputUnchecked(out)
    if (self.has_start_time_set_):
      out.putVarInt32(824)
      out.putBoolean(self.start_time_set_)
    if (self.has_end_time_set_):
      out.putVarInt32(832)
      out.putBoolean(self.end_time_set_)
    if (self.has_minimum_log_level_set_):
      out.putVarInt32(856)
      out.putBoolean(self.minimum_log_level_set_)
    if (self.has_count_set_):
      out.putVarInt32(872)
      out.putBoolean(self.count_set_)
    if (self.has_combined_log_regex_set_):
      out.putVarInt32(912)
      out.putBoolean(self.combined_log_regex_set_)
    if (self.has_host_regex_set_):
      out.putVarInt32(920)
      out.putBoolean(self.host_regex_set_)
    if (self.has_replica_index_set_):
      out.putVarInt32(928)
      out.putBoolean(self.replica_index_set_)
    if (self.has_app_logs_per_request_set_):
      out.putVarInt32(936)
      out.putBoolean(self.app_logs_per_request_set_)
    if (self.has_num_shards_set_):
      out.putVarInt32(944)
      out.putBoolean(self.num_shards_set_)

  def OutputPartial(self, out):
    """Like OutputUnchecked(), but skips an unset required app_id and
    uses *Partial sizing/serialization for submessages."""
    if (self.has_app_id_):
      out.putVarInt32(10)
      out.putPrefixedString(self.app_id_)
    for i in range(len(self.version_id_)):
      out.putVarInt32(18)
      out.putPrefixedString(self.version_id_[i])
    if (self.has_start_time_):
      out.putVarInt32(24)
      out.putVarInt64(self.start_time_)
    if (self.has_end_time_):
      out.putVarInt32(32)
      out.putVarInt64(self.end_time_)
    if (self.has_offset_):
      out.putVarInt32(42)
      out.putVarInt32(self.offset_.ByteSizePartial())
      self.offset_.OutputPartial(out)
    for i in range(len(self.request_id_)):
      out.putVarInt32(50)
      out.putPrefixedString(self.request_id_[i])
    if (self.has_minimum_log_level_):
      out.putVarInt32(56)
      out.putVarInt32(self.minimum_log_level_)
    if (self.has_include_incomplete_):
      out.putVarInt32(64)
      out.putBoolean(self.include_incomplete_)
    if (self.has_count_):
      out.putVarInt32(72)
      out.putVarInt64(self.count_)
    if (self.has_include_app_logs_):
      out.putVarInt32(80)
      out.putBoolean(self.include_app_logs_)
    if (self.has_include_host_):
      out.putVarInt32(88)
      out.putBoolean(self.include_host_)
    if (self.has_include_all_):
      out.putVarInt32(96)
      out.putBoolean(self.include_all_)
    if (self.has_cache_iterator_):
      out.putVarInt32(104)
      out.putBoolean(self.cache_iterator_)
    if (self.has_combined_log_regex_):
      out.putVarInt32(114)
      out.putPrefixedString(self.combined_log_regex_)
    if (self.has_host_regex_):
      out.putVarInt32(122)
      out.putPrefixedString(self.host_regex_)
    if (self.has_replica_index_):
      out.putVarInt32(128)
      out.putVarInt32(self.replica_index_)
    if (self.has_app_logs_per_request_):
      out.putVarInt32(136)
      out.putVarInt32(self.app_logs_per_request_)
    if (self.has_num_shards_):
      out.putVarInt32(144)
      out.putVarInt32(self.num_shards_)
    for i in range(len(self.module_version_)):
      out.putVarInt32(154)
      out.putVarInt32(self.module_version_[i].ByteSizePartial())
      self.module_version_[i].OutputPartial(out)
    if (self.has_start_time_set_):
      out.putVarInt32(824)
      out.putBoolean(self.start_time_set_)
    if (self.has_end_time_set_):
      out.putVarInt32(832)
      out.putBoolean(self.end_time_set_)
    if (self.has_minimum_log_level_set_):
      out.putVarInt32(856)
      out.putBoolean(self.minimum_log_level_set_)
    if (self.has_count_set_):
      out.putVarInt32(872)
      out.putBoolean(self.count_set_)
    if (self.has_combined_log_regex_set_):
      out.putVarInt32(912)
      out.putBoolean(self.combined_log_regex_set_)
    if (self.has_host_regex_set_):
      out.putVarInt32(920)
      out.putBoolean(self.host_regex_set_)
    if (self.has_replica_index_set_):
      out.putVarInt32(928)
      out.putBoolean(self.replica_index_set_)
    if (self.has_app_logs_per_request_set_):
      out.putVarInt32(936)
      out.putBoolean(self.app_logs_per_request_set_)
    if (self.has_num_shards_set_):
      out.putVarInt32(944)
      out.putBoolean(self.num_shards_set_)

  def TryMerge(self, d):
    """Decode fields from decoder `d` and merge them into self.

    Each `tt` compared below is a precomputed wire tag; submessage
    fields (42 = offset, 154 = module_version) are parsed from a
    bounded sub-decoder.  Unknown non-zero tags are skipped; tag 0 is
    malformed input and raises.
    """
    while d.avail() > 0:
      tt = d.getVarInt32()
      if tt == 10:
        self.set_app_id(d.getPrefixedString())
        continue
      if tt == 18:
        self.add_version_id(d.getPrefixedString())
        continue
      if tt == 24:
        self.set_start_time(d.getVarInt64())
        continue
      if tt == 32:
        self.set_end_time(d.getVarInt64())
        continue
      if tt == 42:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.mutable_offset().TryMerge(tmp)
        continue
      if tt == 50:
        self.add_request_id(d.getPrefixedString())
        continue
      if tt == 56:
        self.set_minimum_log_level(d.getVarInt32())
        continue
      if tt == 64:
        self.set_include_incomplete(d.getBoolean())
        continue
      if tt == 72:
        self.set_count(d.getVarInt64())
        continue
      if tt == 80:
        self.set_include_app_logs(d.getBoolean())
        continue
      if tt == 88:
        self.set_include_host(d.getBoolean())
        continue
      if tt == 96:
        self.set_include_all(d.getBoolean())
        continue
      if tt == 104:
        self.set_cache_iterator(d.getBoolean())
        continue
      if tt == 114:
        self.set_combined_log_regex(d.getPrefixedString())
        continue
      if tt == 122:
        self.set_host_regex(d.getPrefixedString())
        continue
      if tt == 128:
        self.set_replica_index(d.getVarInt32())
        continue
      if tt == 136:
        self.set_app_logs_per_request(d.getVarInt32())
        continue
      if tt == 144:
        self.set_num_shards(d.getVarInt32())
        continue
      if tt == 154:
        length = d.getVarInt32()
        tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
        d.skip(length)
        self.add_module_version().TryMerge(tmp)
        continue
      if tt == 824:
        self.set_start_time_set(d.getBoolean())
        continue
      if tt == 832:
        self.set_end_time_set(d.getBoolean())
        continue
      if tt == 856:
        self.set_minimum_log_level_set(d.getBoolean())
        continue
      if tt == 872:
        self.set_count_set(d.getBoolean())
        continue
      if tt == 912:
        self.set_combined_log_regex_set(d.getBoolean())
        continue
      if tt == 920:
        self.set_host_regex_set(d.getBoolean())
        continue
      if tt == 928:
        self.set_replica_index_set(d.getBoolean())
        continue
      if tt == 936:
        self.set_app_logs_per_request_set(d.getBoolean())
        continue
      if tt == 944:
        self.set_num_shards_set(d.getBoolean())
        continue

      # tag 0 is invalid wire data; anything else is an unknown field.
      if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
      d.skipData(tt)


  def __str__(self, prefix="", printElemNumber=0):
    """Render the set fields as indented debug text (one field per line)."""
    res=""
    if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
    cnt=0
    for e in self.version_id_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("version_id%s: %s\n" % (elm, self.DebugFormatString(e)))
      cnt+=1
    cnt=0
    for e in self.module_version_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("module_version%s <\n" % elm)
      res+=e.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
      cnt+=1
    if self.has_start_time_: res+=prefix+("start_time: %s\n" % self.DebugFormatInt64(self.start_time_))
    if self.has_start_time_set_: res+=prefix+("start_time_set: %s\n" % self.DebugFormatBool(self.start_time_set_))
    if self.has_end_time_: res+=prefix+("end_time: %s\n" % self.DebugFormatInt64(self.end_time_))
    if self.has_end_time_set_: res+=prefix+("end_time_set: %s\n" % self.DebugFormatBool(self.end_time_set_))
    if self.has_offset_:
      res+=prefix+"offset <\n"
      res+=self.offset_.__str__(prefix + "  ", printElemNumber)
      res+=prefix+">\n"
    cnt=0
    for e in self.request_id_:
      elm=""
      if printElemNumber: elm="(%d)" % cnt
      res+=prefix+("request_id%s: %s\n" % (elm, self.DebugFormatString(e)))
      cnt+=1
    if self.has_minimum_log_level_: res+=prefix+("minimum_log_level: %s\n" % self.DebugFormatInt32(self.minimum_log_level_))
    if self.has_minimum_log_level_set_: res+=prefix+("minimum_log_level_set: %s\n" % self.DebugFormatBool(self.minimum_log_level_set_))
    if self.has_include_incomplete_: res+=prefix+("include_incomplete: %s\n" % self.DebugFormatBool(self.include_incomplete_))
    if self.has_count_: res+=prefix+("count: %s\n" % self.DebugFormatInt64(self.count_))
    if self.has_count_set_: res+=prefix+("count_set: %s\n" % self.DebugFormatBool(self.count_set_))
    if self.has_combined_log_regex_: res+=prefix+("combined_log_regex: %s\n" % self.DebugFormatString(self.combined_log_regex_))
    if self.has_combined_log_regex_set_: res+=prefix+("combined_log_regex_set: %s\n" % self.DebugFormatBool(self.combined_log_regex_set_))
    if self.has_host_regex_: res+=prefix+("host_regex: %s\n" % self.DebugFormatString(self.host_regex_))
    if self.has_host_regex_set_: res+=prefix+("host_regex_set: %s\n" % self.DebugFormatBool(self.host_regex_set_))
    if self.has_replica_index_: res+=prefix+("replica_index: %s\n" % self.DebugFormatInt32(self.replica_index_))
    if self.has_replica_index_set_: res+=prefix+("replica_index_set: %s\n" % self.DebugFormatBool(self.replica_index_set_))
    if self.has_include_app_logs_: res+=prefix+("include_app_logs: %s\n" % self.DebugFormatBool(self.include_app_logs_))
    if self.has_app_logs_per_request_: res+=prefix+("app_logs_per_request: %s\n" % self.DebugFormatInt32(self.app_logs_per_request_))
    if self.has_app_logs_per_request_set_: res+=prefix+("app_logs_per_request_set: %s\n" % self.DebugFormatBool(self.app_logs_per_request_set_))
    if self.has_include_host_: res+=prefix+("include_host: %s\n" % self.DebugFormatBool(self.include_host_))
    if self.has_include_all_: res+=prefix+("include_all: %s\n" % self.DebugFormatBool(self.include_all_))
    if self.has_cache_iterator_: res+=prefix+("cache_iterator: %s\n" % self.DebugFormatBool(self.cache_iterator_))
    if self.has_num_shards_: res+=prefix+("num_shards: %s\n" % self.DebugFormatInt32(self.num_shards_))
    if self.has_num_shards_set_: res+=prefix+("num_shards_set: %s\n" % self.DebugFormatBool(self.num_shards_set_))
    return res

  # NOTE: invoked below at class-body evaluation time (no self); 'sparse'
  # is the tag->value dict and the result is a dense tuple indexed by tag.
  def _BuildTagLookupTable(sparse, maxtag, default=None):
    return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

  # Field numbers from the .proto definition (k<field> = tag number).
  kapp_id = 1
  kversion_id = 2
  kmodule_version = 19
  kstart_time = 3
  kstart_time_set = 103
  kend_time = 4
  kend_time_set = 104
  koffset = 5
  krequest_id = 6
  kminimum_log_level = 7
  kminimum_log_level_set = 107
  kinclude_incomplete = 8
  kcount = 9
  kcount_set = 109
  kcombined_log_regex = 14
  kcombined_log_regex_set = 114
  khost_regex = 15
  khost_regex_set = 115
  kreplica_index = 16
  kreplica_index_set = 116
  kinclude_app_logs = 10
  kapp_logs_per_request = 17
  kapp_logs_per_request_set = 117
  kinclude_host = 11
  kinclude_all = 12
  kcache_iterator = 13
  knum_shards = 18
  knum_shards_set = 118

  # Tag -> field-name table for debug/text output.
  _TEXT = _BuildTagLookupTable({
    0: "ErrorCode",
    1: "app_id",
    2: "version_id",
    3: "start_time",
    4: "end_time",
    5: "offset",
    6: "request_id",
    7: "minimum_log_level",
    8: "include_incomplete",
    9: "count",
    10: "include_app_logs",
    11: "include_host",
    12: "include_all",
    13: "cache_iterator",
    14: "combined_log_regex",
    15: "host_regex",
    16: "replica_index",
    17: "app_logs_per_request",
    18: "num_shards",
    19: "module_version",
    103: "start_time_set",
    104: "end_time_set",
    107: "minimum_log_level_set",
    109: "count_set",
    114: "combined_log_regex_set",
    115: "host_regex_set",
    116: "replica_index_set",
    117: "app_logs_per_request_set",
    118: "num_shards_set",
  }, 118)

  # Tag -> wire-type table (NUMERIC = varint, STRING = length-prefixed).
  _TYPES = _BuildTagLookupTable({
    0: ProtocolBuffer.Encoder.NUMERIC,
    1: ProtocolBuffer.Encoder.STRING,
    2: ProtocolBuffer.Encoder.STRING,
    3: ProtocolBuffer.Encoder.NUMERIC,
    4: ProtocolBuffer.Encoder.NUMERIC,
    5: ProtocolBuffer.Encoder.STRING,
    6: ProtocolBuffer.Encoder.STRING,
    7: ProtocolBuffer.Encoder.NUMERIC,
    8: ProtocolBuffer.Encoder.NUMERIC,
    9: ProtocolBuffer.Encoder.NUMERIC,
    10: ProtocolBuffer.Encoder.NUMERIC,
    11: ProtocolBuffer.Encoder.NUMERIC,
    12: ProtocolBuffer.Encoder.NUMERIC,
    13: ProtocolBuffer.Encoder.NUMERIC,
    14: ProtocolBuffer.Encoder.STRING,
    15: ProtocolBuffer.Encoder.STRING,
    16: ProtocolBuffer.Encoder.NUMERIC,
    17: ProtocolBuffer.Encoder.NUMERIC,
    18: ProtocolBuffer.Encoder.NUMERIC,
    19: ProtocolBuffer.Encoder.STRING,
    103: ProtocolBuffer.Encoder.NUMERIC,
    104: ProtocolBuffer.Encoder.NUMERIC,
    107: ProtocolBuffer.Encoder.NUMERIC,
    109: ProtocolBuffer.Encoder.NUMERIC,
    114: ProtocolBuffer.Encoder.NUMERIC,
    115: ProtocolBuffer.Encoder.NUMERIC,
    116: ProtocolBuffer.Encoder.NUMERIC,
    117: ProtocolBuffer.Encoder.NUMERIC,
    118: ProtocolBuffer.Encoder.NUMERIC,
  }, 118, ProtocolBuffer.Encoder.MAX_TYPE)

  _STYLE = """"""
  _STYLE_CONTENT_TYPE = """"""
  _PROTO_DESCRIPTOR_NAME = 'apphosting.LogReadRequest'
class LogReadResponse(ProtocolBuffer.ProtocolMessage):
    """Generated protobuf message 'apphosting.LogReadResponse'.

    Fields (tags in the tables at the bottom of the class):
      log           -- repeated RequestLog, tag 1
      offset        -- optional LogOffset, tag 2 (lazily allocated)
      last_end_time -- optional varint64, tag 3
    """

    # Presence flag and lazily-created value for the optional 'offset' field;
    # scalar 'last_end_time' defaults to 0.
    has_offset_ = 0
    offset_ = None
    has_last_end_time_ = 0
    last_end_time_ = 0

    def __init__(self, contents=None):
        self.log_ = []
        # Guards one-time lazy construction of offset_ in offset().
        self.lazy_init_lock_ = thread.allocate_lock()
        if contents is not None: self.MergeFromString(contents)

    def log_size(self): return len(self.log_)

    def log_list(self): return self.log_

    def log(self, i):
        return self.log_[i]

    def mutable_log(self, i):
        return self.log_[i]

    def add_log(self):
        # Append and return a fresh RequestLog element for in-place filling.
        x = RequestLog()
        self.log_.append(x)
        return x

    def clear_log(self):
        self.log_ = []

    def offset(self):
        # Double-checked lazy init: at most one LogOffset is ever created
        # even with concurrent readers.
        if self.offset_ is None:
            self.lazy_init_lock_.acquire()
            try:
                if self.offset_ is None: self.offset_ = LogOffset()
            finally:
                self.lazy_init_lock_.release()
        return self.offset_

    def mutable_offset(self): self.has_offset_ = 1; return self.offset()

    def clear_offset(self):
        # Clear the presence bit but keep the allocated instance for reuse.
        if self.has_offset_:
            self.has_offset_ = 0;
            if self.offset_ is not None: self.offset_.Clear()

    def has_offset(self): return self.has_offset_

    def last_end_time(self): return self.last_end_time_

    def set_last_end_time(self, x):
        self.has_last_end_time_ = 1
        self.last_end_time_ = x

    def clear_last_end_time(self):
        if self.has_last_end_time_:
            self.has_last_end_time_ = 0
            self.last_end_time_ = 0

    def has_last_end_time(self): return self.has_last_end_time_

    def MergeFrom(self, x):
        """Merge every set field of *x* (another LogReadResponse) into self."""
        assert x is not self
        for i in range(x.log_size()): self.add_log().CopyFrom(x.log(i))
        if (x.has_offset()): self.mutable_offset().MergeFrom(x.offset())
        if (x.has_last_end_time()): self.set_last_end_time(x.last_end_time())

    def Equals(self, x):
        """Field-by-field equality; returns 1/0 per generated-runtime style."""
        if x is self: return 1
        if len(self.log_) != len(x.log_): return 0
        for e1, e2 in zip(self.log_, x.log_):
            if e1 != e2: return 0
        if self.has_offset_ != x.has_offset_: return 0
        if self.has_offset_ and self.offset_ != x.offset_: return 0
        if self.has_last_end_time_ != x.has_last_end_time_: return 0
        if self.has_last_end_time_ and self.last_end_time_ != x.last_end_time_: return 0
        return 1

    def IsInitialized(self, debug_strs=None):
        """Return 1 when all (recursive) required fields are set."""
        initialized = 1
        for p in self.log_:
            if not p.IsInitialized(debug_strs): initialized=0
        if (self.has_offset_ and not self.offset_.IsInitialized(debug_strs)): initialized = 0
        return initialized

    def ByteSize(self):
        """Exact serialized size in bytes (1 tag byte per field occurrence)."""
        n = 0
        n += 1 * len(self.log_)
        for i in range(len(self.log_)): n += self.lengthString(self.log_[i].ByteSize())
        if (self.has_offset_): n += 1 + self.lengthString(self.offset_.ByteSize())
        if (self.has_last_end_time_): n += 1 + self.lengthVarInt64(self.last_end_time_)
        return n

    def ByteSizePartial(self):
        # Like ByteSize but tolerates missing required fields in sub-messages.
        n = 0
        n += 1 * len(self.log_)
        for i in range(len(self.log_)): n += self.lengthString(self.log_[i].ByteSizePartial())
        if (self.has_offset_): n += 1 + self.lengthString(self.offset_.ByteSizePartial())
        if (self.has_last_end_time_): n += 1 + self.lengthVarInt64(self.last_end_time_)
        return n

    def Clear(self):
        self.clear_log()
        self.clear_offset()
        self.clear_last_end_time()

    def OutputUnchecked(self, out):
        """Serialize to encoder *out*; wire tags: 10=log, 18=offset, 24=last_end_time."""
        for i in range(len(self.log_)):
            out.putVarInt32(10)
            out.putVarInt32(self.log_[i].ByteSize())
            self.log_[i].OutputUnchecked(out)
        if (self.has_offset_):
            out.putVarInt32(18)
            out.putVarInt32(self.offset_.ByteSize())
            self.offset_.OutputUnchecked(out)
        if (self.has_last_end_time_):
            out.putVarInt32(24)
            out.putVarInt64(self.last_end_time_)

    def OutputPartial(self, out):
        # Serialization variant that skips required-field checks.
        for i in range(len(self.log_)):
            out.putVarInt32(10)
            out.putVarInt32(self.log_[i].ByteSizePartial())
            self.log_[i].OutputPartial(out)
        if (self.has_offset_):
            out.putVarInt32(18)
            out.putVarInt32(self.offset_.ByteSizePartial())
            self.offset_.OutputPartial(out)
        if (self.has_last_end_time_):
            out.putVarInt32(24)
            out.putVarInt64(self.last_end_time_)

    def TryMerge(self, d):
        """Parse fields from decoder *d*, merging into self; skips unknown tags."""
        while d.avail() > 0:
            tt = d.getVarInt32()
            if tt == 10:
                # Length-delimited sub-message: decode from a bounded view.
                length = d.getVarInt32()
                tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
                d.skip(length)
                self.add_log().TryMerge(tmp)
                continue
            if tt == 18:
                length = d.getVarInt32()
                tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
                d.skip(length)
                self.mutable_offset().TryMerge(tmp)
                continue
            if tt == 24:
                self.set_last_end_time(d.getVarInt64())
                continue
            # Tag 0 is invalid on the wire.
            if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
            d.skipData(tt)

    def __str__(self, prefix="", printElemNumber=0):
        res=""
        cnt=0
        for e in self.log_:
            elm=""
            if printElemNumber: elm="(%d)" % cnt
            res+=prefix+("log%s <\n" % elm)
            res+=e.__str__(prefix + "  ", printElemNumber)
            res+=prefix+">\n"
            cnt+=1
        if self.has_offset_:
            res+=prefix+"offset <\n"
            res+=self.offset_.__str__(prefix + "  ", printElemNumber)
            res+=prefix+">\n"
        if self.has_last_end_time_: res+=prefix+("last_end_time: %s\n" % self.DebugFormatInt64(self.last_end_time_))
        return res

    def _BuildTagLookupTable(sparse, maxtag, default=None):
        # No 'self': only invoked at class-definition time to build the tables.
        return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

    # Field-name -> tag-number constants.
    klog = 1
    koffset = 2
    klast_end_time = 3

    # Tag -> field name (slot 0 is the error slot).
    _TEXT = _BuildTagLookupTable({
        0: "ErrorCode",
        1: "log",
        2: "offset",
        3: "last_end_time",
    }, 3)

    # Tag -> wire encoding.
    _TYPES = _BuildTagLookupTable({
        0: ProtocolBuffer.Encoder.NUMERIC,
        1: ProtocolBuffer.Encoder.STRING,
        2: ProtocolBuffer.Encoder.STRING,
        3: ProtocolBuffer.Encoder.NUMERIC,
    }, 3, ProtocolBuffer.Encoder.MAX_TYPE)

    _STYLE = """"""
    _STYLE_CONTENT_TYPE = """"""
    _PROTO_DESCRIPTOR_NAME = 'apphosting.LogReadResponse'
class LogUsageRecord(ProtocolBuffer.ProtocolMessage):
    """Generated protobuf message 'apphosting.LogUsageRecord'.

    All-optional scalar fields (tags in the tables at the bottom):
      version_id (string, 1), start_time (int32, 2), end_time (int32, 3),
      count (int64, 4), total_size (int64, 5), records (int32, 6)
    """

    # Presence flags and default values for each optional field.
    has_version_id_ = 0
    version_id_ = ""
    has_start_time_ = 0
    start_time_ = 0
    has_end_time_ = 0
    end_time_ = 0
    has_count_ = 0
    count_ = 0
    has_total_size_ = 0
    total_size_ = 0
    has_records_ = 0
    records_ = 0

    def __init__(self, contents=None):
        if contents is not None: self.MergeFromString(contents)

    def version_id(self): return self.version_id_

    def set_version_id(self, x):
        self.has_version_id_ = 1
        self.version_id_ = x

    def clear_version_id(self):
        if self.has_version_id_:
            self.has_version_id_ = 0
            self.version_id_ = ""

    def has_version_id(self): return self.has_version_id_

    def start_time(self): return self.start_time_

    def set_start_time(self, x):
        self.has_start_time_ = 1
        self.start_time_ = x

    def clear_start_time(self):
        if self.has_start_time_:
            self.has_start_time_ = 0
            self.start_time_ = 0

    def has_start_time(self): return self.has_start_time_

    def end_time(self): return self.end_time_

    def set_end_time(self, x):
        self.has_end_time_ = 1
        self.end_time_ = x

    def clear_end_time(self):
        if self.has_end_time_:
            self.has_end_time_ = 0
            self.end_time_ = 0

    def has_end_time(self): return self.has_end_time_

    def count(self): return self.count_

    def set_count(self, x):
        self.has_count_ = 1
        self.count_ = x

    def clear_count(self):
        if self.has_count_:
            self.has_count_ = 0
            self.count_ = 0

    def has_count(self): return self.has_count_

    def total_size(self): return self.total_size_

    def set_total_size(self, x):
        self.has_total_size_ = 1
        self.total_size_ = x

    def clear_total_size(self):
        if self.has_total_size_:
            self.has_total_size_ = 0
            self.total_size_ = 0

    def has_total_size(self): return self.has_total_size_

    def records(self): return self.records_

    def set_records(self, x):
        self.has_records_ = 1
        self.records_ = x

    def clear_records(self):
        if self.has_records_:
            self.has_records_ = 0
            self.records_ = 0

    def has_records(self): return self.has_records_

    def MergeFrom(self, x):
        """Copy every set field of *x* (another LogUsageRecord) into self."""
        assert x is not self
        if (x.has_version_id()): self.set_version_id(x.version_id())
        if (x.has_start_time()): self.set_start_time(x.start_time())
        if (x.has_end_time()): self.set_end_time(x.end_time())
        if (x.has_count()): self.set_count(x.count())
        if (x.has_total_size()): self.set_total_size(x.total_size())
        if (x.has_records()): self.set_records(x.records())

    def Equals(self, x):
        """Field-by-field equality; returns 1/0 per generated-runtime style."""
        if x is self: return 1
        if self.has_version_id_ != x.has_version_id_: return 0
        if self.has_version_id_ and self.version_id_ != x.version_id_: return 0
        if self.has_start_time_ != x.has_start_time_: return 0
        if self.has_start_time_ and self.start_time_ != x.start_time_: return 0
        if self.has_end_time_ != x.has_end_time_: return 0
        if self.has_end_time_ and self.end_time_ != x.end_time_: return 0
        if self.has_count_ != x.has_count_: return 0
        if self.has_count_ and self.count_ != x.count_: return 0
        if self.has_total_size_ != x.has_total_size_: return 0
        if self.has_total_size_ and self.total_size_ != x.total_size_: return 0
        if self.has_records_ != x.has_records_: return 0
        if self.has_records_ and self.records_ != x.records_: return 0
        return 1

    def IsInitialized(self, debug_strs=None):
        # No required fields: always initialized.
        initialized = 1
        return initialized

    def ByteSize(self):
        """Exact serialized size in bytes (1 tag byte per set field)."""
        n = 0
        if (self.has_version_id_): n += 1 + self.lengthString(len(self.version_id_))
        if (self.has_start_time_): n += 1 + self.lengthVarInt64(self.start_time_)
        if (self.has_end_time_): n += 1 + self.lengthVarInt64(self.end_time_)
        if (self.has_count_): n += 1 + self.lengthVarInt64(self.count_)
        if (self.has_total_size_): n += 1 + self.lengthVarInt64(self.total_size_)
        if (self.has_records_): n += 1 + self.lengthVarInt64(self.records_)
        return n

    def ByteSizePartial(self):
        # Identical to ByteSize here because there are no required fields.
        n = 0
        if (self.has_version_id_): n += 1 + self.lengthString(len(self.version_id_))
        if (self.has_start_time_): n += 1 + self.lengthVarInt64(self.start_time_)
        if (self.has_end_time_): n += 1 + self.lengthVarInt64(self.end_time_)
        if (self.has_count_): n += 1 + self.lengthVarInt64(self.count_)
        if (self.has_total_size_): n += 1 + self.lengthVarInt64(self.total_size_)
        if (self.has_records_): n += 1 + self.lengthVarInt64(self.records_)
        return n

    def Clear(self):
        self.clear_version_id()
        self.clear_start_time()
        self.clear_end_time()
        self.clear_count()
        self.clear_total_size()
        self.clear_records()

    def OutputUnchecked(self, out):
        """Serialize to encoder *out*; tags 10/16/24/32/40/48 per field order."""
        if (self.has_version_id_):
            out.putVarInt32(10)
            out.putPrefixedString(self.version_id_)
        if (self.has_start_time_):
            out.putVarInt32(16)
            out.putVarInt32(self.start_time_)
        if (self.has_end_time_):
            out.putVarInt32(24)
            out.putVarInt32(self.end_time_)
        if (self.has_count_):
            out.putVarInt32(32)
            out.putVarInt64(self.count_)
        if (self.has_total_size_):
            out.putVarInt32(40)
            out.putVarInt64(self.total_size_)
        if (self.has_records_):
            out.putVarInt32(48)
            out.putVarInt32(self.records_)

    def OutputPartial(self, out):
        # Same as OutputUnchecked (no required-field checks to relax).
        if (self.has_version_id_):
            out.putVarInt32(10)
            out.putPrefixedString(self.version_id_)
        if (self.has_start_time_):
            out.putVarInt32(16)
            out.putVarInt32(self.start_time_)
        if (self.has_end_time_):
            out.putVarInt32(24)
            out.putVarInt32(self.end_time_)
        if (self.has_count_):
            out.putVarInt32(32)
            out.putVarInt64(self.count_)
        if (self.has_total_size_):
            out.putVarInt32(40)
            out.putVarInt64(self.total_size_)
        if (self.has_records_):
            out.putVarInt32(48)
            out.putVarInt32(self.records_)

    def TryMerge(self, d):
        """Parse fields from decoder *d*, merging into self; skips unknown tags."""
        while d.avail() > 0:
            tt = d.getVarInt32()
            if tt == 10:
                self.set_version_id(d.getPrefixedString())
                continue
            if tt == 16:
                self.set_start_time(d.getVarInt32())
                continue
            if tt == 24:
                self.set_end_time(d.getVarInt32())
                continue
            if tt == 32:
                self.set_count(d.getVarInt64())
                continue
            if tt == 40:
                self.set_total_size(d.getVarInt64())
                continue
            if tt == 48:
                self.set_records(d.getVarInt32())
                continue
            # Tag 0 is invalid on the wire.
            if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
            d.skipData(tt)

    def __str__(self, prefix="", printElemNumber=0):
        res=""
        if self.has_version_id_: res+=prefix+("version_id: %s\n" % self.DebugFormatString(self.version_id_))
        if self.has_start_time_: res+=prefix+("start_time: %s\n" % self.DebugFormatInt32(self.start_time_))
        if self.has_end_time_: res+=prefix+("end_time: %s\n" % self.DebugFormatInt32(self.end_time_))
        if self.has_count_: res+=prefix+("count: %s\n" % self.DebugFormatInt64(self.count_))
        if self.has_total_size_: res+=prefix+("total_size: %s\n" % self.DebugFormatInt64(self.total_size_))
        if self.has_records_: res+=prefix+("records: %s\n" % self.DebugFormatInt32(self.records_))
        return res

    def _BuildTagLookupTable(sparse, maxtag, default=None):
        # No 'self': only invoked at class-definition time to build the tables.
        return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

    # Field-name -> tag-number constants.
    kversion_id = 1
    kstart_time = 2
    kend_time = 3
    kcount = 4
    ktotal_size = 5
    krecords = 6

    # Tag -> field name (slot 0 is the error slot).
    _TEXT = _BuildTagLookupTable({
        0: "ErrorCode",
        1: "version_id",
        2: "start_time",
        3: "end_time",
        4: "count",
        5: "total_size",
        6: "records",
    }, 6)

    # Tag -> wire encoding.
    _TYPES = _BuildTagLookupTable({
        0: ProtocolBuffer.Encoder.NUMERIC,
        1: ProtocolBuffer.Encoder.STRING,
        2: ProtocolBuffer.Encoder.NUMERIC,
        3: ProtocolBuffer.Encoder.NUMERIC,
        4: ProtocolBuffer.Encoder.NUMERIC,
        5: ProtocolBuffer.Encoder.NUMERIC,
        6: ProtocolBuffer.Encoder.NUMERIC,
    }, 6, ProtocolBuffer.Encoder.MAX_TYPE)

    _STYLE = """"""
    _STYLE_CONTENT_TYPE = """"""
    _PROTO_DESCRIPTOR_NAME = 'apphosting.LogUsageRecord'
class LogUsageRequest(ProtocolBuffer.ProtocolMessage):
    """Generated protobuf message 'apphosting.LogUsageRequest'.

    Fields (tags in the tables at the bottom):
      app_id (required string, 1), version_id (repeated string, 2),
      start_time (int32, 3), end_time (int32, 4),
      resolution_hours (uint64, 5, default 1), combine_versions (bool, 6),
      usage_version (int32, 7), versions_only (bool, 8),
      resolution_hours_set (bool, 105), usage_version_set (bool, 107)
    """

    # Presence flags and default values; note resolution_hours defaults to 1.
    has_app_id_ = 0
    app_id_ = ""
    has_start_time_ = 0
    start_time_ = 0
    has_end_time_ = 0
    end_time_ = 0
    has_resolution_hours_ = 0
    resolution_hours_ = 1
    has_resolution_hours_set_ = 0
    resolution_hours_set_ = 0
    has_combine_versions_ = 0
    combine_versions_ = 0
    has_usage_version_ = 0
    usage_version_ = 0
    has_usage_version_set_ = 0
    usage_version_set_ = 0
    has_versions_only_ = 0
    versions_only_ = 0

    def __init__(self, contents=None):
        self.version_id_ = []
        if contents is not None: self.MergeFromString(contents)

    def app_id(self): return self.app_id_

    def set_app_id(self, x):
        self.has_app_id_ = 1
        self.app_id_ = x

    def clear_app_id(self):
        if self.has_app_id_:
            self.has_app_id_ = 0
            self.app_id_ = ""

    def has_app_id(self): return self.has_app_id_

    def version_id_size(self): return len(self.version_id_)

    def version_id_list(self): return self.version_id_

    def version_id(self, i):
        return self.version_id_[i]

    def set_version_id(self, i, x):
        self.version_id_[i] = x

    def add_version_id(self, x):
        self.version_id_.append(x)

    def clear_version_id(self):
        self.version_id_ = []

    def start_time(self): return self.start_time_

    def set_start_time(self, x):
        self.has_start_time_ = 1
        self.start_time_ = x

    def clear_start_time(self):
        if self.has_start_time_:
            self.has_start_time_ = 0
            self.start_time_ = 0

    def has_start_time(self): return self.has_start_time_

    def end_time(self): return self.end_time_

    def set_end_time(self, x):
        self.has_end_time_ = 1
        self.end_time_ = x

    def clear_end_time(self):
        if self.has_end_time_:
            self.has_end_time_ = 0
            self.end_time_ = 0

    def has_end_time(self): return self.has_end_time_

    def resolution_hours(self): return self.resolution_hours_

    def set_resolution_hours(self, x):
        self.has_resolution_hours_ = 1
        self.resolution_hours_ = x

    def clear_resolution_hours(self):
        if self.has_resolution_hours_:
            self.has_resolution_hours_ = 0
            # Resets to the proto default of 1, not 0.
            self.resolution_hours_ = 1

    def has_resolution_hours(self): return self.has_resolution_hours_

    def resolution_hours_set(self): return self.resolution_hours_set_

    def set_resolution_hours_set(self, x):
        self.has_resolution_hours_set_ = 1
        self.resolution_hours_set_ = x

    def clear_resolution_hours_set(self):
        if self.has_resolution_hours_set_:
            self.has_resolution_hours_set_ = 0
            self.resolution_hours_set_ = 0

    def has_resolution_hours_set(self): return self.has_resolution_hours_set_

    def combine_versions(self): return self.combine_versions_

    def set_combine_versions(self, x):
        self.has_combine_versions_ = 1
        self.combine_versions_ = x

    def clear_combine_versions(self):
        if self.has_combine_versions_:
            self.has_combine_versions_ = 0
            self.combine_versions_ = 0

    def has_combine_versions(self): return self.has_combine_versions_

    def usage_version(self): return self.usage_version_

    def set_usage_version(self, x):
        self.has_usage_version_ = 1
        self.usage_version_ = x

    def clear_usage_version(self):
        if self.has_usage_version_:
            self.has_usage_version_ = 0
            self.usage_version_ = 0

    def has_usage_version(self): return self.has_usage_version_

    def usage_version_set(self): return self.usage_version_set_

    def set_usage_version_set(self, x):
        self.has_usage_version_set_ = 1
        self.usage_version_set_ = x

    def clear_usage_version_set(self):
        if self.has_usage_version_set_:
            self.has_usage_version_set_ = 0
            self.usage_version_set_ = 0

    def has_usage_version_set(self): return self.has_usage_version_set_

    def versions_only(self): return self.versions_only_

    def set_versions_only(self, x):
        self.has_versions_only_ = 1
        self.versions_only_ = x

    def clear_versions_only(self):
        if self.has_versions_only_:
            self.has_versions_only_ = 0
            self.versions_only_ = 0

    def has_versions_only(self): return self.has_versions_only_

    def MergeFrom(self, x):
        """Copy every set field of *x* (another LogUsageRequest) into self."""
        assert x is not self
        if (x.has_app_id()): self.set_app_id(x.app_id())
        for i in range(x.version_id_size()): self.add_version_id(x.version_id(i))
        if (x.has_start_time()): self.set_start_time(x.start_time())
        if (x.has_end_time()): self.set_end_time(x.end_time())
        if (x.has_resolution_hours()): self.set_resolution_hours(x.resolution_hours())
        if (x.has_resolution_hours_set()): self.set_resolution_hours_set(x.resolution_hours_set())
        if (x.has_combine_versions()): self.set_combine_versions(x.combine_versions())
        if (x.has_usage_version()): self.set_usage_version(x.usage_version())
        if (x.has_usage_version_set()): self.set_usage_version_set(x.usage_version_set())
        if (x.has_versions_only()): self.set_versions_only(x.versions_only())

    def Equals(self, x):
        """Field-by-field equality; returns 1/0 per generated-runtime style."""
        if x is self: return 1
        if self.has_app_id_ != x.has_app_id_: return 0
        if self.has_app_id_ and self.app_id_ != x.app_id_: return 0
        if len(self.version_id_) != len(x.version_id_): return 0
        for e1, e2 in zip(self.version_id_, x.version_id_):
            if e1 != e2: return 0
        if self.has_start_time_ != x.has_start_time_: return 0
        if self.has_start_time_ and self.start_time_ != x.start_time_: return 0
        if self.has_end_time_ != x.has_end_time_: return 0
        if self.has_end_time_ and self.end_time_ != x.end_time_: return 0
        if self.has_resolution_hours_ != x.has_resolution_hours_: return 0
        if self.has_resolution_hours_ and self.resolution_hours_ != x.resolution_hours_: return 0
        if self.has_resolution_hours_set_ != x.has_resolution_hours_set_: return 0
        if self.has_resolution_hours_set_ and self.resolution_hours_set_ != x.resolution_hours_set_: return 0
        if self.has_combine_versions_ != x.has_combine_versions_: return 0
        if self.has_combine_versions_ and self.combine_versions_ != x.combine_versions_: return 0
        if self.has_usage_version_ != x.has_usage_version_: return 0
        if self.has_usage_version_ and self.usage_version_ != x.usage_version_: return 0
        if self.has_usage_version_set_ != x.has_usage_version_set_: return 0
        if self.has_usage_version_set_ and self.usage_version_set_ != x.usage_version_set_: return 0
        if self.has_versions_only_ != x.has_versions_only_: return 0
        if self.has_versions_only_ and self.versions_only_ != x.versions_only_: return 0
        return 1

    def IsInitialized(self, debug_strs=None):
        """Return 1 only when the required app_id field is set."""
        initialized = 1
        if (not self.has_app_id_):
            initialized = 0
            if debug_strs is not None:
                debug_strs.append('Required field: app_id not set.')
        return initialized

    def ByteSize(self):
        """Serialized size; the trailing +1 is the tag byte of required app_id."""
        n = 0
        n += self.lengthString(len(self.app_id_))
        n += 1 * len(self.version_id_)
        for i in range(len(self.version_id_)): n += self.lengthString(len(self.version_id_[i]))
        if (self.has_start_time_): n += 1 + self.lengthVarInt64(self.start_time_)
        if (self.has_end_time_): n += 1 + self.lengthVarInt64(self.end_time_)
        if (self.has_resolution_hours_): n += 1 + self.lengthVarInt64(self.resolution_hours_)
        # High-numbered (>=100) tags need 2 tag bytes, hence +3 / the bool +2.
        if (self.has_resolution_hours_set_): n += 3
        if (self.has_combine_versions_): n += 2
        if (self.has_usage_version_): n += 1 + self.lengthVarInt64(self.usage_version_)
        if (self.has_usage_version_set_): n += 3
        if (self.has_versions_only_): n += 2
        return n + 1

    def ByteSizePartial(self):
        # Like ByteSize, but counts app_id only when actually set.
        n = 0
        if (self.has_app_id_):
            n += 1
            n += self.lengthString(len(self.app_id_))
        n += 1 * len(self.version_id_)
        for i in range(len(self.version_id_)): n += self.lengthString(len(self.version_id_[i]))
        if (self.has_start_time_): n += 1 + self.lengthVarInt64(self.start_time_)
        if (self.has_end_time_): n += 1 + self.lengthVarInt64(self.end_time_)
        if (self.has_resolution_hours_): n += 1 + self.lengthVarInt64(self.resolution_hours_)
        if (self.has_resolution_hours_set_): n += 3
        if (self.has_combine_versions_): n += 2
        if (self.has_usage_version_): n += 1 + self.lengthVarInt64(self.usage_version_)
        if (self.has_usage_version_set_): n += 3
        if (self.has_versions_only_): n += 2
        return n

    def Clear(self):
        self.clear_app_id()
        self.clear_version_id()
        self.clear_start_time()
        self.clear_end_time()
        self.clear_resolution_hours()
        self.clear_resolution_hours_set()
        self.clear_combine_versions()
        self.clear_usage_version()
        self.clear_usage_version_set()
        self.clear_versions_only()

    def OutputUnchecked(self, out):
        """Serialize to encoder *out*; app_id (tag 10) is written unconditionally."""
        out.putVarInt32(10)
        out.putPrefixedString(self.app_id_)
        for i in range(len(self.version_id_)):
            out.putVarInt32(18)
            out.putPrefixedString(self.version_id_[i])
        if (self.has_start_time_):
            out.putVarInt32(24)
            out.putVarInt32(self.start_time_)
        if (self.has_end_time_):
            out.putVarInt32(32)
            out.putVarInt32(self.end_time_)
        if (self.has_resolution_hours_):
            out.putVarInt32(40)
            out.putVarUint64(self.resolution_hours_)
        if (self.has_combine_versions_):
            out.putVarInt32(48)
            out.putBoolean(self.combine_versions_)
        if (self.has_usage_version_):
            out.putVarInt32(56)
            out.putVarInt32(self.usage_version_)
        if (self.has_versions_only_):
            out.putVarInt32(64)
            out.putBoolean(self.versions_only_)
        # The *_set marker fields use high tag numbers (840 = tag 105, 856 = tag 107).
        if (self.has_resolution_hours_set_):
            out.putVarInt32(840)
            out.putBoolean(self.resolution_hours_set_)
        if (self.has_usage_version_set_):
            out.putVarInt32(856)
            out.putBoolean(self.usage_version_set_)

    def OutputPartial(self, out):
        # Serialization variant that writes app_id only when present.
        if (self.has_app_id_):
            out.putVarInt32(10)
            out.putPrefixedString(self.app_id_)
        for i in range(len(self.version_id_)):
            out.putVarInt32(18)
            out.putPrefixedString(self.version_id_[i])
        if (self.has_start_time_):
            out.putVarInt32(24)
            out.putVarInt32(self.start_time_)
        if (self.has_end_time_):
            out.putVarInt32(32)
            out.putVarInt32(self.end_time_)
        if (self.has_resolution_hours_):
            out.putVarInt32(40)
            out.putVarUint64(self.resolution_hours_)
        if (self.has_combine_versions_):
            out.putVarInt32(48)
            out.putBoolean(self.combine_versions_)
        if (self.has_usage_version_):
            out.putVarInt32(56)
            out.putVarInt32(self.usage_version_)
        if (self.has_versions_only_):
            out.putVarInt32(64)
            out.putBoolean(self.versions_only_)
        if (self.has_resolution_hours_set_):
            out.putVarInt32(840)
            out.putBoolean(self.resolution_hours_set_)
        if (self.has_usage_version_set_):
            out.putVarInt32(856)
            out.putBoolean(self.usage_version_set_)

    def TryMerge(self, d):
        """Parse fields from decoder *d*, merging into self; skips unknown tags."""
        while d.avail() > 0:
            tt = d.getVarInt32()
            if tt == 10:
                self.set_app_id(d.getPrefixedString())
                continue
            if tt == 18:
                self.add_version_id(d.getPrefixedString())
                continue
            if tt == 24:
                self.set_start_time(d.getVarInt32())
                continue
            if tt == 32:
                self.set_end_time(d.getVarInt32())
                continue
            if tt == 40:
                self.set_resolution_hours(d.getVarUint64())
                continue
            if tt == 48:
                self.set_combine_versions(d.getBoolean())
                continue
            if tt == 56:
                self.set_usage_version(d.getVarInt32())
                continue
            if tt == 64:
                self.set_versions_only(d.getBoolean())
                continue
            if tt == 840:
                self.set_resolution_hours_set(d.getBoolean())
                continue
            if tt == 856:
                self.set_usage_version_set(d.getBoolean())
                continue
            # Tag 0 is invalid on the wire.
            if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
            d.skipData(tt)

    def __str__(self, prefix="", printElemNumber=0):
        res=""
        if self.has_app_id_: res+=prefix+("app_id: %s\n" % self.DebugFormatString(self.app_id_))
        cnt=0
        for e in self.version_id_:
            elm=""
            if printElemNumber: elm="(%d)" % cnt
            res+=prefix+("version_id%s: %s\n" % (elm, self.DebugFormatString(e)))
            cnt+=1
        if self.has_start_time_: res+=prefix+("start_time: %s\n" % self.DebugFormatInt32(self.start_time_))
        if self.has_end_time_: res+=prefix+("end_time: %s\n" % self.DebugFormatInt32(self.end_time_))
        if self.has_resolution_hours_: res+=prefix+("resolution_hours: %s\n" % self.DebugFormatInt64(self.resolution_hours_))
        if self.has_resolution_hours_set_: res+=prefix+("resolution_hours_set: %s\n" % self.DebugFormatBool(self.resolution_hours_set_))
        if self.has_combine_versions_: res+=prefix+("combine_versions: %s\n" % self.DebugFormatBool(self.combine_versions_))
        if self.has_usage_version_: res+=prefix+("usage_version: %s\n" % self.DebugFormatInt32(self.usage_version_))
        if self.has_usage_version_set_: res+=prefix+("usage_version_set: %s\n" % self.DebugFormatBool(self.usage_version_set_))
        if self.has_versions_only_: res+=prefix+("versions_only: %s\n" % self.DebugFormatBool(self.versions_only_))
        return res

    def _BuildTagLookupTable(sparse, maxtag, default=None):
        # No 'self': only invoked at class-definition time to build the tables.
        return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

    # Field-name -> tag-number constants.
    kapp_id = 1
    kversion_id = 2
    kstart_time = 3
    kend_time = 4
    kresolution_hours = 5
    kresolution_hours_set = 105
    kcombine_versions = 6
    kusage_version = 7
    kusage_version_set = 107
    kversions_only = 8

    # Tag -> field name (slot 0 is the error slot).
    _TEXT = _BuildTagLookupTable({
        0: "ErrorCode",
        1: "app_id",
        2: "version_id",
        3: "start_time",
        4: "end_time",
        5: "resolution_hours",
        6: "combine_versions",
        7: "usage_version",
        8: "versions_only",
        105: "resolution_hours_set",
        107: "usage_version_set",
    }, 107)

    # Tag -> wire encoding.
    _TYPES = _BuildTagLookupTable({
        0: ProtocolBuffer.Encoder.NUMERIC,
        1: ProtocolBuffer.Encoder.STRING,
        2: ProtocolBuffer.Encoder.STRING,
        3: ProtocolBuffer.Encoder.NUMERIC,
        4: ProtocolBuffer.Encoder.NUMERIC,
        5: ProtocolBuffer.Encoder.NUMERIC,
        6: ProtocolBuffer.Encoder.NUMERIC,
        7: ProtocolBuffer.Encoder.NUMERIC,
        8: ProtocolBuffer.Encoder.NUMERIC,
        105: ProtocolBuffer.Encoder.NUMERIC,
        107: ProtocolBuffer.Encoder.NUMERIC,
    }, 107, ProtocolBuffer.Encoder.MAX_TYPE)

    _STYLE = """"""
    _STYLE_CONTENT_TYPE = """"""
    _PROTO_DESCRIPTOR_NAME = 'apphosting.LogUsageRequest'
class LogUsageResponse(ProtocolBuffer.ProtocolMessage):
    """Generated protobuf message 'apphosting.LogUsageResponse'.

    Fields (tags in the tables at the bottom):
      usage   -- repeated LogUsageRecord, tag 1
      summary -- optional LogUsageRecord, tag 2 (lazily allocated)
    """

    # Presence flag and lazily-created value for the optional 'summary' field.
    has_summary_ = 0
    summary_ = None

    def __init__(self, contents=None):
        self.usage_ = []
        # Guards one-time lazy construction of summary_ in summary().
        self.lazy_init_lock_ = thread.allocate_lock()
        if contents is not None: self.MergeFromString(contents)

    def usage_size(self): return len(self.usage_)

    def usage_list(self): return self.usage_

    def usage(self, i):
        return self.usage_[i]

    def mutable_usage(self, i):
        return self.usage_[i]

    def add_usage(self):
        # Append and return a fresh LogUsageRecord element for in-place filling.
        x = LogUsageRecord()
        self.usage_.append(x)
        return x

    def clear_usage(self):
        self.usage_ = []

    def summary(self):
        # Double-checked lazy init under the lock: at most one instance created.
        if self.summary_ is None:
            self.lazy_init_lock_.acquire()
            try:
                if self.summary_ is None: self.summary_ = LogUsageRecord()
            finally:
                self.lazy_init_lock_.release()
        return self.summary_

    def mutable_summary(self): self.has_summary_ = 1; return self.summary()

    def clear_summary(self):
        # Clear the presence bit but keep the allocated instance for reuse.
        if self.has_summary_:
            self.has_summary_ = 0;
            if self.summary_ is not None: self.summary_.Clear()

    def has_summary(self): return self.has_summary_

    def MergeFrom(self, x):
        """Merge every set field of *x* (another LogUsageResponse) into self."""
        assert x is not self
        for i in range(x.usage_size()): self.add_usage().CopyFrom(x.usage(i))
        if (x.has_summary()): self.mutable_summary().MergeFrom(x.summary())

    def Equals(self, x):
        """Field-by-field equality; returns 1/0 per generated-runtime style."""
        if x is self: return 1
        if len(self.usage_) != len(x.usage_): return 0
        for e1, e2 in zip(self.usage_, x.usage_):
            if e1 != e2: return 0
        if self.has_summary_ != x.has_summary_: return 0
        if self.has_summary_ and self.summary_ != x.summary_: return 0
        return 1

    def IsInitialized(self, debug_strs=None):
        """Return 1 when all (recursive) required fields are set."""
        initialized = 1
        for p in self.usage_:
            if not p.IsInitialized(debug_strs): initialized=0
        if (self.has_summary_ and not self.summary_.IsInitialized(debug_strs)): initialized = 0
        return initialized

    def ByteSize(self):
        """Exact serialized size in bytes (1 tag byte per field occurrence)."""
        n = 0
        n += 1 * len(self.usage_)
        for i in range(len(self.usage_)): n += self.lengthString(self.usage_[i].ByteSize())
        if (self.has_summary_): n += 1 + self.lengthString(self.summary_.ByteSize())
        return n

    def ByteSizePartial(self):
        # Like ByteSize but tolerates missing required fields in sub-messages.
        n = 0
        n += 1 * len(self.usage_)
        for i in range(len(self.usage_)): n += self.lengthString(self.usage_[i].ByteSizePartial())
        if (self.has_summary_): n += 1 + self.lengthString(self.summary_.ByteSizePartial())
        return n

    def Clear(self):
        self.clear_usage()
        self.clear_summary()

    def OutputUnchecked(self, out):
        """Serialize to encoder *out*; wire tags: 10=usage, 18=summary."""
        for i in range(len(self.usage_)):
            out.putVarInt32(10)
            out.putVarInt32(self.usage_[i].ByteSize())
            self.usage_[i].OutputUnchecked(out)
        if (self.has_summary_):
            out.putVarInt32(18)
            out.putVarInt32(self.summary_.ByteSize())
            self.summary_.OutputUnchecked(out)

    def OutputPartial(self, out):
        # Serialization variant that skips required-field checks.
        for i in range(len(self.usage_)):
            out.putVarInt32(10)
            out.putVarInt32(self.usage_[i].ByteSizePartial())
            self.usage_[i].OutputPartial(out)
        if (self.has_summary_):
            out.putVarInt32(18)
            out.putVarInt32(self.summary_.ByteSizePartial())
            self.summary_.OutputPartial(out)

    def TryMerge(self, d):
        """Parse fields from decoder *d*, merging into self; skips unknown tags."""
        while d.avail() > 0:
            tt = d.getVarInt32()
            if tt == 10:
                # Length-delimited sub-message: decode from a bounded view.
                length = d.getVarInt32()
                tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
                d.skip(length)
                self.add_usage().TryMerge(tmp)
                continue
            if tt == 18:
                length = d.getVarInt32()
                tmp = ProtocolBuffer.Decoder(d.buffer(), d.pos(), d.pos() + length)
                d.skip(length)
                self.mutable_summary().TryMerge(tmp)
                continue
            # Tag 0 is invalid on the wire.
            if (tt == 0): raise ProtocolBuffer.ProtocolBufferDecodeError
            d.skipData(tt)

    def __str__(self, prefix="", printElemNumber=0):
        res=""
        cnt=0
        for e in self.usage_:
            elm=""
            if printElemNumber: elm="(%d)" % cnt
            res+=prefix+("usage%s <\n" % elm)
            res+=e.__str__(prefix + "  ", printElemNumber)
            res+=prefix+">\n"
            cnt+=1
        if self.has_summary_:
            res+=prefix+"summary <\n"
            res+=self.summary_.__str__(prefix + "  ", printElemNumber)
            res+=prefix+">\n"
        return res

    def _BuildTagLookupTable(sparse, maxtag, default=None):
        # No 'self': only invoked at class-definition time to build the tables.
        return tuple([sparse.get(i, default) for i in range(0, 1+maxtag)])

    # Field-name -> tag-number constants.
    kusage = 1
    ksummary = 2

    # Tag -> field name (slot 0 is the error slot).
    _TEXT = _BuildTagLookupTable({
        0: "ErrorCode",
        1: "usage",
        2: "summary",
    }, 2)

    # Tag -> wire encoding.
    _TYPES = _BuildTagLookupTable({
        0: ProtocolBuffer.Encoder.NUMERIC,
        1: ProtocolBuffer.Encoder.STRING,
        2: ProtocolBuffer.Encoder.STRING,
    }, 2, ProtocolBuffer.Encoder.MAX_TYPE)

    _STYLE = """"""
    _STYLE_CONTENT_TYPE = """"""
    _PROTO_DESCRIPTOR_NAME = 'apphosting.LogUsageResponse'
# Hook point for extension-runtime-specific setup; currently a no-op.
if _extension_runtime:
    pass

# Public API of this generated module.
__all__ = [
    'LogServiceError',
    'UserAppLogLine',
    'UserAppLogGroup',
    'FlushRequest',
    'SetStatusRequest',
    'LogOffset',
    'LogLine',
    'RequestLog',
    'LogModuleVersion',
    'LogReadRequest',
    'LogReadResponse',
    'LogUsageRecord',
    'LogUsageRequest',
    'LogUsageResponse',
]
| {
"content_hash": "26a6450b3f8597f7c20b16f0255f130f",
"timestamp": "",
"source": "github",
"line_count": 4803,
"max_line_length": 241,
"avg_line_length": 32.6456381428274,
"alnum_prop": 0.640369394822605,
"repo_name": "Suwmlee/XX-Net",
"id": "1d0aaf2a2d4a5a963fee58e3f0a27b8927d4cea1",
"size": "157401",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3",
"path": "gae_proxy/server/lib/google/appengine/api/logservice/log_service_pb.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "200"
},
{
"name": "C",
"bytes": "33097"
},
{
"name": "CSS",
"bytes": "86345"
},
{
"name": "HTML",
"bytes": "141382"
},
{
"name": "JavaScript",
"bytes": "345991"
},
{
"name": "PHP",
"bytes": "10671"
},
{
"name": "Python",
"bytes": "17312939"
},
{
"name": "Shell",
"bytes": "4647"
},
{
"name": "Visual Basic",
"bytes": "382"
}
],
"symlink_target": ""
} |
"""Tests module."""
from unittest import mock
import pytest
from httpx import AsyncClient
from .application import app, container
from .services import Service
@pytest.fixture
def client(event_loop):
    """Yield an httpx AsyncClient bound to the app; close it on teardown."""
    test_client = AsyncClient(app=app, base_url="http://test")
    yield test_client
    # Fixture teardown: run the async close on the test event loop.
    event_loop.run_until_complete(test_client.aclose())
@pytest.mark.asyncio
async def test_index(client):
    """Index endpoint returns the value produced by the (mocked) Service."""
    mocked_service = mock.AsyncMock(spec=Service)
    mocked_service.process.return_value = "Foo"

    # Swap the container's service for the mock while making the request.
    with container.service.override(mocked_service):
        response = await client.get("/")

        assert response.status_code == 200
        assert response.json() == {"result": "Foo"}
| {
"content_hash": "49dbd651fc60a4b749b03c95a531f2bc",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 57,
"avg_line_length": 23.535714285714285,
"alnum_prop": 0.7101669195751138,
"repo_name": "ets-labs/python-dependency-injector",
"id": "bde075abe6376e1cae2b4628877e40e5c0fce3dc",
"size": "659",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/miniapps/fastapi-redis/fastapiredis/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cython",
"bytes": "201812"
},
{
"name": "Makefile",
"bytes": "1942"
},
{
"name": "Python",
"bytes": "492977"
}
],
"symlink_target": ""
} |
from itertools import chain
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from Orange.widgets import gui
from Orange.widgets.widget import OWWidget
from Orange.widgets.settings import Setting
from Orange.widgets.utils.saveplot import save_plot
from Orange.widgets.io import FileFormat
# Default brush for tree-node droplets (name spelling kept as-is: it is a
# module-level name other code may reference).
DefDroppletBrush = QBrush(Qt.darkGray)
class GraphNode:
    """A graph node that keeps the set of edges attached to it."""

    def __init__(self, *_, **kwargs):
        # Positional arguments are accepted and ignored (cooperative
        # multiple-inheritance friendliness); edges may be injected via kwargs.
        self.edges = kwargs.get("edges", set())

    def graph_edges(self):
        """Return the set of edges attached to this node."""
        return self.edges

    def graph_add_edge(self, edge):
        """Attach *edge* to this node."""
        self.edges.add(edge)

    def __iter__(self):
        """Iterate over the far endpoint (node2) of every attached edge."""
        return (attached.node2 for attached in self.edges)

    def graph_nodes(self, atype=1):
        """Placeholder hook for subclasses; returns None."""
        pass
class GraphEdge:
    """Directed edge between two nodes; registers itself on both endpoints."""

    def __init__(self, node1=None, node2=None, atype=1):
        self.node1 = node1
        self.node2 = node2
        self.type = atype
        # Make the edge discoverable from each non-None endpoint.
        for node in (node1, node2):
            if node is not None:
                node.graph_add_edge(self)
class GraphicsDroplet(QGraphicsEllipseItem):
    """Clickable circle under a tree node that expands/collapses its subtree."""

    def __init__(self, *args):
        super().__init__(*args)
        self.setAcceptHoverEvents(True)
        self.setAcceptedMouseButtons(Qt.LeftButton)
        self.setBrush(QBrush(Qt.gray))
        self.setPen(Qt.white)

    def hoverEnterEvent(self, event):
        super().hoverEnterEvent(event)
        # Darken while hovered.
        self.setBrush(QBrush(QColor(100, 100, 100)))
        self.update()

    def hoverLeaveEvent(self, event):
        super().hoverLeaveEvent(event)
        # Lighten again when the pointer leaves.
        self.setBrush(QBrush(QColor(200, 200, 200)))
        self.update()

    def mousePressEvent(self, event):
        super().mousePressEvent(event)
        # Toggle the owning node open/closed and re-run the scene layout.
        self.parentItem().set_open(not self.parentItem().isOpen)
        if self.scene():
            self.scene().fix_pos()
# noinspection PyPep8Naming
class TextTreeNode(QGraphicsTextItem, GraphNode):
    """Tree node rendered as an HTML text item with an expand/collapse droplet."""

    def setBackgroundBrush(self, brush):
        """Set the fill brush and choose a contrasting default text color."""
        if self._background_brush != brush:
            self._background_brush = QBrush(brush)
            color = brush.color()
            r, g, b, _ = color.getRgb()
            # Rec. 709 luma; dark backgrounds get white text, light get black.
            lum = 0.2126 * r + 0.7152 * g + 0.0722 * b
            if lum > 100:
                self.setDefaultTextColor(Qt.black)
            else:
                self.setDefaultTextColor(Qt.white)
            self.update()

    def backgroundBrush(self):
        # Fall back to the scene-wide default brush when none was set.
        brush = getattr(self, "_background_brush",
                        getattr(self.scene(), "defaultItemBrush", Qt.NoBrush))
        return QBrush(brush)

    # Re-bind the name as a Qt property (getter above is intentionally
    # shadowed) so the brush is accessible through the Qt property system.
    backgroundBrush = pyqtProperty(
        "QBrush", fget=backgroundBrush, fset=setBackgroundBrush,
        doc="Background brush")

    def __init__(self, tree, parent, *args, **kwargs):
        QGraphicsTextItem.__init__(self, *args)
        GraphNode.__init__(self, **kwargs)
        self._background_brush = None
        self._rect = None          # explicit geometry override (see set_rect)
        self.tree = tree           # model node displayed by this item
        self.parent = parent
        font = self.font()
        font.setPointSize(10)
        self.setFont(font)
        # Expand/collapse handle, positioned under the text's center.
        self.droplet = GraphicsDroplet(-5, 0, 10, 10, self)
        self.droplet.setPos(self.rect().center().x(), self.rect().height())
        self.document().contentsChanged.connect(self.update_contents)
        self.isOpen = True
        self.setFlag(QGraphicsItem.ItemIsSelectable, True)

    def setHtml(self, html):
        # Wrap in <body> so document-level styling applies uniformly.
        return super().setHtml("<body>" + html + "</body>")

    def update_contents(self):
        # Reset, then pin the text width to the ideal document width.
        self.setTextWidth(-1)
        self.setTextWidth(self.document().idealWidth())
        self.droplet.setPos(self.rect().center().x(), self.rect().height())
        # The droplet is only useful when there are children to collapse.
        self.droplet.setVisible(bool(self.branches))

    def set_rect(self, rect):
        """Set an explicit geometry rect; None resets to content-based size."""
        self.prepareGeometryChange()
        rect = QRectF() if rect is None else rect
        self._rect = rect
        self.update_contents()
        self.update()

    def shape(self):
        path = QPainterPath()
        path.addRect(self.boundingRect())
        return path

    def rect(self):
        if getattr(self, "_rect", QRectF()).isValid():
            return self._rect
        else:
            # Content size united with _rect (if present) so an
            # invalid-but-set rect still contributes its extent.
            return QRectF(QPointF(0, 0), self.document().size()) | \
                getattr(self, "_rect", QRectF(0, 0, 1, 1))

    def boundingRect(self):
        return self._rect if getattr(self, "_rect", QRectF()).isValid() \
            else super().boundingRect()

    @property
    def branches(self):
        """Child nodes: targets of edges that originate at this node."""
        return [edge.node2 for edge in self.graph_edges() if edge.node1 is self]

    def paint(self, painter, option, widget=0):
        # Draw a rounded background box, then the text on top, clipped to it.
        painter.save()
        painter.setBrush(self.backgroundBrush)
        painter.setPen(QPen(Qt.gray))
        rect = self.rect()
        painter.drawRoundedRect(rect, 4, 4)
        painter.restore()
        painter.setClipRect(rect)
        return QGraphicsTextItem.paint(self, painter, option, widget)
class GraphicsNode(TextTreeNode):
    """TextTreeNode with breadth-first traversal, collapse and custom selection."""

    def graph_traverse_bf(self):
        """Yield this node and every reachable descendant, breadth-first.

        Closed nodes are yielded but their children are not descended into.
        """
        visited = set()
        queue = list(self)
        while queue:
            node = queue.pop(0)
            if node not in visited:
                yield node
                visited.add(node)
                if node.isOpen:
                    queue.extend(list(node))

    def set_open(self, do_open):
        """Expand or collapse: show/hide every node in the subtree."""
        self.isOpen = do_open
        for node in self.graph_traverse_bf():
            if node is not self:
                node.setVisible(do_open)

    def itemChange(self, change, value):
        # Keep attached edges in sync when the node moves or is shown/hidden.
        if change in [QGraphicsItem.ItemPositionHasChanged,
                      QGraphicsItem.ItemVisibleHasChanged]:
            self.update_edge()
        return super().itemChange(change, value)

    # noinspection PyCallByClass,PyTypeChecker
    def update_edge(self):
        for edge in self.edges:
            if edge.node1 is self:
                # Defer: geometry may not be final while itemChange runs.
                QTimer.singleShot(0, edge.update_ends)
            elif edge.node2 is self:
                edge.setVisible(self.isVisible())

    def edge_in_point(self, edge):
        """Top-center of this node, expressed in *edge* coordinates."""
        return edge.mapFromItem(
            self, QPointF(self.rect().center().x(), self.rect().y()))

    def edge_out_point(self, edge):
        """Center of the droplet, expressed in *edge* coordinates."""
        return edge.mapFromItem(self.droplet, self.droplet.rect().center())

    def paint(self, painter, option, widget=0):
        if self.isSelected():
            # XOR clears State_Selected so the base class does not draw
            # its default selection frame; we draw our own below.
            option.state ^= QStyle.State_Selected
        if self.isSelected():
            rect = self.rect()
            painter.save()
            # Translucent blue rounded highlight around the node.
            painter.setBrush(QBrush(QColor(125, 162, 206, 192)))
            painter.drawRoundedRect(rect.adjusted(-4, -4, 4, 4), 10, 10)
            painter.restore()
        super().paint(painter, option, widget)

    def boundingRect(self):
        # Padding for the selection highlight drawn in paint().
        return super().boundingRect().adjusted(-5, -5, 5, 5)

    def mousePressEvent(self, event):
        return super().mousePressEvent(event)
class GraphicsEdge(QGraphicsLineItem, GraphEdge):
    """Line item connecting a parent node's droplet to a child node."""

    def __init__(self, *args, **kwargs):
        QGraphicsLineItem.__init__(self, *args)
        GraphEdge.__init__(self, **kwargs)
        # Draw edges well below node items.
        self.setZValue(-30)

    def update_ends(self):
        """Re-anchor the line to its endpoint nodes' connection points."""
        try:
            self.prepareGeometryChange()
            start = self.node1.edge_out_point(self)
            end = self.node2.edge_in_point(self)
            self.setLine(QLineF(start, end))
        except RuntimeError:
            # Invoked via QTimer.singleShot; the underlying C++ object may
            # already have been deleted by Qt by the time this runs.
            pass
class TreeGraphicsView(QGraphicsView):
    """Main scene view that announces its size changes via ``resized``."""

    resized = pyqtSignal(QSize, name="resized")

    def __init__(self, scene, *args):
        super().__init__(scene, *args)
        self.viewport().setMouseTracking(True)
        self.setFocusPolicy(Qt.WheelFocus)
        # Smooth rendering for shapes and text alike.
        for hint in (QPainter.Antialiasing,
                     QPainter.TextAntialiasing,
                     QPainter.HighQualityAntialiasing):
            self.setRenderHint(hint)

    def resizeEvent(self, event):
        super().resizeEvent(event)
        # Let listeners (e.g. the navigator) react to the new size.
        self.resized.emit(self.size())
class TreeGraphicsScene(QGraphicsScene):
    """Scene that lays out GraphNode items as a top-down tree."""
    _HSPACING = 10   # horizontal gap between sibling subtrees
    _VSPACING = 10   # vertical gap between tree levels

    def __init__(self, master, *args):
        super().__init__(*args)
        self.master = master
        self.nodeList = []
        self.edgeList = []
        # Running layout cursor shared with _fix_pos.
        self.gx = self.gy = 10

    def fix_pos(self, node=None, x=10, y=10):
        """Recompute the layout starting at *node* (default: the root node)."""
        self.gx, self.gy = x, y
        if not node:
            if self.nodes():
                # Root = the (single) node without a parent.
                node = [node for node in self.nodes() if not node.parent][0]
            else:
                return
        if not x or not y:
            # NOTE(review): unreachable with the default arguments (10, 10);
            # only triggers when a caller passes 0 explicitly.
            x, y = self._HSPACING, self._VSPACING
        self._fix_pos(node, x, y)
        # Margin around the laid-out content.
        rect = self.itemsBoundingRect().adjusted(-10, -10, 20, 20)
        self.setSceneRect(rect)
        self.update()

    def _fix_pos(self, node, x, y):
        """Depth-first layout: leaves advance gx; parents center over children."""
        def brect(node):
            # Node extent including child items (e.g. the droplet).
            return node.boundingRect() | node.childrenBoundingRect()
        if node.branches and node.isOpen:
            for n in node.branches:
                x, ry = self._fix_pos(n, x,
                                      y + self._VSPACING + brect(node).height())
            # Center the parent over its first and last child.
            x = (node.branches[0].pos().x() + node.branches[-1].pos().x()) / 2
            node.setPos(x, y)
            for e in node.edges:
                e.update_ends()
        else:
            # Leaf (or collapsed) node: place at the running x cursor.
            node.setPos(self.gx, y)
            self.gx += self._HSPACING + brect(node).width()
            x += self._HSPACING + brect(node).width()
        self.gy = max(y, self.gy)
        return x, y

    def mouseMoveEvent(self, event):
        return QGraphicsScene.mouseMoveEvent(self, event)

    def mousePressEvent(self, event):
        return QGraphicsScene.mousePressEvent(self, event)

    def edges(self):
        """All GraphEdge items currently in the scene."""
        return [item for item in self.items() if isinstance(item, GraphEdge)]

    def nodes(self):
        """All GraphNode items currently in the scene."""
        return [item for item in self.items() if isinstance(item, GraphNode)]
class TreeNavigator(QGraphicsView):
    """Thumbnail overview of the master view's scene.

    Shares the master view's scene, keeps itself fitted to the scene rect,
    and lets the user pan the master view by clicking or dragging.
    """

    def __init__(self, master_view, *_):
        super().__init__()
        self.master_view = master_view
        self.setScene(self.master_view.scene())
        self.scene().sceneRectChanged.connect(self.updateSceneRect)
        self.master_view.resized.connect(self.update_view)
        self.setRenderHint(QPainter.Antialiasing)

    def mousePressEvent(self, event):
        if event.buttons() & Qt.LeftButton:
            # Center the master view on the clicked scene position.
            self.master_view.centerOn(self.mapToScene(event.pos()))
            self.update_view()
        # BUG FIX: was `super().mousePressEvenr(event)` (typo), which
        # raised AttributeError on every click.
        return super().mousePressEvent(event)

    def mouseMoveEvent(self, event):
        if event.buttons() & Qt.LeftButton:
            self.master_view.centerOn(self.mapToScene(event.pos()))
            self.update_view()
        return super().mouseMoveEvent(event)

    def resizeEvent(self, event):
        QGraphicsView.resizeEvent(self, event)
        self.update_view()

    # noinspection PyPep8Naming
    def resizeView(self):
        self.update_view()

    def updateSceneRect(self, rect):
        super().updateSceneRect(rect)
        self.update_view()

    def update_view(self, *_):
        # Keep the whole scene visible inside the navigator.
        if self.scene():
            self.fitInView(self.scene().sceneRect())

    def paintEvent(self, event):
        super().paintEvent(event)
        # Overlay a translucent polygon marking the master view's viewport.
        painter = QPainter(self.viewport())
        painter.setBrush(QColor(100, 100, 100, 100))
        painter.setRenderHints(self.renderHints())
        painter.drawPolygon(self.viewPolygon())

    # noinspection PyPep8Naming
    def viewPolygon(self):
        """Master view's viewport rect mapped into navigator coordinates."""
        return self.mapFromScene(
            self.master_view.mapToScene(self.master_view.viewport().rect()))
class OWTreeViewer2D(OWWidget):
    """Abstract base widget that renders a fitted tree as a 2-D scene.

    Provides zoom / node-width / depth / edge-width controls and the
    machinery to build GraphicsNode/GraphicsEdge items from a tree model.
    NOTE(review): several attributes used below (``scene``, ``scene_view``,
    ``sceneView``, ``centerRootButton``, ``centerNodeButton``, ``infoa``,
    ``infob``, ``nav_widget``, ``set_node_info``) are expected to be
    provided by subclasses -- confirm against the concrete viewers.
    """
    zoom = Setting(5)
    line_width_method = Setting(2)
    max_tree_depth = Setting(0)
    max_node_width = Setting(150)

    _VSPACING = 5
    _HSPACING = 5
    _TOOLTIPS_ENABLED = True
    _DEF_NODE_WIDTH = 24
    _DEF_NODE_HEIGHT = 20

    graph_name = "scene"

    def __init__(self):
        super().__init__()
        self.selected_node = None
        self.root_node = None
        self.tree = None

        box = gui.vBox(
            self.controlArea, 'Tree', addSpace=20,
            sizePolicy=QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Fixed))
        self.info = gui.widgetLabel(box, 'No tree.')

        layout = QFormLayout()
        layout.setVerticalSpacing(20)
        layout.setFieldGrowthPolicy(layout.ExpandingFieldsGrow)
        box = self.display_box = \
            gui.widgetBox(self.controlArea, "Display", addSpace=True,
                          orientation=layout)
        layout.addRow(
            "Zoom: ",
            gui.hSlider(box, self, 'zoom',
                        minValue=1, maxValue=10, step=1, ticks=False,
                        callback=self.toggle_zoom_slider,
                        createLabel=False, addToLayout=False, addSpace=False))
        layout.addRow(
            "Width: ",
            gui.hSlider(box, self, 'max_node_width',
                        minValue=50, maxValue=200, step=1, ticks=False,
                        callback=self.toggle_node_size,
                        createLabel=False, addToLayout=False, addSpace=False))
        policy = QSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Fixed)
        layout.addRow(
            "Depth: ",
            gui.comboBox(box, self, 'max_tree_depth',
                         items=["Unlimited"] + [
                             "{} levels".format(x) for x in range(2, 10)],
                         addToLayout=False, sendSelectedValue=False,
                         callback=self.toggle_tree_depth, sizePolicy=policy))
        layout.addRow(
            "Edge width: ",
            gui.comboBox(box, self, 'line_width_method',
                         items=['Fixed', 'Relative to root',
                                'Relative to parent'],
                         addToLayout=False,
                         callback=self.toggle_line_width, sizePolicy=policy))
        self.resize(800, 500)

    def send_report(self):
        """Render the current scene to an SVG for the report (legacy API)."""
        from PyQt4.QtSvg import QSvgGenerator
        if self.tree:
            self.reportSection("Tree")
            urlfn, filefn = self.getUniqueImageName(ext=".svg")
            svg = QSvgGenerator()
            svg.setFileName(filefn)
            ssize = self.scene.sceneRect().size()
            w, h = ssize.width(), ssize.height()
            # Scale so the exported image is 600 px wide.
            fact = 600 / w
            svg.setSize(QSize(600, h * fact))
            painter = QPainter()
            painter.begin(svg)
            self.scene.render(painter)
            painter.end()

    def toggle_zoom_slider(self):
        """Apply the zoom slider value as a view transform."""
        # Empirical quadratic mapping from slider position to scale factor.
        k = 0.0028 * (self.zoom ** 2) + 0.2583 * self.zoom + 1.1389
        self.scene_view.setTransform(QTransform().scale(k / 2, k / 2))
        self.scene.update()

    def toggle_tree_depth(self):
        """Re-apply the max-depth setting and re-layout the tree."""
        self.walkupdate(self.root_node)
        self.scene.fix_pos(self.root_node, 10, 10)
        self.scene.update()

    def toggle_line_width(self):
        """Recompute edge pen widths according to ``line_width_method``."""
        if self.root_node is None:
            return
        root_instances = self.root_node.num_instances()
        width = 3   # fixed width (method 0)
        for edge in self.scene.edges():
            num_inst = edge.node2.num_instances()
            if self.line_width_method == 1:
                width = 8 * num_inst / root_instances
            elif self.line_width_method == 2:
                width = 8 * num_inst / edge.node1.num_instances()
            edge.setPen(QPen(Qt.gray, width, Qt.SolidLine, Qt.RoundCap))
        self.scene.update()

    def toggle_node_size(self):
        # NOTE(review): uses `sceneView` while toggle_zoom_slider uses
        # `scene_view` -- verify which attribute the subclasses define.
        self.set_node_info()
        self.scene.update()
        self.sceneView.repaint()

    def toggle_navigator(self):
        self.nav_widget.setHidden(not self.nav_widget.isHidden())

    def activate_loaded_settings(self):
        """Apply persisted settings to a freshly built tree."""
        if not self.tree:
            return
        self.rescale_tree()
        self.scene.fix_pos(self.root_node, 10, 10)
        self.scene.update()
        self.toggle_tree_depth()
        self.toggle_line_width()

    def ctree(self, tree):
        """Input handler: (re)build the scene for *tree* (falsy clears it)."""
        self.clear()
        if not tree:
            self.centerRootButton.setDisabled(1)
            self.centerNodeButton.setDisabled(0)
            self.infoa.setText('No tree.')
            self.infob.setText('')
            self.tree = None
            self.root_node = None
        else:
            self.infoa.setText('Tree.')
            self.tree = tree
            self.root_node = self.walkcreate(self.tree.clf.tree_, None)
            self.scene.fix_pos(self.root_node, self._HSPACING, self._VSPACING)
            self.activate_loaded_settings()
            self.sceneView.centerOn(self.root_node.x(), self.root_node.y())
            self.update_node_tooltips()
            self.centerRootButton.setDisabled(0)
            self.centerNodeButton.setDisabled(1)
        self.scene.update()

    def walkcreate(self, tree, parent=None, level=0, i=0):
        """Recursively create scene items for tree node *i* and its children."""
        node = GraphicsNode(tree, parent, None, self.scene)
        if parent:
            parent.graph_add_edge(GraphicsEdge(None, self.scene,
                                               node1=parent, node2=node))
        left_child_ind = tree.children_left[i]
        right_child_ind = tree.children_right[i]
        # A negative child index marks a leaf.
        if right_child_ind >= 0:
            self.walkcreate(tree, parent=node, level=level + 1,
                            i=right_child_ind)
        if left_child_ind >= 0:
            self.walkcreate(tree, parent=node, level=level + 1,
                            i=left_child_ind)
        return node

    def walkupdate(self, node, level=0):
        """Open/close nodes so only ``max_tree_depth`` levels are expanded."""
        if not node:
            return
        if self.max_tree_depth and self.max_tree_depth < level + 1:
            node.set_open(False)
            return
        else:
            node.set_open(True)
            for n in node.branches:
                self.walkupdate(n, level + 1)

    def clear(self):
        """Drop the current tree and empty the scene."""
        self.tree = None
        self.root_node = None
        self.scene.clear()
        self.scene.setSceneRect(QRectF())

    def update_node_tooltips(self):
        for node in self.scene.nodes():
            node.setToolTip(self.node_tooltip(node) if self._TOOLTIPS_ENABLED
                            else "")

    def node_tooltip(self, tree):
        """Hook for subclasses; the default is a constant placeholder."""
        return "tree node"

    def rescale_tree(self):
        """Reset every node to the default size and re-layout the scene."""
        node_height = self._DEF_NODE_HEIGHT
        node_width = self._DEF_NODE_WIDTH
        for r in self.scene.nodeList:
            # BUG FIX: set_rect() takes a single QRectF (see
            # TextTreeNode.set_rect); the old code passed four floats,
            # which raised a TypeError.
            r.set_rect(QRectF(r.rect().x(), r.rect().y(),
                              node_width, node_height))
        self.scene.fix_pos()

    def update_selection(self):
        """Remember the first selected node (or None when none selected)."""
        self.selected_node = (self.scene.selectedItems() + [None])[0]

    def save_graph(self):
        """Save the scene through the common save-plot dialog."""
        save_plot(data=dict(scene=self.scene, tree=self.tree),
                  file_formats=dict(chain(FileFormat.img_writers.items(),
                                          FileFormat.graph_writers.items())))
| {
"content_hash": "79cbcc4b7de1a703a4b05d8b01b5f19c",
"timestamp": "",
"source": "github",
"line_count": 553,
"max_line_length": 80,
"avg_line_length": 34.15370705244123,
"alnum_prop": 0.5769047492984593,
"repo_name": "qPCR4vir/orange3",
"id": "586f3ee8123cc8e081f518b660139b986d7e06b8",
"size": "18887",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Orange/widgets/classify/owtreeviewer2d.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "20412"
},
{
"name": "C++",
"bytes": "1992"
},
{
"name": "GLSL",
"bytes": "75"
},
{
"name": "HTML",
"bytes": "3503"
},
{
"name": "JavaScript",
"bytes": "12007"
},
{
"name": "Jupyter Notebook",
"bytes": "6662"
},
{
"name": "NSIS",
"bytes": "20281"
},
{
"name": "Python",
"bytes": "4205054"
},
{
"name": "Shell",
"bytes": "48335"
}
],
"symlink_target": ""
} |
__author__ = 'elsabakiu, neilthemathguy, dmorina'
from rest_framework import status, viewsets
from rest_framework.response import Response
from crowdsourcing.serializers.project import *
from rest_framework.decorators import detail_route, list_route
from crowdsourcing.models import Module, Category, Project, Requester, ProjectRequester, \
ModuleReview, ModuleRating, BookmarkedProjects
from crowdsourcing.permissions.project import IsProjectOwnerOrCollaborator
from crowdsourcing.permissions.util import IsOwnerOrReadOnly
from crowdsourcing.permissions.project import IsReviewerOrRaterOrReadOnly
from rest_framework.permissions import IsAuthenticated
from rest_framework import mixins
from django.shortcuts import get_object_or_404
class CategoryViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for non-deleted categories."""
    queryset = Category.objects.filter(deleted=False)
    serializer_class = CategorySerializer

    @detail_route(methods=['post'])
    def update_category(self, request, id=None):
        """Validate request data and apply it to the addressed category."""
        # NOTE(review): the URL kwarg is declared as `id` while sibling
        # viewsets use `pk` -- confirm the router configuration matches.
        category_serializer = CategorySerializer(data=request.data)
        category = self.get_object()
        if category_serializer.is_valid():
            category_serializer.update(category, category_serializer.validated_data)
            return Response({'status': 'updated category'})
        else:
            return Response(category_serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)

    def list(self, request, *args, **kwargs):
        """Return all non-deleted categories; degrade to [] on failure."""
        try:
            categories_serialized = CategorySerializer(self.queryset, many=True)
            return Response(categories_serialized.data)
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
            # are no longer swallowed. Still deliberately best-effort.
            return Response([])

    def destroy(self, request, *args, **kwargs):
        """Delete via the serializer's delete() hook (likely a soft delete)."""
        category_serializer = CategorySerializer()
        category = self.get_object()
        category_serializer.delete(category)
        return Response({'status': 'deleted category'})
class ProjectViewSet(viewsets.ModelViewSet):
    """CRUD for projects plus requester/bookmark listing endpoints."""
    queryset = Project.objects.filter(deleted=False)
    serializer_class = ProjectSerializer
    permission_classes = [IsAuthenticated]

    @detail_route(methods=['post'], permission_classes=[IsProjectOwnerOrCollaborator])
    def update_project(self, request, pk=None):
        """Partially update the addressed project from request data."""
        project_serializer = ProjectSerializer(data=request.data, partial=True)
        project = self.get_object()
        if project_serializer.is_valid():
            project_serializer.update(project, project_serializer.validated_data)
            return Response({'status': 'updated project'})
        else:
            return Response(project_serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)

    def list(self, request, *args, **kwargs):
        """List projects; degrade to an empty list on failure.

        NOTE(review): serializes Project.objects.all(), bypassing the
        deleted=False filter on ``queryset`` -- confirm this is intended.
        """
        try:
            projects = Project.objects.all()
            projects_serialized = ProjectSerializer(projects, many=True)
            return Response(projects_serialized.data)
        except Exception:
            # Was a bare `except:`; narrowed so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            return Response([])

    @list_route(methods=['GET'])
    def requester_projects(self, request, **kwargs):
        """Projects owned by the requesting user's requester profile."""
        projects = request.user.userprofile.requester.project_owner.all()
        serializer = ProjectSerializer(instance=projects, many=True)
        return Response(serializer.data)

    @list_route(methods=['get'])
    def get_bookmarked_projects(self, request, **kwargs):
        """Projects bookmarked by the requesting user."""
        # NOTE(review): `models` is not imported by name in this module's
        # visible imports; presumably it arrives via the wildcard
        # serializer import -- verify.
        user_profile = request.user.userprofile
        bookmarked_projects = models.BookmarkedProjects.objects.all().filter(profile=user_profile)
        projects = bookmarked_projects.values('project',).all()
        project_instances = models.Project.objects.all().filter(pk__in=projects)
        serializer = ProjectSerializer(instance=project_instances, many=True)
        return Response(serializer.data, 200)

    def create(self, request, *args, **kwargs):
        """Create a project owned by the requesting user's profile."""
        project_serializer = ProjectSerializer(data=request.data)
        if project_serializer.is_valid():
            project_serializer.create(owner=request.user.userprofile)
            return Response({'status': 'Project created'})
        else:
            return Response(project_serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)

    def destroy(self, request, *args, **kwargs):
        """Delete via the serializer's delete() hook (likely a soft delete)."""
        project_serializer = ProjectSerializer()
        project = self.get_object()
        project_serializer.delete(project)
        return Response({'status': 'deleted project'})
class ModuleViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for modules; creation binds the requesting user."""
    queryset = Module.objects.all()
    serializer_class = ModuleSerializer
    permission_classes = [IsOwnerOrReadOnly, IsAuthenticated]

    def create(self, request, *args, **kwargs):
        """Validate and create a module owned by the requesting user."""
        module_serializer = ModuleSerializer(data=request.data)
        if not module_serializer.is_valid():
            return Response(module_serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        module_serializer.create(owner=request.user.userprofile)
        return Response({'status': 'Module created'})
class ModuleReviewViewSet(viewsets.ModelViewSet):
    """Reviews for a module, filtered by the ``moduleid`` query parameter."""
    permission_classes = [IsReviewerOrRaterOrReadOnly]
    serializer_class = ModuleReviewSerializer

    def get_queryset(self):
        # The old code first built ModuleReview.objects.all() and then
        # immediately overwrote it; the dead query has been removed.
        moduleid = self.request.query_params.get('moduleid', None)
        return ModuleReview.objects.filter(module__id=moduleid)
class ModuleRatingViewSet(viewsets.ModelViewSet):
    """Ratings for a module, limited to the authenticated user's own."""
    permission_classes = [IsReviewerOrRaterOrReadOnly]
    serializer_class = ModuleRatingSerializer

    def get_queryset(self):
        """Return the requesting user's ratings for ``moduleid`` (or none)."""
        module_id = self.request.query_params.get('moduleid')
        if not self.request.user.is_authenticated():
            return ModuleRating.objects.none()
        return (ModuleRating.objects
                .filter(module_id=module_id)
                .filter(worker__profile__user=self.request.user))
class ProjectRequesterViewSet(mixins.CreateModelMixin, mixins.DestroyModelMixin,
                              mixins.RetrieveModelMixin, viewsets.GenericViewSet):
    """Requester-to-project relations.

    TODO: to be moved under Project.
    """
    serializer_class = ProjectRequesterSerializer
    queryset = ProjectRequester.objects.all()

    def retrieve(self, request, *args, **kwargs):
        """Look up the relation for the project addressed by ``pk``."""
        project = get_object_or_404(Project.objects.all(), id=kwargs['pk'])
        project_requester = get_object_or_404(self.queryset, project=project)
        serializer = ProjectRequesterSerializer(instance=project_requester)
        return Response(serializer.data, status.HTTP_200_OK)
class BookmarkedProjectsViewSet(mixins.CreateModelMixin, mixins.DestroyModelMixin,
                                mixins.RetrieveModelMixin, viewsets.GenericViewSet):
    """Create/retrieve/delete project bookmarks for the authenticated user."""
    queryset = BookmarkedProjects.objects.all()
    serializer_class = BookmarkedProjectsSerializer
    permission_classes = [IsAuthenticated]

    def create(self, request, *args, **kwargs):
        """Bookmark a project for the requesting user's profile."""
        serializer = BookmarkedProjectsSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status.HTTP_400_BAD_REQUEST)
        serializer.create(profile=request.user.userprofile)
        return Response({"Status": "OK"})
"content_hash": "6e9e0bad1b11389360d5f231b9cb3f2d",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 126,
"avg_line_length": 42.44642857142857,
"alnum_prop": 0.6940120600196326,
"repo_name": "ryosuzuki/crowdsource-platform",
"id": "39e15bb3aa3946cb3e8f5fee484d9acf6e1c079d",
"size": "7131",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop2",
"path": "crowdsourcing/viewsets/project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "304886"
},
{
"name": "HTML",
"bytes": "201268"
},
{
"name": "JavaScript",
"bytes": "108907"
},
{
"name": "Python",
"bytes": "187373"
}
],
"symlink_target": ""
} |
from glad.loader import BaseLoader
_EGL_LOADER = '''
int gladLoadEGL(void) {
return gladLoadEGLLoader((GLADloadproc)eglGetProcAddress);
}
'''
_EGL_HEADER = '''
#ifndef __glad_egl_h_
#ifdef __egl_h_
#error EGL header already included, remove this include, glad already provides it
#endif
#define __glad_egl_h_
#define __egl_h_
#if defined(_WIN32) && !defined(APIENTRY) && !defined(__CYGWIN__) && !defined(__SCITECH_SNAP__)
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN 1
#endif
#include <windows.h>
#endif
#ifndef APIENTRY
#define APIENTRY
#endif
#ifndef APIENTRYP
#define APIENTRYP APIENTRY *
#endif
#ifndef GLAPI
#define GLAPI extern
#endif
#ifdef __cplusplus
extern "C" {
#endif
typedef void* (* GLADloadproc)(const char *name);
GLAPI int gladLoadEGLLoader(GLADloadproc);
'''
_EGL_HEADER_LOADER = '''
GLAPI int gladLoadEGL(void);
'''
_EGL_HEADER_END = '''
#ifdef __cplusplus
}
#endif
#endif
'''
_EGL_HAS_EXT = '''
'''
class EGLCLoader(BaseLoader):
    """Emits the C loader scaffolding for EGL (gladLoadEGL via eglGetProcAddress)."""

    def write_header(self, fobj):
        """Write the header prologue, plus the loader prototype if enabled."""
        fobj.write(_EGL_HEADER)
        if not self.disabled:
            fobj.write(_EGL_HEADER_LOADER)

    def write_header_end(self, fobj):
        fobj.write(_EGL_HEADER_END)

    def write(self, fobj, apis):
        """Write the gladLoadEGL() implementation unless disabled."""
        if self.disabled:
            return
        fobj.write(_EGL_LOADER)

    def write_begin_load(self, fobj):
        # Nothing to set up before loading EGL symbols.
        pass

    def write_find_core(self, fobj):
        # EGL has no core-version detection step.
        pass

    def write_has_ext(self, fobj):
        fobj.write(_EGL_HAS_EXT)

    def write_end_load(self, fobj):
        fobj.write('\treturn 1;\n')
| {
"content_hash": "16a4b6805ac887df2007b8d0a3642ed0",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 95,
"avg_line_length": 18.30952380952381,
"alnum_prop": 0.6521456436931079,
"repo_name": "dbralir/glad",
"id": "4c01c52bdb68710cc941703d79a0dc0994bb833b",
"size": "1538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glad/loader/egl/c.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1659"
},
{
"name": "C++",
"bytes": "2445"
},
{
"name": "Python",
"bytes": "88697"
},
{
"name": "Shell",
"bytes": "3321"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
import django.db.models.deletion
import sentry.db.models.fields.onetoone
class Migration(migrations.Migration):
    """State-only migration.

    Marks ``IncidentSnapshot.incident`` as OneToOneCascadeDeletes in
    Django's model state; ``database_operations`` is empty, so no SQL runs.
    """

    # This flag is used to mark that a migration shouldn't be automatically run in
    # production. We set this to True for operations that we think are risky and want
    # someone from ops to run manually and monitor.
    # General advice is that if in doubt, mark your migration as `is_dangerous`.
    # Some things you should always mark as dangerous:
    # - Adding indexes to large tables. These indexes should be created concurrently,
    #   unfortunately we can't run migrations outside of a transaction until Django
    #   1.10. So until then these should be run manually.
    # - Large data migrations. Typically we want these to be run manually by ops so that
    #   they can be monitored. Since data migrations will now hold a transaction open
    #   this is even more important.
    # - Adding columns to highly active tables, even ones that are NULL.
    is_dangerous = False

    dependencies = [
        ('sentry', '0020_auto_20191125_1420'),
    ]

    operations = [
        migrations.SeparateDatabaseAndState(
            # No SQL: the column is unchanged on disk, only model state moves.
            database_operations=[],
            state_operations=[
                migrations.AlterField(
                    model_name='incidentsnapshot',
                    name='incident',
                    field=sentry.db.models.fields.onetoone.OneToOneCascadeDeletes(on_delete=django.db.models.deletion.CASCADE, to='sentry.Incident'),
                ),
            ],
        )
    ]
| {
"content_hash": "39f888d22df700c95b27a0f79f07479b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 149,
"avg_line_length": 41.48717948717949,
"alnum_prop": 0.6687268232385661,
"repo_name": "beeftornado/sentry",
"id": "5550472a7dc3af2b9615c7f9ce5ce73d021cd38a",
"size": "1691",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/migrations/0021_auto_20191203_1803.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "157195"
},
{
"name": "HTML",
"bytes": "197026"
},
{
"name": "JavaScript",
"bytes": "380379"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6473603"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from django.template import TemplateDoesNotExist
try:
from django.template.loaders.base import Loader as BaseLoader
except ImportError:
from django.template.loader import BaseLoader
try:
from django.template import Origin
except ImportError:
has_origin = False
else:
has_origin = True
from nose import tools
# Module-global template store: tests populate it, tearDown resets it,
# and GlobalMemTemplateLoader reads from it.
templates = {}
class GlobalMemTemplateLoader(BaseLoader):
    """Test loader serving templates from the module-level ``templates`` dict.

    Implements both the old (``load_template_source``) and new
    (``get_contents``/``get_template_sources``) Django loader protocols.
    """

    is_usable = True

    def __init__(self, *args, **kwargs):
        super(GlobalMemTemplateLoader, self).__init__(*args, **kwargs)

    def __call__(self, template_name, template_dirs=None):
        return self.load_template(template_name, template_dirs)

    def load_template(self, template_name, template_dirs=None):
        """Return ``(source, name)`` for *template_name* from the global dict."""
        try:
            source = templates[template_name]
        except KeyError as e:
            raise TemplateDoesNotExist(e)
        return source, template_name

    def get_contents(self, origin):
        """New-style API: return the raw template source for *origin*."""
        try:
            return templates[origin.name]
        except KeyError:
            raise TemplateDoesNotExist(origin)

    def get_template_sources(self, template_name):
        """Yield a single Origin when this Django version supports them."""
        if has_origin:
            yield Origin(
                name=template_name,
                template_name=template_name,
                loader=self,
            )

    def load_template_source(self, template_name, template_dirs=None):
        return self.load_template(template_name, template_dirs)
class TestDummyTemplateLoader(TestCase):
    """Exercise GlobalMemTemplateLoader against the module-level dict."""

    def tearDown(self):
        # Reset the shared store so tests stay independent.
        global templates
        templates = {}

    def test_simple(self):
        # A registered template is returned as (source, name).
        loader = GlobalMemTemplateLoader()
        templates['anything.html'] = 'Something'
        source, name = loader.load_template_source('anything.html')
        tools.assert_equals('anything.html', name)
        tools.assert_equals('Something', source)

    def test_empty(self):
        # Missing templates raise TemplateDoesNotExist.
        loader = GlobalMemTemplateLoader()
        tools.assert_raises(TemplateDoesNotExist, loader.load_template_source, 'anything.html')
| {
"content_hash": "4d052c2164b22ff5b8ba8a8c2812161f",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 95,
"avg_line_length": 28.72222222222222,
"alnum_prop": 0.663926499032882,
"repo_name": "MichalMaM/ella",
"id": "032debe4bbce1d27a898acdf5ee7bb9686fd12e8",
"size": "2068",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test_ella/template_loader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "10506"
},
{
"name": "Python",
"bytes": "673298"
}
],
"symlink_target": ""
} |
__all__ = ['TreekvParser']  # public API: only the parser class is exported
class Logger(object):
    """Default logging sink: echoes every message to stdout."""

    def write(self, message):
        """Print *message*; return value is unused (None)."""
        print(message)
class KeyValue(object):
    """One ``key: value`` node of a treekv document.

    Nodes form a tree: ``add()`` appends a child and fixes its ``path``.
    The ``get_*`` helpers look nodes up by slash-separated paths and flag
    returned nodes as "used" so ``get_unused()`` can report leftovers.
    """

    def __init__(self, key, value=None, lineinfo=None, logger=None):
        # `logger=None` sentinel replaces the old shared `Logger()` default
        # instance (mutable-default anti-pattern); behavior is unchanged.
        self.key = key
        self.value = value
        self._list = []          # child KeyValue nodes, in document order
        self.lineinfo = lineinfo
        self.logger = logger if logger is not None else Logger()
        self._flag = False       # True once a get_*() has returned this node
        self._parent = None
        self.path = '<unkown>/{}'.format(key)

    def value_list(self):
        """Split the value on commas, stripping whitespace from each part."""
        return list(v.strip() for v in self.value.split(','))

    def __iter__(self):
        # Unpacks as (key, value).
        yield self.key
        yield self.value

    def set_parent(self, parent):
        """Attach to *parent* and rebuild the root-to-node ``path``."""
        self._parent = parent
        paths = []
        p = self
        while p:
            paths.append(p.key)
            p = p._parent
        self.path = '/'.join(paths[::-1])

    def items(self):
        """Iterator over direct children."""
        return iter(self._list)

    def descendants(self):
        """Yield this node and all nodes below it, depth-first, pre-order."""
        yield self
        for child in self._list:
            for node in child.descendants():
                yield node

    def get_path(self, path):
        """Return all nodes matching slash-separated *path* ('.' = self)."""
        if path == '.':
            return [self]
        elif '/' not in path:
            return self.get(path)
        else:
            p, sep, q = path.partition('/')
            ret = []
            for child in self.get(p):
                ret += child.get_path(q)
            return ret

    def add(self, kv):
        """Append *kv* as the last child of this node."""
        kv.set_parent(self)
        self._list.append(kv)

    def has_item(self):
        """True when this node has at least one child."""
        return len(self._list) > 0

    def last(self):
        """Return the most recently added child; raise IndexError if none."""
        if not self._list:
            # Previously a bare `except:` re-raised everything as
            # IndexError; raise it directly without masking other errors.
            raise IndexError()
        return self._list[-1]

    def get(self, key):
        """Return the direct children whose key equals *key*."""
        return [item for item in self._list if item.key == key]

    def __repr__(self):
        return "KeyValue({},{},{})".format(self.key, self.value, self._list)

    def text(self, indent=0):
        """Render the subtree as indented ``key: value path`` lines."""
        # The original if/else produced the same header in both branches;
        # only the recursion into children differs, and the loop is a
        # no-op for childless nodes anyway.
        ret = "{}: {} {}\n".format(self.key, self.value, self.path)
        for child in self._list:
            ret += child.text(indent + 4)
        return ''.join(' ' * indent + line + '\n' for line in ret.splitlines())

    def set_used(self):
        self._flag = True

    def get_one(self, path):
        """Return exactly one node at *path*, or None (logging ambiguity)."""
        r = self.get_path(path)
        if not r:
            return None
        elif len(r) == 1:
            r[0].set_used()
            return r[0]
        else:
            self.logger.write('Too many items {}'.format(path))
            return None

    def get_many(self, path):
        """Return all nodes at *path*, flagging each as used."""
        r = self.get_path(path)
        for rr in r:
            rr.set_used()
        return r

    def get_tree(self, path):
        """Return nodes at *path*, flagging each matched subtree as used.

        BUG FIX: the old code called ``.descendants()`` on the *list*
        returned by ``get_path``, raising AttributeError whenever a match
        existed.
        """
        r = self.get_path(path)
        for rr in r:
            for node in rr.descendants():
                node.set_used()
        return r

    def get_unused(self):
        """Yield every node in the subtree no ``get_*`` call has returned."""
        for rr in self.descendants():
            if not rr._flag:
                yield rr
class Lineinfo(object):
    """Location of a single input line, used to build parse error messages."""

    def __init__(self, filename, line, lineno):
        self.filename = filename
        self.line = line
        self.lineno = lineno

    def error_msg(self, msg):
        """Return *msg* annotated with the file name, line number and text."""
        location = 'in "{}", line {}: '.format(self.filename, self.lineno)
        return '\n'.join([msg, location, self.line])
def count_until(items, value):
    """Return the length of the leading run of *value* in *items*."""
    count = 0
    for item in items:
        if item != value:
            break
        count += 1
    return count
class TreekvParser(object):
    """Parser for indentation-structured ``key: value`` documents.

    Indentation must be a multiple of *tab_size* spaces; each deeper level
    nests the item under the last item of the enclosing level.  Problems
    are reported through *logger* and the offending line is skipped, so
    parsing never raises on malformed input.
    """

    def __init__(self, tab_size=4, logger=None):
        # `logger=None` sentinel replaces the old shared `Logger()` default
        # instance (mutable-default anti-pattern); behavior is unchanged.
        self.tab_size = tab_size
        self.logger = logger if logger is not None else Logger()

    def read(self, filename):
        """Parse *filename* and return the root KeyValue."""
        # BUG FIX: mode 'rU' was removed in Python 3.11 (raises ValueError);
        # universal newlines are the default for text mode anyway.
        return self.readfp(open(filename, 'r'), filename)

    def readfp(self, fileobj, filename=None):
        """Parse an open file object; *filename* is used in error messages."""
        if not filename:
            # getattr replaces the old bare try/except around fileobj.name.
            filename = getattr(fileobj, 'name', '<unkown>')
        lineno = 0
        root = KeyValue('root')
        tab_stops = [root]   # stack of parent nodes, one per indent level
        for line in fileobj:
            lineno += 1
            li = Lineinfo(filename, line, lineno)
            tab = count_until(line, ' ')
            if tab % self.tab_size != 0:
                self.logger.write(li.error_msg('Ignoring the line due to unkown tab stop {}. tab stops must be {}*n'.format(tab, self.tab_size)))
                continue
            l = line.lstrip()
            # Skip blank lines and comments (#, //, ;).  A duplicate
            # `if not l` check in the original was dead and is removed.
            if not l or l.startswith('#') or l.startswith('//') or l.startswith(';'):
                continue
            if ':' not in l:
                self.logger.write(li.error_msg('Unkown line. line format must be "key:value"'))
                continue
            key, sep, value = l.partition(':')
            item = KeyValue(key.strip(), value.strip(), li, self.logger)
            level = int(tab / self.tab_size)
            current_level = len(tab_stops) - 1
            current_parent = tab_stops[-1]
            if level == current_level:
                # Sibling of the previous item.
                current_parent.add(item)
            elif level == current_level + 1:
                # One level deeper: nest under the parent's last item.
                assert current_parent.has_item()
                new_parent = current_parent.last()
                new_parent.add(item)
                tab_stops.append(new_parent)
            elif level > current_level:
                # Jumped more than one level deeper: report and skip.
                self.logger.write(li.error_msg('Too many indent spaces. This indent must be less than {}, but {}'.format(self.tab_size * (level + 1), self.tab_size * level)))
                continue
            else:
                # Dedent: pop the stack back to the matching level.
                tab_stops = tab_stops[:level + 1]
                tab_stops[-1].add(item)
        return root
sample = """
general:
gene: NDRG2
primers: primers.txt
bsa_data: bsa_data.txt
tss: TSS
tissues: Brian, Liver, Colon
motifs: motifs
p53BS: GTGCAAGGTCCGGGGCGCTTGGCA
TATAbox: TATAWAW
mir650: TGCCTCC
BamHI: GGATCC
XhoI: CTCGAG
ecorv: GATATC
ecori: GAATTC
WT1: GTGTGTGTGTGTG
HRE3: GCGTG
HRE2: GCGTG
HRE1: GCGTCC
probe: CGGGCGGCTGGACGCTTCCAGGCTCTGCTCGGCTCACCAAAACATTCCAC
pcr: Genomic-PCR
ChIP1: ChIP1-FW, ChIP1-RV
ChIP2: ChIP2-FW, ChIP2-RV
ChIP2-dash: BSP4-FW, ChIP2-RV
ChIP3: ChIP3-FW, ChIP3-RV
upstream: genome-up-stream-FW, NDRG2 cDNA 1ab-3 RV
"""
# Demo: parse the embedded sample document and print the resulting tree.
if __name__=='__main__':
    import io
    parser = TreekvParser()
    kv = parser.readfp(io.StringIO(sample))
    print(kv.text())
| {
"content_hash": "20f7bf5c50e8b098f90aebba1574dc7d",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 169,
"avg_line_length": 25.894736842105264,
"alnum_prop": 0.5129768605378362,
"repo_name": "mizuy/seqtool",
"id": "153aa550812711a4079fd31b420eb26ceb2b75b1",
"size": "6396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "seqtool/util/parser.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2913"
},
{
"name": "C++",
"bytes": "36987"
},
{
"name": "Perl",
"bytes": "507"
},
{
"name": "Python",
"bytes": "228761"
},
{
"name": "Shell",
"bytes": "287"
}
],
"symlink_target": ""
} |
"""
MoinMoin - Create list of LikePages
@copyright: 2004 Johannes Berg <johannes@sipsolutions.de>
@license: GNU GPL, see COPYING for details.
"""
Dependencies = ['namespace']
from MoinMoin.action import LikePages
def macro_LikePages(macro, text=u'(none)'):
    """Render a list of pages similar to the current page.

    Renders *text* instead when no similar pages are found; spider (bot)
    requests get an empty string so no CPU is spent on them.
    """
    request = macro.request

    # bots get nothing: don't burn CPU for spiders requesting nonexisting pages
    if request.isSpiderAgent:
        return ''

    pagename = macro.formatter.page.page_name

    # Get matches
    start, end, matches = LikePages.findMatches(pagename, request)

    # Render matches
    if matches and not isinstance(matches, (str, unicode)):
        return request.redirectedOutput(LikePages.showMatches, pagename,
                                        request, start, end, matches, False)

    # if we did not find any similar pages, we just render the text we got as argument:
    return request.formatter.text(text)
| {
"content_hash": "662cb3acb876d0edf02838283ec79d00",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 113,
"avg_line_length": 33.06896551724138,
"alnum_prop": 0.67570385818561,
"repo_name": "RealTimeWeb/wikisite",
"id": "cae6778ce335160b0cffb4c35b15c15c0baaa1a3",
"size": "988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MoinMoin/macro/LikePages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "49395"
},
{
"name": "CSS",
"bytes": "204104"
},
{
"name": "ColdFusion",
"bytes": "142312"
},
{
"name": "Java",
"bytes": "491798"
},
{
"name": "JavaScript",
"bytes": "2107106"
},
{
"name": "Lasso",
"bytes": "23464"
},
{
"name": "Makefile",
"bytes": "4950"
},
{
"name": "PHP",
"bytes": "144585"
},
{
"name": "Perl",
"bytes": "44627"
},
{
"name": "Python",
"bytes": "7647140"
},
{
"name": "Shell",
"bytes": "335"
}
],
"symlink_target": ""
} |
"""
This module houses the main classes you will interact with,
:class:`.Cluster` and :class:`.Session`.
"""
from __future__ import absolute_import
import atexit
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
import logging
from random import random
import socket
import sys
import time
from threading import Lock, RLock, Thread, Event
import six
from six.moves import range
from six.moves import queue as Queue
import weakref
from weakref import WeakValueDictionary
try:
from weakref import WeakSet
except ImportError:
from cassandra.util import WeakSet # NOQA
from functools import partial, wraps
from itertools import groupby
from cassandra import (ConsistencyLevel, AuthenticationFailed,
InvalidRequest, OperationTimedOut,
UnsupportedOperation, Unauthorized)
from cassandra.connection import (ConnectionException, ConnectionShutdown,
ConnectionHeartbeat)
from cassandra.cqltypes import UserType
from cassandra.encoder import Encoder
from cassandra.protocol import (QueryMessage, ResultMessage,
ErrorMessage, ReadTimeoutErrorMessage,
WriteTimeoutErrorMessage,
UnavailableErrorMessage,
OverloadedErrorMessage,
PrepareMessage, ExecuteMessage,
PreparedQueryNotFound,
IsBootstrappingErrorMessage,
BatchMessage, RESULT_KIND_PREPARED,
RESULT_KIND_SET_KEYSPACE, RESULT_KIND_ROWS,
RESULT_KIND_SCHEMA_CHANGE)
from cassandra.metadata import Metadata, protect_name
from cassandra.policies import (RoundRobinPolicy, SimpleConvictionPolicy,
ExponentialReconnectionPolicy, HostDistance,
RetryPolicy)
from cassandra.pool import (Host, _ReconnectionHandler, _HostReconnectionHandler,
HostConnectionPool, HostConnection,
NoConnectionsAvailable)
from cassandra.query import (SimpleStatement, PreparedStatement, BoundStatement,
BatchStatement, bind_params, QueryTrace, Statement,
named_tuple_factory, dict_factory, FETCH_SIZE_UNSET)
def _is_eventlet_monkey_patched():
if 'eventlet.patcher' not in sys.modules:
return False
import eventlet.patcher
return eventlet.patcher.is_monkey_patched('socket')
# default to gevent when we are monkey patched with gevent, eventlet when
# monkey patched with eventlet, otherwise if libev is available, use that as
# the default because it's fastest. Otherwise, use asyncore.
if 'gevent.monkey' in sys.modules:
from cassandra.io.geventreactor import GeventConnection as DefaultConnection
elif _is_eventlet_monkey_patched():
from cassandra.io.eventletreactor import EventletConnection as DefaultConnection
else:
try:
from cassandra.io.libevreactor import LibevConnection as DefaultConnection # NOQA
except ImportError:
from cassandra.io.asyncorereactor import AsyncoreConnection as DefaultConnection # NOQA
# Forces load of utf8 encoding module to avoid deadlock that occurs
# if code that is being imported tries to import the module in a separate
# thread.
# See http://bugs.python.org/issue10923
"".encode('utf8')

log = logging.getLogger(__name__)

# Pool sizing defaults for protocol v1/v2 connection pools: requests allowed
# in flight per connection, and connections opened per host by distance.
DEFAULT_MIN_REQUESTS = 5
DEFAULT_MAX_REQUESTS = 100

DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST = 2
DEFAULT_MAX_CONNECTIONS_PER_LOCAL_HOST = 8

DEFAULT_MIN_CONNECTIONS_PER_REMOTE_HOST = 1
DEFAULT_MAX_CONNECTIONS_PER_REMOTE_HOST = 2

# Sentinel for "argument not supplied" where None is itself meaningful.
_NOT_SET = object()
class NoHostAvailable(Exception):
    """
    Raised when an operation is attempted but all connections are
    busy, defunct, closed, or resulted in errors when used.
    """

    errors = None
    """
    A map of the form ``{ip: exception}`` which details the particular
    Exception that was caught for each host the operation was attempted
    against.
    """

    def __init__(self, message, errors):
        super(NoHostAvailable, self).__init__(message, errors)
        self.errors = errors
def _future_completed(future):
""" Helper for run_in_executor() """
exc = future.exception()
if exc:
log.debug("Failed to run task on executor", exc_info=exc)
def run_in_executor(f):
    """
    A decorator that submits the wrapped method to ``self.executor``.

    Becomes a no-op once ``self.is_shutdown`` is set; submission failures
    are logged rather than propagated.
    """
    @wraps(f)
    def wrapper(self, *args, **kwargs):
        if self.is_shutdown:
            return
        try:
            task = self.executor.submit(f, self, *args, **kwargs)
            task.add_done_callback(_future_completed)
        except Exception:
            log.exception("Failed to submit task to executor")

    return wrapper
def _shutdown_cluster(cluster):
if cluster and not cluster.is_shutdown:
cluster.shutdown()
class Cluster(object):
"""
The main class to use when interacting with a Cassandra cluster.
Typically, one instance of this class will be created for each
separate Cassandra cluster that your application interacts with.
Example usage::
>>> from cassandra.cluster import Cluster
>>> cluster = Cluster(['192.168.1.1', '192.168.1.2'])
>>> session = cluster.connect()
>>> session.execute("CREATE KEYSPACE ...")
>>> ...
>>> cluster.shutdown()
"""
contact_points = ['127.0.0.1']
"""
The list of contact points to try connecting for cluster discovery.
Defaults to loopback interface.
Note: When using :class:`.DCAwareLoadBalancingPolicy` with no explicit
local_dc set, the DC is chosen from an arbitrary host in contact_points.
In this case, contact_points should contain only nodes from a single,
local DC.
"""
port = 9042
"""
The server-side port to open connections to. Defaults to 9042.
"""
cql_version = None
"""
If a specific version of CQL should be used, this may be set to that
string version. Otherwise, the highest CQL version supported by the
server will be automatically used.
"""
protocol_version = 2
"""
The version of the native protocol to use.
Version 2 of the native protocol adds support for lightweight transactions,
batch operations, and automatic query paging. The v2 protocol is
supported by Cassandra 2.0+.
Version 3 of the native protocol adds support for protocol-level
client-side timestamps (see :attr:`.Session.use_client_timestamp`),
serial consistency levels for :class:`~.BatchStatement`, and an
improved connection pool.
The following table describes the native protocol versions that
are supported by each version of Cassandra:
+-------------------+-------------------+
| Cassandra Version | Protocol Versions |
+===================+===================+
| 1.2 | 1 |
+-------------------+-------------------+
| 2.0 | 1, 2 |
+-------------------+-------------------+
| 2.1 | 1, 2, 3 |
+-------------------+-------------------+
"""
compression = True
"""
Controls compression for communications between the driver and Cassandra.
If left as the default of :const:`True`, either lz4 or snappy compression
may be used, depending on what is supported by both the driver
and Cassandra. If both are fully supported, lz4 will be preferred.
You may also set this to 'snappy' or 'lz4' to request that specific
compression type.
Setting this to :const:`False` disables compression.
"""
_auth_provider = None
_auth_provider_callable = None
@property
def auth_provider(self):
    """
    When :attr:`~.Cluster.protocol_version` is 2 or higher, this should
    be an instance of a subclass of :class:`~cassandra.auth.AuthProvider`,
    such as :class:`~.PlainTextAuthProvider`.

    When :attr:`~.Cluster.protocol_version` is 1, this should be
    a function that accepts one argument, the IP address of a node,
    and returns a dict of credentials for that node.

    When not using authentication, this should be left as :const:`None`.
    """
    return self._auth_provider

@auth_provider.setter  # noqa
def auth_provider(self, value):
    # Clearing the provider needs no validation.
    if not value:
        self._auth_provider = value
        return

    # Protocol v2+ requires the AuthProvider interface (detected by the
    # presence of new_authenticator); v1 accepts any callable that returns
    # per-host credentials.
    try:
        self._auth_provider_callable = value.new_authenticator
    except AttributeError:
        if self.protocol_version > 1:
            raise TypeError("auth_provider must implement the cassandra.auth.AuthProvider "
                            "interface when protocol_version >= 2")
        elif not callable(value):
            raise TypeError("auth_provider must be callable when protocol_version == 1")
        self._auth_provider_callable = value

    self._auth_provider = value
load_balancing_policy = None
"""
An instance of :class:`.policies.LoadBalancingPolicy` or
one of its subclasses. Defaults to :class:`~.RoundRobinPolicy`.
"""
reconnection_policy = ExponentialReconnectionPolicy(1.0, 600.0)
"""
An instance of :class:`.policies.ReconnectionPolicy`. Defaults to an instance
of :class:`.ExponentialReconnectionPolicy` with a base delay of one second and
a max delay of ten minutes.
"""
default_retry_policy = RetryPolicy()
"""
A default :class:`.policies.RetryPolicy` instance to use for all
:class:`.Statement` objects which do not have a :attr:`~.Statement.retry_policy`
explicitly set.
"""
conviction_policy_factory = SimpleConvictionPolicy
"""
A factory function which creates instances of
:class:`.policies.ConvictionPolicy`. Defaults to
:class:`.policies.SimpleConvictionPolicy`.
"""
connect_to_remote_hosts = True
"""
If left as :const:`True`, hosts that are considered :attr:`~.HostDistance.REMOTE`
by the :attr:`~.Cluster.load_balancing_policy` will have a connection
opened to them. Otherwise, they will not have a connection opened to them.
.. versionadded:: 2.1.0
"""
metrics_enabled = False
"""
Whether or not metric collection is enabled. If enabled, :attr:`.metrics`
will be an instance of :class:`~cassandra.metrics.Metrics`.
"""
metrics = None
"""
An instance of :class:`cassandra.metrics.Metrics` if :attr:`.metrics_enabled` is
:const:`True`, else :const:`None`.
"""
ssl_options = None
"""
A optional dict which will be used as kwargs for ``ssl.wrap_socket()``
when new sockets are created. This should be used when client encryption
is enabled in Cassandra.
By default, a ``ca_certs`` value should be supplied (the value should be
a string pointing to the location of the CA certs file), and you probably
want to specify ``ssl_version`` as ``ssl.PROTOCOL_TLSv1`` to match
Cassandra's default protocol.
"""
sockopts = None
"""
An optional list of tuples which will be used as arguments to
``socket.setsockopt()`` for all created sockets.
"""
max_schema_agreement_wait = 10
"""
The maximum duration (in seconds) that the driver will wait for schema
agreement across the cluster. Defaults to ten seconds.
If set <= 0, the driver will bypass schema agreement waits altogether.
"""
metadata = None
"""
An instance of :class:`cassandra.metadata.Metadata`.
"""
connection_class = DefaultConnection
"""
This determines what event loop system will be used for managing
I/O with Cassandra. These are the current options:
* :class:`cassandra.io.asyncorereactor.AsyncoreConnection`
* :class:`cassandra.io.libevreactor.LibevConnection`
* :class:`cassandra.io.geventreactor.GeventConnection` (requires monkey-patching)
* :class:`cassandra.io.twistedreactor.TwistedConnection`
By default, ``AsyncoreConnection`` will be used, which uses
the ``asyncore`` module in the Python standard library. The
performance is slightly worse than with ``libev``, but it is
supported on a wider range of systems.
If ``libev`` is installed, ``LibevConnection`` will be used instead.
If gevent monkey-patching of the standard library is detected,
GeventConnection will be used automatically.
"""
control_connection_timeout = 2.0
"""
A timeout, in seconds, for queries made by the control connection, such
as querying the current schema and information about nodes in the cluster.
If set to :const:`None`, there will be no timeout for these queries.
"""
idle_heartbeat_interval = 30
"""
Interval, in seconds, on which to heartbeat idle connections. This helps
keep connections open through network devices that expire idle connections.
It also helps discover bad connections early in low-traffic scenarios.
Setting to zero disables heartbeats.
"""
schema_event_refresh_window = 2
"""
Window, in seconds, within which a schema component will be refreshed after
receiving a schema_change event.
The driver delays a random amount of time in the range [0.0, window)
before executing the refresh. This serves two purposes:
1.) Spread the refresh for deployments with large fanout from C* to client tier,
preventing a 'thundering herd' problem with many clients refreshing simultaneously.
2.) Remove redundant refreshes. Redundant events arriving within the delay period
are discarded, and only one refresh is executed.
Setting this to zero will execute refreshes immediately.
Setting this negative will disable schema refreshes in response to push events
(refreshes will still occur in response to schema change responses to DDL statements
executed by Sessions of this Cluster).
"""
topology_event_refresh_window = 10
"""
Window, in seconds, within which the node and token list will be refreshed after
receiving a topology_change event.
Setting this to zero will execute refreshes immediately.
Setting this negative will disable node refreshes in response to push events
(refreshes will still occur in response to new nodes observed on "UP" events).
See :attr:`.schema_event_refresh_window` for discussion of rationale
"""
sessions = None
control_connection = None
scheduler = None
executor = None
is_shutdown = False
_is_setup = False
_prepared_statements = None
_prepared_statement_lock = None
_idle_heartbeat = None
_user_types = None
"""
A map of {keyspace: {type_name: UserType}}
"""
_listeners = None
_listener_lock = None
def __init__(self,
             contact_points=_NOT_SET,
             port=9042,
             compression=True,
             auth_provider=None,
             load_balancing_policy=None,
             reconnection_policy=None,
             default_retry_policy=None,
             conviction_policy_factory=None,
             metrics_enabled=False,
             connection_class=None,
             ssl_options=None,
             sockopts=None,
             cql_version=None,
             protocol_version=2,
             executor_threads=2,
             max_schema_agreement_wait=10,
             control_connection_timeout=2.0,
             idle_heartbeat_interval=30,
             schema_event_refresh_window=2,
             topology_event_refresh_window=10):
    """
    Any of the mutable Cluster attributes may be set as keyword arguments
    to the constructor.
    """
    # Avoid the shared mutable default argument ``contact_points=["127.0.0.1"]``:
    # build a fresh list per call instead. Passing None still falls through to
    # the class-level default, as before.
    if contact_points is _NOT_SET:
        contact_points = ["127.0.0.1"]

    if contact_points is not None:
        if isinstance(contact_points, six.string_types):
            raise TypeError("contact_points should not be a string, it should be a sequence (e.g. list) of strings")
        self.contact_points = contact_points

    self.port = port
    self.compression = compression
    # Must be set before auth_provider: the auth_provider setter validates
    # against protocol_version.
    self.protocol_version = protocol_version
    self.auth_provider = auth_provider

    # Policies must be instances, not classes -- a common caller mistake.
    if load_balancing_policy is not None:
        if isinstance(load_balancing_policy, type):
            raise TypeError("load_balancing_policy should not be a class, it should be an instance of that class")
        self.load_balancing_policy = load_balancing_policy
    else:
        # fresh instance per Cluster; the policy keeps per-cluster state
        self.load_balancing_policy = RoundRobinPolicy()

    if reconnection_policy is not None:
        if isinstance(reconnection_policy, type):
            raise TypeError("reconnection_policy should not be a class, it should be an instance of that class")
        self.reconnection_policy = reconnection_policy

    if default_retry_policy is not None:
        if isinstance(default_retry_policy, type):
            raise TypeError("default_retry_policy should not be a class, it should be an instance of that class")
        self.default_retry_policy = default_retry_policy

    if conviction_policy_factory is not None:
        if not callable(conviction_policy_factory):
            raise ValueError("conviction_policy_factory must be callable")
        self.conviction_policy_factory = conviction_policy_factory

    if connection_class is not None:
        self.connection_class = connection_class

    self.metrics_enabled = metrics_enabled
    self.ssl_options = ssl_options
    self.sockopts = sockopts
    self.cql_version = cql_version
    self.max_schema_agreement_wait = max_schema_agreement_wait
    self.control_connection_timeout = control_connection_timeout
    self.idle_heartbeat_interval = idle_heartbeat_interval
    self.schema_event_refresh_window = schema_event_refresh_window
    self.topology_event_refresh_window = topology_event_refresh_window

    self._listeners = set()
    self._listener_lock = Lock()

    # let Session objects be GC'ed (and shutdown) when the user no longer
    # holds a reference.
    self.sessions = WeakSet()
    self.metadata = Metadata()
    self.control_connection = None
    self._prepared_statements = WeakValueDictionary()
    self._prepared_statement_lock = Lock()

    self._user_types = defaultdict(dict)

    # Per-HostDistance pool tuning; only used with protocol v1/v2 pools.
    self._min_requests_per_connection = {
        HostDistance.LOCAL: DEFAULT_MIN_REQUESTS,
        HostDistance.REMOTE: DEFAULT_MIN_REQUESTS
    }

    self._max_requests_per_connection = {
        HostDistance.LOCAL: DEFAULT_MAX_REQUESTS,
        HostDistance.REMOTE: DEFAULT_MAX_REQUESTS
    }

    self._core_connections_per_host = {
        HostDistance.LOCAL: DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST,
        HostDistance.REMOTE: DEFAULT_MIN_CONNECTIONS_PER_REMOTE_HOST
    }

    self._max_connections_per_host = {
        HostDistance.LOCAL: DEFAULT_MAX_CONNECTIONS_PER_LOCAL_HOST,
        HostDistance.REMOTE: DEFAULT_MAX_CONNECTIONS_PER_REMOTE_HOST
    }

    self.executor = ThreadPoolExecutor(max_workers=executor_threads)
    self.scheduler = _Scheduler(self.executor)

    self._lock = RLock()

    if self.metrics_enabled:
        # imported lazily so metrics support stays optional
        from cassandra.metrics import Metrics
        self.metrics = Metrics(weakref.proxy(self))

    self.control_connection = ControlConnection(
        self, self.control_connection_timeout,
        self.schema_event_refresh_window, self.topology_event_refresh_window)
def register_user_type(self, keyspace, user_type, klass):
    """
    Registers a class to use to represent a particular user-defined type.
    Query parameters for this user-defined type will be assumed to be
    instances of `klass`. Result sets for this user-defined type will
    be instances of `klass`. If no class is registered for a user-defined
    type, a namedtuple will be used for result sets, and non-prepared
    statements may not encode parameters for this type correctly.

    `keyspace` is the name of the keyspace that the UDT is defined in.

    `user_type` is the string name of the UDT to register the mapping
    for.

    `klass` should be a class with attributes whose names match the
    fields of the user-defined type. The constructor must accepts kwargs
    for each of the fields in the UDT.

    This method should only be called after the type has been created
    within Cassandra.

    Example::

        cluster = Cluster(protocol_version=3)
        session = cluster.connect()
        session.set_keyspace('mykeyspace')
        session.execute("CREATE TYPE address (street text, zipcode int)")
        session.execute("CREATE TABLE users (id int PRIMARY KEY, location address)")

        # create a class to map to the "address" UDT
        class Address(object):

            def __init__(self, street, zipcode):
                self.street = street
                self.zipcode = zipcode

        cluster.register_user_type('mykeyspace', 'address', Address)

        # insert a row using an instance of Address
        session.execute("INSERT INTO users (id, location) VALUES (%s, %s)",
                        (0, Address("123 Main St.", 78723)))

        # results will include Address instances
        results = session.execute("SELECT * FROM users")
        row = results[0]
        print row.id, row.location.street, row.location.zipcode
    """
    self._user_types[keyspace][user_type] = klass
    # Propagate the mapping to existing sessions so their encoders pick it up.
    for session in self.sessions:
        session.user_type_registered(keyspace, user_type, klass)
    # Drop any cached UDT class so the next decode uses `klass`.
    UserType.evict_udt_class(keyspace, user_type)
def get_min_requests_per_connection(self, host_distance):
    # In-flight request threshold below which pool connections may be
    # reclaimed; only meaningful for protocol v1/v2 pools.
    return self._min_requests_per_connection[host_distance]
def set_min_requests_per_connection(self, host_distance, min_requests):
    # Only protocol v1/v2 pools vary their connection count by load.
    if self.protocol_version >= 3:
        raise UnsupportedOperation(
            "Cluster.set_min_requests_per_connection() only has an effect "
            "when using protocol_version 1 or 2.")
    self._min_requests_per_connection[host_distance] = min_requests
def get_max_requests_per_connection(self, host_distance):
    # In-flight request threshold above which pools open extra connections;
    # only meaningful for protocol v1/v2 pools.
    return self._max_requests_per_connection[host_distance]
def set_max_requests_per_connection(self, host_distance, max_requests):
    # Only protocol v1/v2 pools vary their connection count by load.
    if self.protocol_version >= 3:
        raise UnsupportedOperation(
            "Cluster.set_max_requests_per_connection() only has an effect "
            "when using protocol_version 1 or 2.")
    self._max_requests_per_connection[host_distance] = max_requests
def get_core_connections_per_host(self, host_distance):
    """
    Gets the minimum number of connections per Session that will be opened
    for each host with :class:`~.HostDistance` equal to `host_distance`.
    The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for
    :attr:`~HostDistance.REMOTE`.

    This property is ignored if :attr:`~.Cluster.protocol_version` is
    3 or higher.
    """
    # dict keyed by HostDistance, populated in __init__
    return self._core_connections_per_host[host_distance]
def set_core_connections_per_host(self, host_distance, core_connections):
    """
    Sets the minimum number of connections per Session that will be opened
    for each host with :class:`~.HostDistance` equal to `host_distance`.
    The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for
    :attr:`~HostDistance.REMOTE`.

    If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this
    is not supported (there is always one connection per host, unless
    the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`)
    and using this will result in an :exc:`~.UnsupportedOperation`.
    """
    if self.protocol_version >= 3:
        raise UnsupportedOperation(
            "Cluster.set_core_connections_per_host() only has an effect "
            "when using protocol_version 1 or 2.")
    old = self._core_connections_per_host[host_distance]
    self._core_connections_per_host[host_distance] = core_connections
    if old < core_connections:
        # raising the minimum: open the additional connections now
        self._ensure_core_connections()
def get_max_connections_per_host(self, host_distance):
    """
    Gets the maximum number of connections per Session that will be opened
    for each host with :class:`~.HostDistance` equal to `host_distance`.
    The default is 8 for :attr:`~HostDistance.LOCAL` and 2 for
    :attr:`~HostDistance.REMOTE`.

    This property is ignored if :attr:`~.Cluster.protocol_version` is
    3 or higher.
    """
    # dict keyed by HostDistance, populated in __init__
    return self._max_connections_per_host[host_distance]
def set_max_connections_per_host(self, host_distance, max_connections):
    """
    Sets the maximum number of connections per Session that will be opened
    for each host with :class:`~.HostDistance` equal to `host_distance`.
    The default is 8 for :attr:`~HostDistance.LOCAL` and 2 for
    :attr:`~HostDistance.REMOTE` (the defaults set in __init__ from
    DEFAULT_MAX_CONNECTIONS_PER_*_HOST).

    If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this
    is not supported (there is always one connection per host, unless
    the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`)
    and using this will result in an :exc:`~.UnsupportedOperation`.
    """
    if self.protocol_version >= 3:
        raise UnsupportedOperation(
            "Cluster.set_max_connections_per_host() only has an effect "
            "when using protocol_version 1 or 2.")
    self._max_connections_per_host[host_distance] = max_connections
def connection_factory(self, address, *args, **kwargs):
    """
    Called to create a new connection with proper configuration.
    Intended for internal use only.
    """
    # Merge cluster-level settings (port, compression, auth, ssl, ...) into
    # the caller-supplied kwargs before delegating to the connection class.
    kwargs = self._make_connection_kwargs(address, kwargs)
    return self.connection_class.factory(address, *args, **kwargs)
def _make_connection_factory(self, host, *args, **kwargs):
    # Bind all connection settings now so the returned partial can create
    # connections without holding a reference back to this Cluster.
    kwargs = self._make_connection_kwargs(host.address, kwargs)
    return partial(self.connection_class.factory, host.address, *args, **kwargs)
def _make_connection_kwargs(self, address, kwargs_dict):
    """Fill *kwargs_dict* with this cluster's connection settings and return it."""
    if self._auth_provider_callable:
        kwargs_dict['authenticator'] = self._auth_provider_callable(address)

    kwargs_dict.update(
        port=self.port,
        compression=self.compression,
        sockopts=self.sockopts,
        ssl_options=self.ssl_options,
        cql_version=self.cql_version,
        protocol_version=self.protocol_version,
        user_type_map=self._user_types,
    )
    return kwargs_dict
def connect(self, keyspace=None):
    """
    Creates and returns a new :class:`~.Session` object. If `keyspace`
    is specified, that keyspace will be the default keyspace for
    operations on the ``Session``.
    """
    with self._lock:
        if self.is_shutdown:
            raise Exception("Cluster is already shut down")

        # One-time cluster bootstrap, guarded by _is_setup so that only
        # the first connect() pays the cost.
        if not self._is_setup:
            log.debug("Connecting to cluster, contact points: %s; protocol version: %s",
                      self.contact_points, self.protocol_version)
            self.connection_class.initialize_reactor()
            # partial over the module-level helper keeps atexit from
            # pinning a bound method of self
            atexit.register(partial(_shutdown_cluster, self))
            for address in self.contact_points:
                # signal=False: suppress the usual on_add handling while
                # seeding the initial host list
                host, new = self.add_host(address, signal=False)
                if new:
                    host.set_up()
                    for listener in self.listeners:
                        listener.on_add(host)

            self.load_balancing_policy.populate(
                weakref.proxy(self), self.metadata.all_hosts())

            try:
                self.control_connection.connect()
                log.debug("Control connection created")
            except Exception:
                # without a control connection the cluster is unusable;
                # tear everything down before propagating
                log.exception("Control connection failed to connect, "
                              "shutting down Cluster:")
                self.shutdown()
                raise
            self.load_balancing_policy.check_supported()

            if self.idle_heartbeat_interval:
                self._idle_heartbeat = ConnectionHeartbeat(self.idle_heartbeat_interval, self.get_connection_holders)
            self._is_setup = True

    session = self._new_session()
    if keyspace:
        session.set_keyspace(keyspace)
    return session
def get_connection_holders(self):
    """Return every connection owner: all session pools plus the control connection."""
    holders = [pool for session in self.sessions for pool in session.get_pools()]
    holders.append(self.control_connection)
    return holders
def shutdown(self):
    """
    Closes all sessions and connection associated with this Cluster.
    To ensure all connections are properly closed, **you should always
    call shutdown() on a Cluster instance when you are done with it**.

    Once shutdown, a Cluster should not be used for any purpose.
    """
    with self._lock:
        # idempotent: only the first caller performs the teardown
        if self.is_shutdown:
            return
        else:
            self.is_shutdown = True

    if self._idle_heartbeat:
        self._idle_heartbeat.stop()

    self.scheduler.shutdown()

    self.control_connection.shutdown()

    for session in self.sessions:
        session.shutdown()

    self.executor.shutdown()
def _new_session(self):
    # Build a Session over all known hosts and replay previously registered
    # UDT mappings so its encoders match the cluster configuration.
    session = Session(self, self.metadata.all_hosts())
    for keyspace, type_map in six.iteritems(self._user_types):
        for udt_name, klass in six.iteritems(type_map):
            session.user_type_registered(keyspace, udt_name, klass)
    # WeakSet: sessions are dropped automatically when callers release them.
    self.sessions.add(session)
    return session
def _cleanup_failed_on_up_handling(self, host):
    # Roll back a failed on_up(): mark the host down with the policies,
    # drop its pools, and start background reconnection attempts.
    self.load_balancing_policy.on_down(host)
    self.control_connection.on_down(host)
    for session in self.sessions:
        session.remove_pool(host)

    self._start_reconnector(host, is_host_addition=False)
def _on_up_future_completed(self, host, futures, results, lock, finished_future):
    # Callback attached to each pool-creation future scheduled by on_up();
    # the invocation that drains the `futures` set performs the finalization.
    with lock:
        futures.discard(finished_future)

        try:
            results.append(finished_future.result())
        except Exception as exc:
            results.append(exc)

        if futures:
            # other pool futures are still outstanding
            return

    try:
        # all futures have completed at this point
        for exc in [f for f in results if isinstance(f, Exception)]:
            log.error("Unexpected failure while marking node %s up:", host, exc_info=exc)
            self._cleanup_failed_on_up_handling(host)
            return

        if not all(results):
            log.debug("Connection pool could not be created, not marking node %s up", host)
            self._cleanup_failed_on_up_handling(host)
            return

        log.info("Connection pools established for node %s", host)
        # mark the host as up and notify all listeners
        host.set_up()
        for listener in self.listeners:
            listener.on_up(host)
    finally:
        # release the re-entrancy guard taken in on_up()
        with host.lock:
            host._currently_handling_node_up = False

    # see if there are any pools to add or remove now that the host is marked up
    for session in self.sessions:
        session.update_created_pools()
def on_up(self, host):
    """
    Intended for internal use only.
    """
    if self.is_shutdown:
        return

    log.debug("Waiting to acquire lock for handling up status of node %s", host)
    with host.lock:
        # _currently_handling_node_up serializes concurrent on_up() calls
        # for the same host across threads
        if host._currently_handling_node_up:
            log.debug("Another thread is already handling up status of node %s", host)
            return

        if host.is_up:
            log.debug("Host %s was already marked up", host)
            return

        host._currently_handling_node_up = True
    log.debug("Starting to handle up status of node %s", host)

    have_future = False
    futures = set()
    try:
        log.info("Host %s may be up; will prepare queries and open connection pool", host)

        reconnector = host.get_and_set_reconnection_handler(None)
        if reconnector:
            log.debug("Now that host %s is up, cancelling the reconnection handler", host)
            reconnector.cancel()

        self._prepare_all_queries(host)
        log.debug("Done preparing all queries for host %s, ", host)

        # drop any stale pools before opening fresh ones below
        for session in self.sessions:
            session.remove_pool(host)

        log.debug("Signalling to load balancing policy that host %s is up", host)
        self.load_balancing_policy.on_up(host)

        log.debug("Signalling to control connection that host %s is up", host)
        self.control_connection.on_up(host)

        log.debug("Attempting to open new connection pools for host %s", host)
        futures_lock = Lock()
        futures_results = []
        # _on_up_future_completed finalizes once the futures set drains
        callback = partial(self._on_up_future_completed, host, futures, futures_results, futures_lock)
        for session in self.sessions:
            future = session.add_or_renew_pool(host, is_host_addition=False)
            if future is not None:
                have_future = True
                future.add_done_callback(callback)
                futures.add(future)
    except Exception:
        log.exception("Unexpected failure handling node %s being marked up:", host)
        for future in futures:
            future.cancel()

        self._cleanup_failed_on_up_handling(host)

        with host.lock:
            host._currently_handling_node_up = False
        raise
    else:
        # no pool futures were scheduled, so no callback will release the
        # guard; release it here
        if not have_future:
            with host.lock:
                host._currently_handling_node_up = False

    # for testing purposes
    return futures
def _start_reconnector(self, host, is_host_addition):
    # Schedule a background reconnection handler for *host*, replacing and
    # cancelling any handler already registered. IGNORED hosts are skipped.
    if self.load_balancing_policy.distance(host) == HostDistance.IGNORED:
        return

    schedule = self.reconnection_policy.new_schedule()

    # in order to not hold references to this Cluster open and prevent
    # proper shutdown when the program ends, we'll just make a closure
    # of the current Cluster attributes to create new Connections with
    conn_factory = self._make_connection_factory(host)

    reconnector = _HostReconnectionHandler(
        host, conn_factory, is_host_addition, self.on_add, self.on_up,
        self.scheduler, schedule, host.get_and_set_reconnection_handler,
        new_handler=None)

    old_reconnector = host.get_and_set_reconnection_handler(reconnector)
    if old_reconnector:
        log.debug("Old host reconnector found for %s, cancelling", host)
        old_reconnector.cancel()

    log.debug("Starting reconnector for host %s", host)
    reconnector.start()
@run_in_executor
def on_down(self, host, is_host_addition, expect_host_to_be_down=False):
    """
    Intended for internal use only.
    """
    if self.is_shutdown:
        return

    with host.lock:
        # skip if the host is already down (unless the caller expected it
        # to be) or a reconnection attempt is already in flight
        if (not host.is_up and not expect_host_to_be_down) or host.is_currently_reconnecting():
            return

        host.set_down()

    log.warning("Host %s has been marked down", host)

    self.load_balancing_policy.on_down(host)
    self.control_connection.on_down(host)
    for session in self.sessions:
        session.on_down(host)

    for listener in self.listeners:
        listener.on_down(host)

    self._start_reconnector(host, is_host_addition)
def on_add(self, host, refresh_nodes=True):
    """Handle a newly discovered host.

    Prepares known statements against it, notifies policies and the
    control connection, then opens a connection pool for it in every
    session; the host is only finalized (marked up) once all pool
    creations succeed.
    """
    if self.is_shutdown:
        return

    log.debug("Handling new host %r and notifying listeners", host)

    distance = self.load_balancing_policy.distance(host)
    if distance != HostDistance.IGNORED:
        self._prepare_all_queries(host)
        log.debug("Done preparing queries for new host %r", host)

    self.load_balancing_policy.on_add(host)
    self.control_connection.on_add(host, refresh_nodes)

    if distance == HostDistance.IGNORED:
        log.debug("Not adding connection pool for new host %r because the "
                  "load balancing policy has marked it as IGNORED", host)
        self._finalize_add(host)
        return

    futures_lock = Lock()
    futures_results = []
    futures = set()

    def future_completed(future):
        # Collect each pool-creation result under the lock; the future
        # that empties the set decides whether the host can be marked up.
        with futures_lock:
            futures.discard(future)

            try:
                futures_results.append(future.result())
            except Exception as exc:
                futures_results.append(exc)

            if futures:
                return

        log.debug('All futures have completed for added host %s', host)

        # Any exception means we do not mark the host up.
        for exc in [f for f in futures_results if isinstance(f, Exception)]:
            log.error("Unexpected failure while adding node %s, will not mark up:", host, exc_info=exc)
            return

        # add_or_renew_pool resolves to False when pool creation failed.
        if not all(futures_results):
            log.warning("Connection pool could not be created, not marking node %s up", host)
            return

        self._finalize_add(host)

    have_future = False
    for session in self.sessions:
        future = session.add_or_renew_pool(host, is_host_addition=True)
        if future is not None:
            have_future = True
            futures.add(future)
            future.add_done_callback(future_completed)

    if not have_future:
        self._finalize_add(host)
def _finalize_add(self, host):
    """Mark ``host`` up, notify listeners, and let every session
    reconcile its connection pools."""
    host.set_up()

    for watcher in self.listeners:
        watcher.on_add(host)

    # Pools may need adding or removing now that the host is marked up.
    for sess in self.sessions:
        sess.update_created_pools()
def on_remove(self, host):
    """Handle a host leaving the ring: mark it down and notify the
    policy, sessions, listeners, and control connection."""
    if self.is_shutdown:
        return

    log.debug("Removing host %s", host)
    host.set_down()
    self.load_balancing_policy.on_remove(host)
    for sess in self.sessions:
        sess.on_remove(host)
    for watcher in self.listeners:
        watcher.on_remove(host)
    self.control_connection.on_remove(host)
def signal_connection_failure(self, host, connection_exc, is_host_addition, expect_host_to_be_down=False):
    """Record a connection failure against ``host``; if the host reports
    itself down as a result, run the on_down handling. Returns whether
    the host was marked down."""
    marked_down = host.signal_connection_failure(connection_exc)
    if marked_down:
        self.on_down(host, is_host_addition, expect_host_to_be_down)
    return marked_down
def add_host(self, address, datacenter=None, rack=None, signal=True, refresh_nodes=True):
    """
    Called when adding initial contact points and when the control
    connection subsequently discovers a new node.

    Returns a Host instance, and a flag indicating whether it was new in
    the metadata.

    Intended for internal use only.
    """
    candidate = Host(address, self.conviction_policy_factory, datacenter, rack)
    host, new = self.metadata.add_or_return_host(candidate)
    if new and signal:
        log.info("New Cassandra host %r discovered", host)
        self.on_add(host, refresh_nodes)
    return host, new
def remove_host(self, host):
    """
    Called when the control connection observes that a node has left the
    ring. Intended for internal use only.
    """
    if not host:
        return
    if self.metadata.remove_host(host):
        log.info("Cassandra host %s removed", host)
        self.on_remove(host)
def register_listener(self, listener):
    """
    Register a :class:`cassandra.policies.HostStateListener` subclass
    instance to be notified when a host is added, removed, marked up,
    or marked down.
    """
    with self._listener_lock:
        self._listeners.add(listener)
def unregister_listener(self, listener):
    """Remove a previously registered listener (raises KeyError if it
    was never registered)."""
    with self._listener_lock:
        self._listeners.remove(listener)
@property
def listeners(self):
    """A snapshot of the registered listener set, copied under the lock
    so iteration is safe against concurrent (un)registration."""
    with self._listener_lock:
        return set(self._listeners)
def _ensure_core_connections(self):
    """
    If any host has fewer than the configured number of core connections
    open, attempt to open connections until that number is met.
    """
    for sess in self.sessions:
        for host_pool in sess._pools.values():
            host_pool.ensure_core_connections()
def refresh_schema(self, keyspace=None, table=None, usertype=None, max_schema_agreement_wait=None):
    """
    Synchronously refresh the schema metadata.

    The timeout is normally governed by :attr:`~.Cluster.max_schema_agreement_wait`
    and :attr:`~.Cluster.control_connection_timeout`; passing
    `max_schema_agreement_wait` here overrides the former, and a value
    <= 0 bypasses schema agreement entirely.

    Raises an Exception if the refresh fails for any reason.
    """
    refreshed = self.control_connection.refresh_schema(
        keyspace, table, usertype, max_schema_agreement_wait)
    if not refreshed:
        raise Exception("Schema was not refreshed. See log for details.")
def submit_schema_refresh(self, keyspace=None, table=None, usertype=None):
    """
    Schedule an asynchronous refresh of the internal schema
    representation, limited to `keyspace` (and likewise `table`) when
    given. Returns the submitted future.
    """
    return self.executor.submit(
        self.control_connection.refresh_schema, keyspace, table, usertype)
def refresh_nodes(self):
    """
    Synchronously refresh the node list and token metadata.

    Raises an Exception if the refresh fails for any reason.
    """
    succeeded = self.control_connection.refresh_node_list_and_token_map()
    if not succeeded:
        raise Exception("Node list was not refreshed. See log for details.")
def set_meta_refresh_enabled(self, enabled):
    """
    Enable (True) or disable (False) all metadata refresh queries,
    covering both schema and node topology. Disabling is useful to
    minimize refreshes during multiple changes, but while disabled the
    driver will not become aware of any topology or schema updates.
    """
    self.control_connection.set_meta_refresh_enabled(bool(enabled))
def _prepare_all_queries(self, host):
    """Re-prepare every known prepared statement against ``host``.

    Best-effort: all errors are logged and swallowed so that host-add
    handling can continue even if preparation fails. The temporary
    connection is always closed.
    """
    if not self._prepared_statements:
        return

    log.debug("Preparing all known prepared statements against host %s", host)
    connection = None
    try:
        connection = self.connection_factory(host.address)
        try:
            self.control_connection.wait_for_schema_agreement(connection)
        except Exception:
            # Proceed anyway; worst case a statement fails to prepare below.
            log.debug("Error waiting for schema agreement before preparing statements against host %s", host, exc_info=True)

        statements = self._prepared_statements.values()
        # NOTE(review): itertools.groupby only groups *consecutive* items;
        # this assumes statements with the same keyspace are adjacent in the
        # dict's value order. Non-adjacent duplicates merely cause extra
        # set_keyspace_blocking calls — confirm that is acceptable.
        for keyspace, ks_statements in groupby(statements, lambda s: s.keyspace):
            if keyspace is not None:
                connection.set_keyspace_blocking(keyspace)

            # prepare 10 statements at a time
            ks_statements = list(ks_statements)
            chunks = []
            for i in range(0, len(ks_statements), 10):
                chunks.append(ks_statements[i:i + 10])

            for ks_chunk in chunks:
                messages = [PrepareMessage(query=s.query_string) for s in ks_chunk]
                # TODO: make this timeout configurable somehow?
                responses = connection.wait_for_responses(*messages, timeout=5.0)
                for response in responses:
                    if (not isinstance(response, ResultMessage) or
                            response.kind != RESULT_KIND_PREPARED):
                        log.debug("Got unexpected response when preparing "
                                  "statement on host %s: %r", host, response)

        log.debug("Done preparing all known prepared statements against host %s", host)
    except OperationTimedOut as timeout:
        log.warning("Timed out trying to prepare all statements on host %s: %s", host, timeout)
    except (ConnectionException, socket.error) as exc:
        log.warning("Error trying to prepare all statements on host %s: %r", host, exc)
    except Exception:
        log.exception("Error trying to prepare all statements on host %s", host)
    finally:
        if connection:
            connection.close()
def prepare_on_all_sessions(self, query_id, prepared_statement, excluded_host):
    """Cache a newly prepared statement under its query id, then prepare
    it across every session's hosts, skipping ``excluded_host`` (where
    it was originally prepared)."""
    with self._prepared_statement_lock:
        self._prepared_statements[query_id] = prepared_statement
    for sess in self.sessions:
        sess.prepare_on_all_hosts(prepared_statement.query_string, excluded_host)
class Session(object):
    """
    A collection of connection pools for each host in the cluster.
    Instances of this class should not be created directly, only
    using :meth:`.Cluster.connect()`.

    Queries and statements can be executed through ``Session`` instances
    using the :meth:`~.Session.execute()` and :meth:`~.Session.execute_async()`
    methods.

    Example usage::

        >>> session = cluster.connect()
        >>> session.set_keyspace("mykeyspace")
        >>> session.execute("SELECT * FROM mycf")
    """

    # Set in __init__; class-level values are only defaults.
    cluster = None
    hosts = None
    keyspace = None
    is_shutdown = False

    row_factory = staticmethod(named_tuple_factory)
    """
    The format to return row results in. By default, each
    returned row will be a named tuple. You can alternatively
    use any of the following:

    - :func:`cassandra.query.tuple_factory` - return a result row as a tuple
    - :func:`cassandra.query.named_tuple_factory` - return a result row as a named tuple
    - :func:`cassandra.query.dict_factory` - return a result row as a dict
    - :func:`cassandra.query.ordered_dict_factory` - return a result row as an OrderedDict
    """

    default_timeout = 10.0
    """
    A default timeout, measured in seconds, for queries executed through
    :meth:`.execute()` or :meth:`.execute_async()`. This default may be
    overridden with the `timeout` parameter for either of those methods
    or the `timeout` parameter for :meth:`.ResponseFuture.result()`.

    Setting this to :const:`None` will cause no timeouts to be set by default.

    **Important**: This timeout currently has no effect on callbacks registered
    on a :class:`~.ResponseFuture` through :meth:`.ResponseFuture.add_callback` or
    :meth:`.ResponseFuture.add_errback`; even if a query exceeds this default
    timeout, neither the registered callback or errback will be called.

    .. versionadded:: 2.0.0
    """

    default_consistency_level = ConsistencyLevel.ONE
    """
    The default :class:`~ConsistencyLevel` for operations executed through
    this session. This default may be overridden by setting the
    :attr:`~.Statement.consistency_level` on individual statements.

    .. versionadded:: 1.2.0
    """

    max_trace_wait = 2.0
    """
    The maximum amount of time (in seconds) the driver will wait for trace
    details to be populated server-side for a query before giving up.
    If the `trace` parameter for :meth:`~.execute()` or :meth:`~.execute_async()`
    is :const:`True`, the driver will repeatedly attempt to fetch trace
    details for the query (using exponential backoff) until this limit is
    hit. If the limit is passed, an error will be logged and the
    :attr:`.Statement.trace` will be left as :const:`None`.
    """

    default_fetch_size = 5000
    """
    By default, this many rows will be fetched at a time. Setting
    this to :const:`None` will disable automatic paging for large query
    results. The fetch size can be also specified per-query through
    :attr:`.Statement.fetch_size`.

    This only takes effect when protocol version 2 or higher is used.
    See :attr:`.Cluster.protocol_version` for details.

    .. versionadded:: 2.0.0
    """

    use_client_timestamp = True
    """
    When using protocol version 3 or higher, write timestamps may be supplied
    client-side at the protocol level. (Normally they are generated
    server-side by the coordinator node.) Note that timestamps specified
    within a CQL query will override this timestamp.

    .. versionadded:: 2.1.0
    """

    encoder = None
    """
    A :class:`~cassandra.encoder.Encoder` instance that will be used when
    formatting query parameters for non-prepared statements. This is not used
    for prepared statements (because prepared statements give the driver more
    information about what CQL types are expected, allowing it to accept a
    wider range of python types).

    The encoder uses a mapping from python types to encoder methods (for
    specific CQL types). This mapping can be modified by users as they see
    fit. Methods of :class:`~cassandra.encoder.Encoder` should be used for mapping
    values if possible, because they take precautions to avoid injections and
    properly sanitize data.

    Example::

        cluster = Cluster()
        session = cluster.connect("mykeyspace")
        session.encoder.mapping[tuple] = session.encoder.cql_encode_tuple

        session.execute("CREATE TABLE mytable (k int PRIMARY KEY, col tuple<int, ascii>)")
        session.execute("INSERT INTO mytable (k, col) VALUES (%s, %s)", [0, (123, 'abc')])

    .. versionadded:: 2.1.0
    """

    # Internals, all assigned in __init__:
    _lock = None               # RLock guarding is_shutdown
    _pools = None              # Host -> connection pool mapping
    _load_balancer = None      # alias of cluster.load_balancing_policy
    _metrics = None            # alias of cluster.metrics
    _protocol_version = None   # native protocol version in use
def __init__(self, cluster, hosts):
    """Create a session over ``hosts``, eagerly building a connection
    pool for each host (in parallel) and raising if any pool fails."""
    self.cluster = cluster
    self.hosts = hosts

    self._lock = RLock()
    self._pools = {}
    self._load_balancer = cluster.load_balancing_policy
    self._metrics = cluster.metrics
    self._protocol_version = self.cluster.protocol_version

    self.encoder = Encoder()

    # create connection pools in parallel
    futures = []
    for host in hosts:
        future = self.add_or_renew_pool(host, is_host_addition=False)
        if future is not None:
            futures.append(future)

    # surface any pool-creation error to the caller
    for future in futures:
        future.result()
def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False):
    """
    Execute the given query and synchronously wait for the response.

    If an error is encountered while executing the query, an Exception
    will be raised.

    `query` may be a query string or an instance of :class:`cassandra.query.Statement`.

    `parameters` may be a sequence or dict of parameters to bind. If a
    sequence is used, ``%s`` should be used the placeholder for each
    argument. If a dict is used, ``%(name)s`` style placeholders must
    be used.

    `timeout` should specify a floating-point timeout (in seconds) after
    which an :exc:`.OperationTimedOut` exception will be raised if the query
    has not completed. If not set, the timeout defaults to
    :attr:`~.Session.default_timeout`. If set to :const:`None`, there is
    no timeout.

    If `trace` is set to :const:`True`, an attempt will be made to
    fetch the trace details and attach them to the `query`'s
    :attr:`~.Statement.trace` attribute in the form of a :class:`.QueryTrace`
    instance. This requires that `query` be a :class:`.Statement` subclass
    instance and not just a string. If there is an error fetching the
    trace details, the :attr:`~.Statement.trace` attribute will be left as
    :const:`None`.
    """
    if timeout is _NOT_SET:
        timeout = self.default_timeout

    if trace and not isinstance(query, Statement):
        raise TypeError(
            "The query argument must be an instance of a subclass of "
            "cassandra.query.Statement when trace=True")

    future = self.execute_async(query, parameters, trace)
    try:
        result = future.result(timeout)
    finally:
        # fetch the trace even when result() raised (e.g. on timeout) so
        # the caller's Statement carries whatever trace is available
        if trace:
            try:
                query.trace = future.get_query_trace(self.max_trace_wait)
            except Exception:
                log.exception("Unable to fetch query trace:")

    return result
def execute_async(self, query, parameters=None, trace=False):
    """
    Execute the given query and return a :class:`~.ResponseFuture` object
    which callbacks may be attached to for asynchronous response
    delivery. You may also call :meth:`~.ResponseFuture.result()`
    on the :class:`.ResponseFuture` to synchronously block for results at
    any time.

    If `trace` is set to :const:`True`, you may call
    :meth:`.ResponseFuture.get_query_trace()` after the request
    completes to retrieve a :class:`.QueryTrace` instance.

    Example usage::

        >>> session = cluster.connect()
        >>> future = session.execute_async("SELECT * FROM mycf")

        >>> def log_results(results):
        ...     for row in results:
        ...         log.info("Results: %s", row)

        >>> def log_error(exc):
        >>>     log.error("Operation failed: %s", exc)

        >>> future.add_callbacks(log_results, log_error)

    Async execution with blocking wait for results::

        >>> future = session.execute_async("SELECT * FROM mycf")
        >>> # do other stuff...

        >>> try:
        ...     results = future.result()
        ... except Exception:
        ...     log.exception("Operation failed:")
    """
    # build the full future first, then send, so the caller always gets
    # a fully-formed ResponseFuture back
    future = self._create_response_future(query, parameters, trace)
    future.send_request()
    return future
def _create_response_future(self, query, parameters, trace):
    """ Returns the ResponseFuture before calling send_request() on it """

    prepared_statement = None

    # normalize plain strings and prepared statements to Statement objects
    if isinstance(query, six.string_types):
        query = SimpleStatement(query)
    elif isinstance(query, PreparedStatement):
        query = query.bind(parameters)

    cl = query.consistency_level if query.consistency_level is not None else self.default_consistency_level

    fetch_size = query.fetch_size
    if fetch_size is FETCH_SIZE_UNSET and self._protocol_version >= 2:
        fetch_size = self.default_fetch_size
    elif self._protocol_version == 1:
        # automatic paging is not supported by protocol v1
        fetch_size = None

    if self._protocol_version >= 3 and self.use_client_timestamp:
        # client-side write timestamp: microseconds since the epoch
        timestamp = int(time.time() * 1e6)
    else:
        timestamp = None

    # NOTE(review): a Statement subclass that is none of Simple/Bound/Batch
    # would leave `message` unbound and raise NameError below — confirm the
    # statement hierarchy is closed over these three.
    if isinstance(query, SimpleStatement):
        query_string = query.query_string
        if six.PY2 and isinstance(query_string, six.text_type):
            query_string = query_string.encode('utf-8')
        if parameters:
            query_string = bind_params(query_string, parameters, self.encoder)
        message = QueryMessage(
            query_string, cl, query.serial_consistency_level,
            fetch_size, timestamp=timestamp)
    elif isinstance(query, BoundStatement):
        message = ExecuteMessage(
            query.prepared_statement.query_id, query.values, cl,
            query.serial_consistency_level, fetch_size,
            timestamp=timestamp)
        prepared_statement = query.prepared_statement
    elif isinstance(query, BatchStatement):
        if self._protocol_version < 2:
            raise UnsupportedOperation(
                "BatchStatement execution is only supported with protocol version "
                "2 or higher (supported in Cassandra 2.0 and higher). Consider "
                "setting Cluster.protocol_version to 2 to support this operation.")
        message = BatchMessage(
            query.batch_type, query._statements_and_parameters, cl,
            query.serial_consistency_level, timestamp)

    if trace:
        message.tracing = True

    return ResponseFuture(
        self, message, query, self.default_timeout, metrics=self._metrics,
        prepared_statement=prepared_statement)
def prepare(self, query):
    """
    Prepares a query string, returning a :class:`~cassandra.query.PreparedStatement`
    instance which can be used as follows::

        >>> session = cluster.connect("mykeyspace")
        >>> query = "INSERT INTO users (id, name, age) VALUES (?, ?, ?)"
        >>> prepared = session.prepare(query)
        >>> session.execute(prepared, (user.id, user.name, user.age))

    Or you may bind values to the prepared statement ahead of time::

        >>> prepared = session.prepare(query)
        >>> bound_stmt = prepared.bind((user.id, user.name, user.age))
        >>> session.execute(bound_stmt)

    Of course, prepared statements may (and should) be reused::

        >>> prepared = session.prepare(query)
        >>> for user in users:
        ...     bound = prepared.bind((user.id, user.name, user.age))
        ...     session.execute(bound)

    **Important**: PreparedStatements should be prepared only once.
    Preparing the same query more than once will likely affect performance.
    """
    message = PrepareMessage(query=query)
    future = ResponseFuture(self, message, query=None)
    try:
        future.send_request()
        query_id, column_metadata = future.result(self.default_timeout)
    except Exception:
        log.exception("Error preparing query:")
        raise

    prepared_statement = PreparedStatement.from_message(
        query_id, column_metadata, self.cluster.metadata, query, self.keyspace,
        self._protocol_version)

    host = future._current_host
    try:
        # prepare on every other host so the statement can execute anywhere
        self.cluster.prepare_on_all_sessions(query_id, prepared_statement, host)
    except Exception:
        # best-effort: other hosts can still prepare lazily on first use
        log.exception("Error preparing query on all hosts:")

    return prepared_statement
def prepare_on_all_hosts(self, query, excluded_host):
    """
    Prepare the given query on all hosts, excluding ``excluded_host``.
    Intended for internal use only.
    """
    futures = []
    for host in self._pools.keys():
        if host != excluded_host and host.is_up:
            future = ResponseFuture(self, PrepareMessage(query=query), None)

            # we don't care about errors preparing against specific hosts,
            # since we can always prepare them as needed when the prepared
            # statement is used.  Just log errors and continue on.
            try:
                request_id = future._query(host)
            except Exception:
                log.exception("Error preparing query for host %s:", host)
                continue

            if request_id is None:
                # the error has already been logged by ResponseFuture
                log.debug("Failed to prepare query for host %s: %r",
                          host, future._errors.get(host))
                continue

            futures.append((host, future))

    # wait for each prepare to complete; failures are logged, never raised
    for host, future in futures:
        try:
            future.result(self.default_timeout)
        except Exception:
            log.exception("Error preparing query for host %s:", host)
def shutdown(self):
    """
    Close all connections. ``Session`` instances should not be used
    for any purpose after being shutdown.
    """
    with self._lock:
        if self.is_shutdown:
            return
        self.is_shutdown = True

    for pool in self._pools.values():
        pool.shutdown()
def add_or_renew_pool(self, host, is_host_addition):
    """
    For internal use only.

    Asynchronously create (or replace) the connection pool for ``host``.
    Returns a future resolving to True on success or False on failure,
    or None when the load balancing policy ignores the host.
    """
    distance = self._load_balancer.distance(host)
    if distance == HostDistance.IGNORED:
        return None

    def run_add_or_renew_pool():
        try:
            # protocol v3+ multiplexes requests over one connection per
            # host; older protocols need a pool of connections
            if self._protocol_version >= 3:
                new_pool = HostConnection(host, distance, self)
            else:
                new_pool = HostConnectionPool(host, distance, self)
        except AuthenticationFailed as auth_exc:
            conn_exc = ConnectionException(str(auth_exc), host=host)
            self.cluster.signal_connection_failure(host, conn_exc, is_host_addition)
            return False
        except Exception as conn_exc:
            log.warning("Failed to create connection pool for new host %s:",
                        host, exc_info=conn_exc)
            # the host itself will still be marked down, so we need to pass
            # a special flag to make sure the reconnector is created
            self.cluster.signal_connection_failure(
                host, conn_exc, is_host_addition, expect_host_to_be_down=True)
            return False

        previous = self._pools.get(host)
        self._pools[host] = new_pool
        log.debug("Added pool for host %s to session", host)
        if previous:
            previous.shutdown()

        return True

    return self.submit(run_add_or_renew_pool)
def remove_pool(self, host):
    """Drop the pool for ``host`` and shut it down asynchronously,
    returning the shutdown future, or None when no pool existed."""
    pool = self._pools.pop(host, None)
    if not pool:
        return None
    log.debug("Removed connection pool for %r", host)
    return self.submit(pool.shutdown)
def update_created_pools(self):
    """
    When the set of live nodes change, the loadbalancer will change its
    mind on host distances. It might change it on the node that came/left
    but also on other nodes (for instance, if a node dies, another
    previously ignored node may be now considered).

    This method ensures that all hosts for which a pool should exist
    have one, and hosts that shouldn't don't.

    For internal use only.
    """
    for host in self.cluster.metadata.all_hosts():
        distance = self._load_balancer.distance(host)
        pool = self._pools.get(host)

        if not pool or pool.is_shutdown:
            # (re)create a pool for up, non-ignored hosts lacking one
            if distance != HostDistance.IGNORED and host.is_up:
                self.add_or_renew_pool(host, False)
        elif distance != pool.host_distance:
            # the distance has changed
            if distance == HostDistance.IGNORED:
                self.remove_pool(host)
            else:
                # let the pool resize itself to the new distance
                pool.host_distance = distance
def on_down(self, host):
    """
    Called by the parent Cluster instance when a node is marked down.
    Only intended for internal use.
    """
    fut = self.remove_pool(host)
    if fut is not None:
        # once the pool is gone, other hosts' distances may change
        fut.add_done_callback(lambda _: self.update_created_pools())
def on_remove(self, host):
    """Internal — a removed host is handled the same as a down host."""
    self.on_down(host)
def set_keyspace(self, keyspace):
    """
    Set the default keyspace for all queries made through this Session.
    This operation blocks until complete.
    """
    statement = 'USE %s' % (protect_name(keyspace),)
    self.execute(statement)
def _set_keyspace_for_all_pools(self, keyspace, callback):
    """
    Asynchronously sets the keyspace on all pools. When all
    pools have set all of their connections, `callback` will be
    called with a dictionary of all errors that occurred, keyed
    by the `Host` that they occurred against.
    """
    self.keyspace = keyspace

    remaining_callbacks = set(self._pools.values())
    errors = {}

    # no pools at all: report "no errors" immediately
    if not remaining_callbacks:
        callback(errors)
        return

    def pool_finished_setting_keyspace(pool, host_errors):
        remaining_callbacks.remove(pool)
        if host_errors:
            errors[pool.host] = host_errors

        # BUG FIX: when the last pool reported in, this previously called
        # ``callback(host_errors)`` — only the final pool's error list —
        # instead of the accumulated per-host ``errors`` dict that the
        # docstring promises, dropping earlier pools' errors entirely.
        if not remaining_callbacks:
            callback(errors)

    for pool in self._pools.values():
        pool._set_keyspace_for_all_conns(keyspace, pool_finished_setting_keyspace)
def user_type_registered(self, keyspace, user_type, klass):
    """
    Called by the parent Cluster instance when the user registers a new
    mapping from a user-defined type to a class. Intended for internal
    use only.

    Raises :exc:`UserTypeDoesNotExist` when the keyspace or type is not
    present in the driver's metadata.
    """
    try:
        ks_meta = self.cluster.metadata.keyspaces[keyspace]
    except KeyError:
        raise UserTypeDoesNotExist(
            'Keyspace %s does not exist or has not been discovered by the driver' % (keyspace,))

    try:
        type_meta = ks_meta.user_types[user_type]
    except KeyError:
        raise UserTypeDoesNotExist(
            'User type %s does not exist in keyspace %s' % (user_type, keyspace))

    def encode(val):
        # render the value as a CQL UDT literal: { field : value , ... },
        # encoding each field with the session's encoder; missing
        # attributes encode as None
        return '{ %s }' % ' , '.join('%s : %s' % (
            field_name,
            self.encoder.cql_encode_all_types(getattr(val, field_name, None))
        ) for field_name in type_meta.field_names)

    self.encoder.mapping[klass] = encode
def submit(self, fn, *args, **kwargs):
    """Internal: run ``fn`` on the cluster executor; returns None (and
    does nothing) once the session is shut down."""
    if self.is_shutdown:
        return None
    return self.cluster.executor.submit(fn, *args, **kwargs)
def get_pool_state(self):
    """Return a mapping of host -> pool state snapshot."""
    return {host: pool.get_state() for host, pool in self._pools.items()}
def get_pools(self):
    # Return the live view of this session's connection pools.
    return self._pools.values()
class UserTypeDoesNotExist(Exception):
    """
    Raised when an attempt is made to use a user-defined type that does
    not exist (unknown keyspace or unknown type name).

    .. versionadded:: 2.1.0
    """
class _ControlReconnectionHandler(_ReconnectionHandler):
    """
    Internal

    Reconnection handler for the control connection; delegates connect
    attempts back to the owning ControlConnection, which it holds weakly.
    """

    def __init__(self, control_connection, *args, **kwargs):
        _ReconnectionHandler.__init__(self, *args, **kwargs)
        # weak proxy so this handler does not keep the ControlConnection alive
        self.control_connection = weakref.proxy(control_connection)

    def try_reconnect(self):
        # we'll either get back a new Connection or a NoHostAvailable
        return self.control_connection._reconnect_internal()

    def on_reconnection(self, connection):
        self.control_connection._set_new_connection(connection)

    def on_exception(self, exc, next_delay):
        # TODO only overridden to add logging, so add logging
        if isinstance(exc, AuthenticationFailed):
            # auth failures will not fix themselves; stop retrying
            return False
        else:
            log.debug("Error trying to reconnect control connection: %r", exc)
            return True
def _watch_callback(obj_weakref, method_name, *args, **kwargs):
    """
    A callback handler for the ControlConnection that tolerates
    weak references: once the target object has been collected,
    the event is silently dropped.
    """
    target = obj_weakref()
    if target is not None:
        getattr(target, method_name)(*args, **kwargs)
def _clear_watcher(conn, expiring_weakref):
    """
    Called when the ControlConnection object is about to be finalized.
    This clears watchers on the underlying Connection object.
    """
    try:
        conn.control_conn_disposed()
    except ReferenceError:
        # the Connection weak proxy expired first; nothing to clean up
        pass
class ControlConnection(object):
    """
    Internal

    Maintains a single dedicated connection used to receive pushed
    cluster events (topology, status, and schema changes) and to
    refresh node-list and schema metadata.
    """

    # queries used to rebuild schema metadata
    _SELECT_KEYSPACES = "SELECT * FROM system.schema_keyspaces"
    _SELECT_COLUMN_FAMILIES = "SELECT * FROM system.schema_columnfamilies"
    _SELECT_COLUMNS = "SELECT * FROM system.schema_columns"
    _SELECT_USERTYPES = "SELECT * FROM system.schema_usertypes"
    _SELECT_TRIGGERS = "SELECT * FROM system.schema_triggers"

    # queries used to refresh the node list and token map
    _SELECT_PEERS = "SELECT peer, data_center, rack, tokens, rpc_address, schema_version FROM system.peers"
    _SELECT_LOCAL = "SELECT cluster_name, data_center, rack, tokens, partitioner, schema_version FROM system.local WHERE key='local'"

    # lighter queries used only for schema-agreement checks
    _SELECT_SCHEMA_PEERS = "SELECT peer, rpc_address, schema_version FROM system.peers"
    _SELECT_SCHEMA_LOCAL = "SELECT schema_version FROM system.local WHERE key='local'"

    _is_shutdown = False
    _timeout = None
    _protocol_version = None

    _schema_event_refresh_window = None
    _topology_event_refresh_window = None

    # master switch for schema/topology refresh queries
    _meta_refresh_enabled = True

    # for testing purposes
    _time = time
def __init__(self, cluster, timeout,
             schema_event_refresh_window,
             topology_event_refresh_window):
    """Set up state for the control connection; no connection is opened
    until :meth:`connect` is called."""
    # use a weak reference to allow the Cluster instance to be GC'ed (and
    # shutdown) since implementing __del__ disables the cycle detector
    self._cluster = weakref.proxy(cluster)
    self._connection = None
    self._timeout = timeout

    self._schema_event_refresh_window = schema_event_refresh_window
    self._topology_event_refresh_window = topology_event_refresh_window

    self._lock = RLock()
    self._schema_agreement_lock = Lock()

    self._reconnection_handler = None
    self._reconnection_lock = RLock()
def connect(self):
    """Open the control connection (a no-op after shutdown)."""
    if self._is_shutdown:
        return
    self._protocol_version = self._cluster.protocol_version
    self._set_new_connection(self._reconnect_internal())
def _set_new_connection(self, conn):
    """
    Replace existing connection (if there is one) and close it.
    """
    with self._lock:
        old, self._connection = self._connection, conn

    if old:
        log.debug("[control connection] Closing old connection %r, replacing with %r", old, conn)
        old.close()
def _reconnect_internal(self):
    """
    Tries to connect to each host in the query plan until one succeeds
    or every attempt fails. If successful, a new Connection will be
    returned. Otherwise, :exc:`NoHostAvailable` will be raised
    with an "errors" arg that is a dict mapping host addresses
    to the exception that was raised when an attempt was made to open
    a connection to that host.
    """
    errors = {}
    for host in self._cluster.load_balancing_policy.make_query_plan():
        try:
            return self._try_connect(host)
        except ConnectionException as exc:
            errors[host.address] = exc
            log.warning("[control connection] Error connecting to %s:", host, exc_info=True)
            # connection-level failures also feed host conviction handling
            self._cluster.signal_connection_failure(host, exc, is_host_addition=False)
        except Exception as exc:
            errors[host.address] = exc
            log.warning("[control connection] Error connecting to %s:", host, exc_info=True)

    raise NoHostAvailable("Unable to connect to any servers", errors)
def _try_connect(self, host):
    """
    Creates a new Connection, registers for pushed events, and refreshes
    node/token and schema metadata.
    """
    log.debug("[control connection] Opening new connection to %s", host)
    connection = self._cluster.connection_factory(host.address, is_control_connection=True)
    log.debug("[control connection] Established new connection %r, "
              "registering watchers and refreshing schema and topology",
              connection)

    # use weak references in both directions
    # _clear_watcher will be called when this ControlConnection is about to be finalized
    # _watch_callback will get the actual callback from the Connection and relay it to
    # this object (after a dereferencing a weakref)
    self_weakref = weakref.ref(self, callback=partial(_clear_watcher, weakref.proxy(connection)))
    try:
        connection.register_watchers({
            "TOPOLOGY_CHANGE": partial(_watch_callback, self_weakref, '_handle_topology_change'),
            "STATUS_CHANGE": partial(_watch_callback, self_weakref, '_handle_status_change'),
            "SCHEMA_CHANGE": partial(_watch_callback, self_weakref, '_handle_schema_change')
        }, register_timeout=self._timeout)

        # fetch peers and local info in one round trip; the results are
        # shared between the node-list refresh and the schema refresh
        peers_query = QueryMessage(query=self._SELECT_PEERS, consistency_level=ConsistencyLevel.ONE)
        local_query = QueryMessage(query=self._SELECT_LOCAL, consistency_level=ConsistencyLevel.ONE)
        shared_results = connection.wait_for_responses(
            peers_query, local_query, timeout=self._timeout)

        self._refresh_node_list_and_token_map(connection, preloaded_results=shared_results)
        self._refresh_schema(connection, preloaded_results=shared_results)
        if not self._cluster.metadata.keyspaces:
            # retry once, skipping the schema-agreement wait entirely
            log.warning("[control connection] No schema built on connect; retrying without wait for schema agreement")
            self._refresh_schema(connection, preloaded_results=shared_results, schema_agreement_wait=0)
    except Exception:
        # never leak a half-initialized connection
        connection.close()
        raise

    return connection
def reconnect(self):
    """Schedule an asynchronous reconnect (a no-op after shutdown)."""
    if self._is_shutdown:
        return
    self._submit(self._reconnect)
def _reconnect(self):
    """Attempt to re-establish the control connection immediately; if no
    host is available, fall back to a scheduled reconnection with
    backoff. Any other error is logged and re-raised."""
    log.debug("[control connection] Attempting to reconnect")
    try:
        self._set_new_connection(self._reconnect_internal())
    except NoHostAvailable:
        # make a retry schedule (which includes backoff)
        # BUG FIX: this previously read ``self.cluster``, an attribute that
        # does not exist on ControlConnection — __init__ stores the weak
        # proxy as ``self._cluster`` (and this method already uses
        # ``self._cluster.scheduler`` below) — so scheduling a retry always
        # raised AttributeError instead of backing off.
        schedule = self._cluster.reconnection_policy.new_schedule()

        with self._reconnection_lock:
            # cancel existing reconnection attempts
            if self._reconnection_handler:
                self._reconnection_handler.cancel()

            # when a connection is successfully made, _set_new_connection
            # will be called with the new connection and then our
            # _reconnection_handler will be cleared out
            self._reconnection_handler = _ControlReconnectionHandler(
                self, self._cluster.scheduler, schedule,
                self._get_and_set_reconnection_handler,
                new_handler=None)
            self._reconnection_handler.start()
    except Exception:
        log.debug("[control connection] error reconnecting", exc_info=True)
        raise
def _get_and_set_reconnection_handler(self, new_handler):
    """
    Atomically swap in ``new_handler`` and return the handler it
    replaced. Called by _ControlReconnectionHandler when a new
    connection is successfully created, to clear itself out.
    """
    with self._reconnection_lock:
        previous, self._reconnection_handler = self._reconnection_handler, new_handler
        return previous
def _submit(self, *args, **kwargs):
    """Forward work to the cluster executor. Returns None when the
    cluster is shut down or has already been garbage-collected (the
    weak proxy raises ReferenceError)."""
    try:
        cluster = self._cluster
        if not cluster.is_shutdown:
            return cluster.executor.submit(*args, **kwargs)
    except ReferenceError:
        pass
    return None
def shutdown(self):
    """Idempotently shut down: cancel any pending reconnection and
    close the underlying connection."""
    with self._lock:
        if self._is_shutdown:
            return
        else:
            self._is_shutdown = True

    log.debug("Shutting down control connection")
    # stop trying to reconnect (if we are)
    if self._reconnection_handler:
        self._reconnection_handler.cancel()

    if self._connection:
        self._connection.close()
        # remove the attribute entirely so any late use fails fast
        # instead of operating on a closed connection
        del self._connection
def refresh_schema(self, keyspace=None, table=None, usertype=None,
                   schema_agreement_wait=None):
    """Public entry point for refreshing schema metadata.

    Returns True when the refresh ran successfully, False otherwise.
    Failures are logged and signalled rather than raised.
    """
    if not self._meta_refresh_enabled:
        log.debug("[control connection] Skipping schema refresh because meta refresh is disabled")
        return False
    try:
        conn = self._connection
        if conn:
            return self._refresh_schema(conn, keyspace, table, usertype,
                                        schema_agreement_wait=schema_agreement_wait)
    except ReferenceError:
        pass  # our weak reference to the Cluster is no good
    except Exception:
        log.debug("[control connection] Error refreshing schema", exc_info=True)
        self._signal_error()
    return False
def _refresh_schema(self, connection, keyspace=None, table=None, usertype=None,
                    preloaded_results=None, schema_agreement_wait=None):
    """Fetch schema tables over *connection* and rebuild local metadata.

    Scope narrows with the arguments: a single table, a single user
    type, a single keyspace, or (with none given) the entire schema.
    Returns True when metadata was rebuilt, False when the refresh was
    skipped (cluster shut down or no schema agreement).
    """
    if self._cluster.is_shutdown:
        return False

    # at most one of table/usertype may be targeted per refresh
    assert table is None or usertype is None

    agreed = self.wait_for_schema_agreement(connection,
                                            preloaded_results=preloaded_results,
                                            wait_time=schema_agreement_wait)
    if not agreed:
        log.debug("Skipping schema refresh due to lack of schema agreement")
        return False

    cl = ConsistencyLevel.ONE
    if table:
        def _handle_results(success, result):
            # convert a successful response to row dicts; re-raise failures
            if success:
                return dict_factory(*result.results) if result else {}
            else:
                raise result

        # a particular table changed
        where_clause = " WHERE keyspace_name = '%s' AND columnfamily_name = '%s'" % (keyspace, table)
        cf_query = QueryMessage(query=self._SELECT_COLUMN_FAMILIES + where_clause, consistency_level=cl)
        col_query = QueryMessage(query=self._SELECT_COLUMNS + where_clause, consistency_level=cl)
        triggers_query = QueryMessage(query=self._SELECT_TRIGGERS + where_clause, consistency_level=cl)
        (cf_success, cf_result), (col_success, col_result), (triggers_success, triggers_result) \
            = connection.wait_for_responses(cf_query, col_query, triggers_query, timeout=self._timeout, fail_on_error=False)
        log.debug("[control connection] Fetched table info for %s.%s, rebuilding metadata", keyspace, table)
        cf_result = _handle_results(cf_success, cf_result)
        col_result = _handle_results(col_success, col_result)

        # handle the triggers table not existing in Cassandra 1.2
        if not triggers_success and isinstance(triggers_result, InvalidRequest):
            triggers_result = {}
        else:
            triggers_result = _handle_results(triggers_success, triggers_result)

        self._cluster.metadata.table_changed(keyspace, table, cf_result, col_result, triggers_result)
    elif usertype:
        # user defined types within this keyspace changed
        where_clause = " WHERE keyspace_name = '%s' AND type_name = '%s'" % (keyspace, usertype)
        types_query = QueryMessage(query=self._SELECT_USERTYPES + where_clause, consistency_level=cl)
        types_result = connection.wait_for_response(types_query)
        log.debug("[control connection] Fetched user type info for %s.%s, rebuilding metadata", keyspace, usertype)
        types_result = dict_factory(*types_result.results) if types_result.results else {}
        self._cluster.metadata.usertype_changed(keyspace, usertype, types_result)
    elif keyspace:
        # only the keyspace itself changed (such as replication settings)
        where_clause = " WHERE keyspace_name = '%s'" % (keyspace,)
        ks_query = QueryMessage(query=self._SELECT_KEYSPACES + where_clause, consistency_level=cl)
        ks_result = connection.wait_for_response(ks_query)
        log.debug("[control connection] Fetched keyspace info for %s, rebuilding metadata", keyspace)
        ks_result = dict_factory(*ks_result.results) if ks_result.results else {}
        self._cluster.metadata.keyspace_changed(keyspace, ks_result)
    else:
        # build everything from scratch
        queries = [
            QueryMessage(query=self._SELECT_KEYSPACES, consistency_level=cl),
            QueryMessage(query=self._SELECT_COLUMN_FAMILIES, consistency_level=cl),
            QueryMessage(query=self._SELECT_COLUMNS, consistency_level=cl),
            QueryMessage(query=self._SELECT_USERTYPES, consistency_level=cl),
            QueryMessage(query=self._SELECT_TRIGGERS, consistency_level=cl)
        ]
        responses = connection.wait_for_responses(*queries, timeout=self._timeout, fail_on_error=False)
        (ks_success, ks_result), (cf_success, cf_result), \
            (col_success, col_result), (types_success, types_result), \
            (trigger_success, triggers_result) = responses

        if ks_success:
            ks_result = dict_factory(*ks_result.results)
        else:
            raise ks_result

        if cf_success:
            cf_result = dict_factory(*cf_result.results)
        else:
            raise cf_result

        if col_success:
            col_result = dict_factory(*col_result.results)
        else:
            raise col_result

        # if we're connected to Cassandra < 2.0, the triggers table will not exist
        if trigger_success:
            triggers_result = dict_factory(*triggers_result.results)
        else:
            if isinstance(triggers_result, InvalidRequest):
                log.debug("[control connection] triggers table not found")
                triggers_result = {}
            elif isinstance(triggers_result, Unauthorized):
                log.warning("[control connection] this version of Cassandra does not allow access to schema_triggers metadata with authorization enabled (CASSANDRA-7967); "
                            "The driver will operate normally, but will not reflect triggers in the local metadata model, or schema strings.")
                triggers_result = {}
            else:
                raise triggers_result

        # if we're connected to Cassandra < 2.1, the usertypes table will not exist
        if types_success:
            types_result = dict_factory(*types_result.results) if types_result.results else {}
        else:
            if isinstance(types_result, InvalidRequest):
                log.debug("[control connection] user types table not found")
                types_result = {}
            else:
                raise types_result

        log.debug("[control connection] Fetched schema, rebuilding metadata")
        self._cluster.metadata.rebuild_schema(ks_result, types_result, cf_result, col_result, triggers_result)
    return True
def refresh_node_list_and_token_map(self, force_token_rebuild=False):
    """Public entry point: refresh peer metadata and the token map.

    Returns True on success, False when refresh is disabled or fails.
    Failures are logged and signalled rather than raised.
    """
    if not self._meta_refresh_enabled:
        log.debug("[control connection] Skipping node list refresh because meta refresh is disabled")
        return False
    try:
        conn = self._connection
        if conn:
            self._refresh_node_list_and_token_map(conn, force_token_rebuild=force_token_rebuild)
            return True
    except ReferenceError:
        pass  # our weak reference to the Cluster is no good
    except Exception:
        log.debug("[control connection] Error refreshing node list and token map", exc_info=True)
        self._signal_error()
    return False
def _refresh_node_list_and_token_map(self, connection, preloaded_results=None,
                                     force_token_rebuild=False):
    """Query system.peers/system.local (or use preloaded rows) and update
    host metadata, discovering new hosts, removing departed ones, and
    rebuilding the token map when topology changed.
    """
    if preloaded_results:
        log.debug("[control connection] Refreshing node list and token map using preloaded results")
        peers_result = preloaded_results[0]
        local_result = preloaded_results[1]
    else:
        log.debug("[control connection] Refreshing node list and token map")
        cl = ConsistencyLevel.ONE
        peers_query = QueryMessage(query=self._SELECT_PEERS, consistency_level=cl)
        local_query = QueryMessage(query=self._SELECT_LOCAL, consistency_level=cl)
        peers_result, local_result = connection.wait_for_responses(
            peers_query, local_query, timeout=self._timeout)

    peers_result = dict_factory(*peers_result.results)

    partitioner = None
    token_map = {}

    # first, handle the node we are connected to (system.local)
    if local_result.results:
        local_rows = dict_factory(*(local_result.results))
        local_row = local_rows[0]
        cluster_name = local_row["cluster_name"]
        self._cluster.metadata.cluster_name = cluster_name

        host = self._cluster.metadata.get_host(connection.host)
        if host:
            datacenter = local_row.get("data_center")
            rack = local_row.get("rack")
            self._update_location_info(host, datacenter, rack)

        partitioner = local_row.get("partitioner")
        tokens = local_row.get("tokens")
        if partitioner and tokens:
            token_map[host] = tokens

    # Check metadata.partitioner to see if we haven't built anything yet. If
    # every node in the cluster was in the contact points, we won't discover
    # any new nodes, so we need this additional check.  (See PYTHON-90)
    should_rebuild_token_map = force_token_rebuild or self._cluster.metadata.partitioner is None
    found_hosts = set()
    for row in peers_result:
        addr = row.get("rpc_address")

        # a 0.0.0.0/:: rpc_address means the peer did not broadcast one;
        # fall back to its internal address
        if not addr or addr in ["0.0.0.0", "::"]:
            addr = row.get("peer")

        tokens = row.get("tokens")
        if not tokens:
            log.warning("Excluding host (%s) with no tokens in system.peers table of %s." % (addr, connection.host))
            continue

        found_hosts.add(addr)

        host = self._cluster.metadata.get_host(addr)
        datacenter = row.get("data_center")
        rack = row.get("rack")
        if host is None:
            log.debug("[control connection] Found new host to connect to: %s", addr)
            host, _ = self._cluster.add_host(addr, datacenter, rack, signal=True, refresh_nodes=False)
            should_rebuild_token_map = True
        else:
            should_rebuild_token_map |= self._update_location_info(host, datacenter, rack)

        if partitioner and tokens:
            token_map[host] = tokens

    # remove hosts that are no longer in system.peers (excluding our own
    # connection's host and configured contact points)
    for old_host in self._cluster.metadata.all_hosts():
        if old_host.address != connection.host and old_host.address not in found_hosts:
            should_rebuild_token_map = True
            if old_host.address not in self._cluster.contact_points:
                log.debug("[control connection] Found host that has been removed: %r", old_host)
                self._cluster.remove_host(old_host)

    log.debug("[control connection] Finished fetching ring info")
    if partitioner and should_rebuild_token_map:
        log.debug("[control connection] Rebuilding token map due to topology changes")
        self._cluster.metadata.rebuild_token_map(partitioner, token_map)
def _update_location_info(self, host, datacenter, rack):
    """Record dc/rack info for *host*; return True when anything changed."""
    if host.datacenter == datacenter and host.rack == rack:
        return False

    # If the dc/rack information changes, we need to update the load balancing policy.
    # For that, we remove and re-add the node against the policy. Not the most elegant,
    # and assumes that the policy will update correctly, but in practice this should work.
    policy = self._cluster.load_balancing_policy
    policy.on_down(host)
    host.set_location_info(datacenter, rack)
    policy.on_up(host)
    return True
def _handle_topology_change(self, event):
    """React to a pushed topology-change event from the server."""
    change_type = event["change_type"]
    addr, port = event["address"]
    if change_type in ("NEW_NODE", "MOVED_NODE"):
        if self._topology_event_refresh_window >= 0:
            # jitter the refresh within the configured window
            delay = random() * self._topology_event_refresh_window
            self._cluster.scheduler.schedule_unique(delay, self.refresh_node_list_and_token_map)
    elif change_type == "REMOVED_NODE":
        host = self._cluster.metadata.get_host(addr)
        self._cluster.scheduler.schedule_unique(0, self._cluster.remove_host, host)
def _handle_status_change(self, event):
    """React to a pushed UP/DOWN status event from the server."""
    change_type = event["change_type"]
    addr, port = event["address"]
    host = self._cluster.metadata.get_host(addr)
    if change_type == "UP":
        delay = 1 + random() * 0.5  # randomness to avoid thundering herd problem on events
        if host is None:
            # this is the first time we've seen the node
            self._cluster.scheduler.schedule_unique(delay, self.refresh_node_list_and_token_map)
        else:
            self._cluster.scheduler.schedule_unique(delay, self._cluster.on_up, host)
    elif change_type == "DOWN":
        # Note that there is a slight risk we can receive the event late and thus
        # mark the host down even though we already had reconnected successfully.
        # But it is unlikely, and don't have too much consequence since we'll try reconnecting
        # right away, so we favor the detection to make the Host.is_up more accurate.
        if host is not None:
            # this will be run by the scheduler
            self._cluster.on_down(host, is_host_addition=False)
def _handle_schema_change(self, event):
    """Schedule a jittered schema refresh for a pushed schema-change event."""
    window = self._schema_event_refresh_window
    if window < 0:
        # event-driven schema refreshes are disabled
        return
    keyspace = event.get('keyspace')
    table = event.get('table')
    usertype = event.get('type')
    delay = random() * window
    self._cluster.scheduler.schedule_unique(delay, self.refresh_schema, keyspace, table, usertype)
def wait_for_schema_agreement(self, connection=None, preloaded_results=None, wait_time=None):
    """Poll schema versions until all live hosts agree or a timeout expires.

    Returns True on agreement, False on timeout, and None when the
    control connection shuts down mid-wait. `wait_time` overrides the
    cluster's max_schema_agreement_wait; a non-positive total timeout
    skips the check entirely.
    """
    total_timeout = wait_time if wait_time is not None else self._cluster.max_schema_agreement_wait
    if total_timeout <= 0:
        return True

    # Each schema change typically generates two schema refreshes, one
    # from the response type and one from the pushed notification. Holding
    # a lock is just a simple way to cut down on the number of schema queries
    # we'll make.
    with self._schema_agreement_lock:
        if self._is_shutdown:
            return

        if not connection:
            connection = self._connection

        if preloaded_results:
            log.debug("[control connection] Attempting to use preloaded results for schema agreement")

            peers_result = preloaded_results[0]
            local_result = preloaded_results[1]
            schema_mismatches = self._get_schema_mismatches(peers_result, local_result, connection.host)
            if schema_mismatches is None:
                return True

        log.debug("[control connection] Waiting for schema agreement")
        start = self._time.time()
        elapsed = 0
        cl = ConsistencyLevel.ONE
        schema_mismatches = None
        while elapsed < total_timeout:
            peers_query = QueryMessage(query=self._SELECT_SCHEMA_PEERS, consistency_level=cl)
            local_query = QueryMessage(query=self._SELECT_SCHEMA_LOCAL, consistency_level=cl)
            try:
                # never wait longer than the remaining overall budget
                timeout = min(self._timeout, total_timeout - elapsed)
                peers_result, local_result = connection.wait_for_responses(
                    peers_query, local_query, timeout=timeout)
            except OperationTimedOut as timeout:
                log.debug("[control connection] Timed out waiting for "
                          "response during schema agreement check: %s", timeout)
                elapsed = self._time.time() - start
                continue
            except ConnectionShutdown:
                if self._is_shutdown:
                    log.debug("[control connection] Aborting wait for schema match due to shutdown")
                    return None
                else:
                    raise

            schema_mismatches = self._get_schema_mismatches(peers_result, local_result, connection.host)
            if schema_mismatches is None:
                return True

            log.debug("[control connection] Schemas mismatched, trying again")
            self._time.sleep(0.2)
            elapsed = self._time.time() - start

        log.warning("Node %s is reporting a schema disagreement: %s",
                    connection.host, schema_mismatches)
        return False
def _get_schema_mismatches(self, peers_result, local_result, local_address):
    """Group hosts by their reported schema_version.

    Returns None when exactly one version is seen (schemas agree),
    otherwise a dict mapping schema_version -> list of host addresses.
    Only hosts currently marked up are considered.
    """
    peers_result = dict_factory(*peers_result.results)

    versions = defaultdict(set)
    if local_result.results:
        local_row = dict_factory(*local_result.results)[0]
        if local_row.get("schema_version"):
            versions[local_row.get("schema_version")].add(local_address)

    for row in peers_result:
        schema_ver = row.get('schema_version')
        if not schema_ver:
            continue
        addr = row.get("rpc_address")
        # 0.0.0.0/:: means the peer did not broadcast an rpc_address;
        # fall back to its internal address
        if not addr or addr in ["0.0.0.0", "::"]:
            addr = row.get("peer")
        peer = self._cluster.metadata.get_host(addr)
        if peer and peer.is_up:
            versions[schema_ver].add(addr)

    if len(versions) == 1:
        log.debug("[control connection] Schemas match")
        return None

    return dict((version, list(nodes)) for version, nodes in six.iteritems(versions))
def _signal_error(self):
    """Report a control-connection failure and kick off recovery."""
    # try just signaling the cluster, as this will trigger a reconnect
    # as part of marking the host down
    conn = self._connection
    if conn and conn.is_defunct:
        host = self._cluster.metadata.get_host(conn.host)
        # host may be None if it's already been removed, but that indicates
        # that errors have already been reported, so we're fine
        if host:
            self._cluster.signal_connection_failure(
                host, conn.last_error, is_host_addition=False)
        return

    # if the connection is not defunct or the host already left, reconnect
    # manually
    self.reconnect()
def on_up(self, host):
    """No-op: the control connection does not react to hosts coming up."""
def on_down(self, host):
    """If *host* backs the control connection, start reconnecting."""
    conn = self._connection
    is_control_host = conn is not None and conn.host == host.address
    if is_control_host and self._reconnection_handler is None:
        log.debug("[control connection] Control connection host (%s) is "
                  "considered down, starting reconnection", host)
        # this will result in a task being submitted to the executor to reconnect
        self.reconnect()
def on_add(self, host, refresh_nodes=True):
    """A host joined the ring: rebuild topology unless suppressed."""
    if not refresh_nodes:
        return
    self.refresh_node_list_and_token_map(force_token_rebuild=True)
def on_remove(self, host):
    """A host left the ring: force a topology and token-map rebuild."""
    self.refresh_node_list_and_token_map(force_token_rebuild=True)
def get_connections(self):
    """Return the active control connection in a list (possibly empty)."""
    current = getattr(self, '_connection', None)
    if current:
        return [current]
    return []
def return_connection(self, connection):
    """Reconnect when our own connection is handed back unusable."""
    if connection is not self._connection:
        return
    if connection.is_defunct or connection.is_closed:
        self.reconnect()
def set_meta_refresh_enabled(self, enabled):
    """Enable or disable automatic metadata (schema/topology) refreshes."""
    self._meta_refresh_enabled = enabled
def _stop_scheduler(scheduler, thread):
    """atexit hook: shut down *scheduler* (if still alive) and join its thread.

    *scheduler* is a weakref proxy, so attribute access raises
    ReferenceError once the scheduler has been garbage collected.
    """
    try:
        if not scheduler.is_shutdown:
            scheduler.shutdown()
    except ReferenceError:
        pass

    thread.join()
class _Scheduler(object):
    """Runs delayed tasks on an executor via a dedicated timer thread."""

    # priority queue of (run_at_timestamp, task) pairs
    _queue = None
    # tasks currently queued, for schedule_unique() de-duplication
    _scheduled_tasks = None
    # executor that tasks are ultimately submitted to
    _executor = None
    is_shutdown = False

    def __init__(self, executor):
        self._queue = Queue.PriorityQueue()
        self._scheduled_tasks = set()
        self._executor = executor

        t = Thread(target=self.run, name="Task Scheduler")
        t.daemon = True
        t.start()

        # although this runs on a daemonized thread, we prefer to stop
        # it gracefully to avoid random errors during interpreter shutdown
        atexit.register(partial(_stop_scheduler, weakref.proxy(self), t))

    def shutdown(self):
        try:
            log.debug("Shutting down Cluster Scheduler")
        except AttributeError:
            # this can happen on interpreter shutdown
            pass
        self.is_shutdown = True
        # wake the run() loop so it can observe is_shutdown and exit
        self._queue.put_nowait((0, None))

    def schedule(self, delay, fn, *args):
        """Run fn(*args) on the executor after *delay* seconds."""
        self._insert_task(delay, (fn, args))

    def schedule_unique(self, delay, fn, *args):
        """Like schedule(), but skipped if an identical task is already queued."""
        task = (fn, args)
        if task not in self._scheduled_tasks:
            self._insert_task(delay, task)
        else:
            log.debug("Ignoring schedule_unique for already-scheduled task: %r", task)

    def _insert_task(self, delay, task):
        if not self.is_shutdown:
            run_at = time.time() + delay
            self._scheduled_tasks.add(task)
            self._queue.put_nowait((run_at, task))
        else:
            log.debug("Ignoring scheduled task after shutdown: %r", task)

    def run(self):
        # timer loop: submit due tasks, requeue the first not-yet-due task
        while True:
            if self.is_shutdown:
                return

            try:
                while True:
                    run_at, task = self._queue.get(block=True, timeout=None)
                    if self.is_shutdown:
                        log.debug("Not executing scheduled task due to Scheduler shutdown")
                        return
                    if run_at <= time.time():
                        self._scheduled_tasks.remove(task)
                        fn, args = task
                        future = self._executor.submit(fn, *args)
                        future.add_done_callback(self._log_if_failed)
                    else:
                        # not due yet: put it back and wait a tick
                        self._queue.put_nowait((run_at, task))
                        break
            except Queue.Empty:
                pass

            time.sleep(0.1)

    def _log_if_failed(self, future):
        exc = future.exception()
        if exc:
            log.warning(
                "An internally scheduled tasked failed with an unhandled exception:",
                exc_info=exc)
def refresh_schema_and_set_result(keyspace, table, usertype, control_conn, response_future):
    """Refresh schema for a schema-change response, then complete the future.

    If the inline refresh raises, a full refresh is re-submitted through
    the session executor; the future is resolved (with None) either way.
    """
    try:
        if not control_conn._meta_refresh_enabled:
            log.debug("Skipping schema refresh in response to schema change because meta refresh is disabled; "
                      "Keyspace: %s; Table: %s, Type: %s", keyspace, table, usertype)
        else:
            log.debug("Refreshing schema in response to schema change. Keyspace: %s; Table: %s, Type: %s",
                      keyspace, table, usertype)
            control_conn._refresh_schema(response_future._connection, keyspace, table, usertype)
    except Exception:
        log.exception("Exception refreshing schema in response to schema change:")
        response_future.session.submit(
            control_conn.refresh_schema, keyspace, table, usertype)
    finally:
        response_future._set_final_result(None)
class ResponseFuture(object):
    """
    An asynchronous response delivery mechanism that is returned from calls
    to :meth:`.Session.execute_async()`.

    There are two ways for results to be delivered:

     - Synchronously, by calling :meth:`.result()`
     - Asynchronously, by attaching callback and errback functions via
       :meth:`.add_callback()`, :meth:`.add_errback()`, and
       :meth:`.add_callbacks()`.
    """

    query = None
    """
    The :class:`~.Statement` instance that is being executed through this
    :class:`.ResponseFuture`.
    """

    session = None           # the Session this request executes through
    row_factory = None       # callable applied to raw row results
    message = None           # the request message being sent
    default_timeout = None   # default timeout used by result()

    _req_id = None           # stream id of the in-flight request
    _final_result = _NOT_SET  # sentinel until a result is delivered
    _final_exception = None
    _query_trace = None
    _callbacks = None        # (fn, args, kwargs) tuples run on success
    _errbacks = None         # (fn, args, kwargs) tuples run on failure
    _current_host = None     # host the current attempt was sent to
    _current_pool = None
    _connection = None
    _query_retries = 0       # retries performed so far (fed to the retry policy)
    _start_time = None       # set when metrics are enabled
    _metrics = None
    _paging_state = None     # non-None when more result pages remain
def __init__(self, session, message, query, default_timeout=None, metrics=None, prepared_statement=None):
    """Internal: instances are created by the Session, not by users."""
    self.session = session
    self.row_factory = session.row_factory
    self.message = message
    self.query = query
    self.default_timeout = default_timeout
    self._metrics = metrics
    self.prepared_statement = prepared_statement
    self._callback_lock = Lock()
    if metrics is not None:
        # record start time so request_timer can be updated on completion
        self._start_time = time.time()
    self._make_query_plan()
    self._event = Event()
    self._errors = {}
    self._callbacks = []
    self._errbacks = []
def _make_query_plan(self):
    """Build a fresh host iterator from the load balancer.

    Stored as an iterator (not a list) so that subsequent calls to
    send_request (which retries may do) resume where they last left off.
    """
    plan = self.session._load_balancer.make_query_plan(
        self.session.keyspace, self.query)
    self.query_plan = iter(plan)
def send_request(self):
    """ Internal """
    # query_plan is a resumable iterator, so repeated calls (e.g. from
    # retries) continue with hosts that have not been tried yet
    for candidate in self.query_plan:
        request_id = self._query(candidate)
        if request_id is None:
            continue
        self._req_id = request_id
        return

    self._set_final_exception(NoHostAvailable(
        "Unable to complete the operation against any hosts", self._errors))
def _query(self, host, message=None, cb=None):
    """Attempt to send *message* to *host*; return the request id or None.

    None tells the caller to move on to the next host in the query plan;
    the failure reason is recorded in self._errors keyed by host.
    """
    if message is None:
        message = self.message

    if cb is None:
        cb = self._set_result

    pool = self.session._pools.get(host)
    if not pool:
        self._errors[host] = ConnectionException("Host has been marked down or removed")
        return None
    elif pool.is_shutdown:
        self._errors[host] = ConnectionException("Pool is shutdown")
        return None

    self._current_host = host
    self._current_pool = pool
    connection = None
    try:
        # TODO get connectTimeout from cluster settings
        connection, request_id = pool.borrow_connection(timeout=2.0)
        self._connection = connection
        connection.send_msg(message, request_id, cb=cb)
        return request_id
    except NoConnectionsAvailable as exc:
        log.debug("All connections for host %s are at capacity, moving to the next host", host)
        self._errors[host] = exc
        return None
    except Exception as exc:
        log.debug("Error querying host %s", host, exc_info=True)
        self._errors[host] = exc
        if self._metrics is not None:
            self._metrics.on_connection_error()
        if connection:
            # return the borrowed connection even though the send failed
            pool.return_connection(connection)
        return None
@property
def has_more_pages(self):
    """
    Returns :const:`True` if there are more pages left in the
    query results, :const:`False` otherwise.  This should only
    be checked after the first page has been returned.

    .. versionadded:: 2.0.0
    """
    state = self._paging_state
    return state is not None
def start_fetching_next_page(self):
    """
    If there are more pages left in the query result, this asynchronously
    starts fetching the next page.  If there are no pages left, :exc:`.QueryExhausted`
    is raised.  Also see :attr:`.has_more_pages`.

    This should only be called after the first page has been returned.

    .. versionadded:: 2.0.0
    """
    if not self._paging_state:
        raise QueryExhausted()

    # reset completion state and re-issue the query with the paging cursor
    self._make_query_plan()
    self.message.paging_state = self._paging_state
    self._event.clear()
    self._final_result = _NOT_SET
    self._final_exception = None
    self.send_request()
def _reprepare(self, prepare_message):
    """Re-prepare on the current host; fall back to other hosts on failure."""
    callback = partial(self.session.submit, self._execute_after_prepare)
    if self._query(self._current_host, prepare_message, cb=callback) is None:
        # try to submit the original prepared statement on some other host
        self.send_request()
def _set_result(self, response):
    """Event-loop callback: interpret *response* and resolve this future.

    Dispatches on the response type: result messages (rows, keyspace
    changes, schema changes), error messages (consulting the retry
    policy), connection errors, and unexpected messages. Runs on the
    connection's event loop thread, so potentially blocking work is
    pushed onto the session executor.
    """
    try:
        # this request is done with the connection either way; return it
        # to the pool before processing the response
        if self._current_pool and self._connection:
            self._current_pool.return_connection(self._connection)

        trace_id = getattr(response, 'trace_id', None)
        if trace_id:
            self._query_trace = QueryTrace(trace_id, self.session)

        if isinstance(response, ResultMessage):
            if response.kind == RESULT_KIND_SET_KEYSPACE:
                session = getattr(self, 'session', None)
                # since we're running on the event loop thread, we need to
                # use a non-blocking method for setting the keyspace on
                # all connections in this session, otherwise the event
                # loop thread will deadlock waiting for keyspaces to be
                # set.  This uses a callback chain which ends with
                # self._set_keyspace_completed() being called in the
                # event loop thread.
                if session:
                    session._set_keyspace_for_all_pools(
                        response.results, self._set_keyspace_completed)
            elif response.kind == RESULT_KIND_SCHEMA_CHANGE:
                # refresh the schema before responding, but do it in another
                # thread instead of the event loop thread
                self.session.submit(
                    refresh_schema_and_set_result,
                    response.results['keyspace'],
                    response.results.get('table'),
                    response.results.get('type'),
                    self.session.cluster.control_connection,
                    self)
            else:
                results = getattr(response, 'results', None)
                if results is not None and response.kind == RESULT_KIND_ROWS:
                    # remember paging cursor before converting rows
                    self._paging_state = response.paging_state
                    results = self.row_factory(*results)
                self._set_final_result(results)
        elif isinstance(response, ErrorMessage):
            # statement-level retry policy wins over the cluster default
            retry_policy = None
            if self.query:
                retry_policy = self.query.retry_policy
            if not retry_policy:
                retry_policy = self.session.cluster.default_retry_policy

            if isinstance(response, ReadTimeoutErrorMessage):
                if self._metrics is not None:
                    self._metrics.on_read_timeout()
                retry = retry_policy.on_read_timeout(
                    self.query, retry_num=self._query_retries, **response.info)
            elif isinstance(response, WriteTimeoutErrorMessage):
                if self._metrics is not None:
                    self._metrics.on_write_timeout()
                retry = retry_policy.on_write_timeout(
                    self.query, retry_num=self._query_retries, **response.info)
            elif isinstance(response, UnavailableErrorMessage):
                if self._metrics is not None:
                    self._metrics.on_unavailable()
                retry = retry_policy.on_unavailable(
                    self.query, retry_num=self._query_retries, **response.info)
            elif isinstance(response, OverloadedErrorMessage):
                if self._metrics is not None:
                    self._metrics.on_other_error()
                # need to retry against a different host here
                log.warning("Host %s is overloaded, retrying against a different "
                            "host", self._current_host)
                self._retry(reuse_connection=False, consistency_level=None)
                return
            elif isinstance(response, IsBootstrappingErrorMessage):
                if self._metrics is not None:
                    self._metrics.on_other_error()
                # need to retry against a different host here
                self._retry(reuse_connection=False, consistency_level=None)
                return
            elif isinstance(response, PreparedQueryNotFound):
                if self.prepared_statement:
                    query_id = self.prepared_statement.query_id
                    assert query_id == response.info, \
                        "Got different query ID in server response (%s) than we " \
                        "had before (%s)" % (response.info, query_id)
                else:
                    query_id = response.info

                try:
                    prepared_statement = self.session.cluster._prepared_statements[query_id]
                except KeyError:
                    if not self.prepared_statement:
                        # the server referenced a statement we have no record of
                        log.error("Tried to execute unknown prepared statement: id=%s",
                                  query_id.encode('hex'))
                        self._set_final_exception(response)
                        return
                    else:
                        prepared_statement = self.prepared_statement
                        self.session.cluster._prepared_statements[query_id] = prepared_statement

                current_keyspace = self._connection.keyspace
                prepared_keyspace = prepared_statement.keyspace
                if prepared_keyspace and current_keyspace != prepared_keyspace:
                    self._set_final_exception(
                        ValueError("The Session's current keyspace (%s) does "
                                   "not match the keyspace the statement was "
                                   "prepared with (%s)" %
                                   (current_keyspace, prepared_keyspace)))
                    return

                log.debug("Re-preparing unrecognized prepared statement against host %s: %s",
                          self._current_host, prepared_statement.query_string)
                prepare_message = PrepareMessage(query=prepared_statement.query_string)
                # since this might block, run on the executor to avoid hanging
                # the event loop thread
                self.session.submit(self._reprepare, prepare_message)
                return
            else:
                if hasattr(response, 'to_exception'):
                    self._set_final_exception(response.to_exception())
                else:
                    self._set_final_exception(response)
                return

            # act on the (retry_type, consistency) decision from the policy
            retry_type, consistency = retry
            if retry_type is RetryPolicy.RETRY:
                self._query_retries += 1
                self._retry(reuse_connection=True, consistency_level=consistency)
            elif retry_type is RetryPolicy.RETHROW:
                self._set_final_exception(response.to_exception())
            else:  # IGNORE
                if self._metrics is not None:
                    self._metrics.on_ignore()
                self._set_final_result(None)
        elif isinstance(response, ConnectionException):
            if self._metrics is not None:
                self._metrics.on_connection_error()
            if not isinstance(response, ConnectionShutdown):
                self._connection.defunct(response)
            self._retry(reuse_connection=False, consistency_level=None)
        elif isinstance(response, Exception):
            if hasattr(response, 'to_exception'):
                self._set_final_exception(response.to_exception())
            else:
                self._set_final_exception(response)
        else:
            # we got some other kind of response message
            msg = "Got unexpected message: %r" % (response,)
            exc = ConnectionException(msg, self._current_host)
            self._connection.defunct(exc)
            self._set_final_exception(exc)
    except Exception as exc:
        # almost certainly caused by a bug, but we need to set something here
        log.exception("Unexpected exception while handling result in ResponseFuture:")
        self._set_final_exception(exc)
def _set_keyspace_completed(self, errors):
    """Terminus of the USE-keyspace callback chain across all pools."""
    if errors:
        self._set_final_exception(ConnectionException(
            "Failed to set keyspace on all hosts: %s" % (errors,)))
    else:
        self._set_final_result(None)
def _execute_after_prepare(self, response):
    """
    Handle the response to our attempt to prepare a statement.
    If it succeeded, run the original query again against the same host.
    """
    if self._current_pool and self._connection:
        self._current_pool.return_connection(self._connection)

    if self._final_exception:
        # something else already failed this future; nothing left to do
        return

    if isinstance(response, ResultMessage):
        if response.kind == RESULT_KIND_PREPARED:
            # use self._query to re-use the same host and
            # at the same time properly borrow the connection
            request_id = self._query(self._current_host)
            if request_id is None:
                # this host errored out, move on to the next
                self.send_request()
        else:
            self._set_final_exception(ConnectionException(
                "Got unexpected response when preparing statement "
                "on host %s: %s" % (self._current_host, response)))
    elif isinstance(response, ErrorMessage):
        self._set_final_exception(response)
    elif isinstance(response, ConnectionException):
        log.debug("Connection error when preparing statement on host %s: %s",
                  self._current_host, response)
        # try again on a different host, preparing again if necessary
        self._errors[self._current_host] = response
        self.send_request()
    else:
        self._set_final_exception(ConnectionException(
            "Got unexpected response type when preparing "
            "statement on host %s: %s" % (self._current_host, response)))
def _set_final_result(self, response):
    """Record the final result, wake waiters, and run success callbacks.

    The lock only guards the assignment; callbacks are invoked after it
    is released (add_callback uses the same lock to decide whether to
    append or run immediately).
    """
    if self._metrics is not None:
        self._metrics.request_timer.addValue(time.time() - self._start_time)

    with self._callback_lock:
        self._final_result = response

    self._event.set()

    # apply each callback
    for callback in self._callbacks:
        fn, args, kwargs = callback
        fn(response, *args, **kwargs)
def _set_final_exception(self, response):
    """Record the final exception, wake waiters, and run errbacks.

    Mirror of _set_final_result: the lock only guards the assignment;
    errbacks run after it is released.
    """
    if self._metrics is not None:
        self._metrics.request_timer.addValue(time.time() - self._start_time)

    with self._callback_lock:
        self._final_exception = response

    self._event.set()

    for errback in self._errbacks:
        fn, args, kwargs = errback
        fn(response, *args, **kwargs)
def _retry(self, reuse_connection, consistency_level):
    """Queue a retry of this operation off the event loop thread.

    A consistency_level of None leaves the message's level unchanged.
    """
    if self._final_exception:
        # the connection probably broke while we were waiting
        # to retry the operation
        return

    if self._metrics is not None:
        self._metrics.on_retry()
    if consistency_level is not None:
        self.message.consistency_level = consistency_level

    # don't retry on the event loop thread
    self.session.submit(self._retry_task, reuse_connection)
def _retry_task(self, reuse_connection):
    """Executor-side half of _retry(): reissue on the same host or move on."""
    if self._final_exception:
        # the connection probably broke while we were waiting
        # to retry the operation
        return

    if reuse_connection and self._query(self._current_host) is not None:
        return

    # otherwise, move onto another host
    self.send_request()
def result(self, timeout=_NOT_SET):
    """
    Return the final result or raise an Exception if errors were
    encountered.  If the final result or error has not been set
    yet, this method will block until that time.

    You may set a timeout (in seconds) with the `timeout` parameter.
    By default, the :attr:`~.default_timeout` for the :class:`.Session`
    this was created through will be used for the timeout on this
    operation.  If the timeout is exceeded, an
    :exc:`cassandra.OperationTimedOut` will be raised.

    Example usage::

        >>> future = session.execute_async("SELECT * FROM mycf")
        >>> # do other stuff...

        >>> try:
        ...     rows = future.result()
        ...     for row in rows:
        ...         ... # process results
        ... except Exception:
        ...     log.exception("Operation failed:")
    """
    if timeout is _NOT_SET:
        timeout = self.default_timeout

    # REFACTOR: the original duplicated the delivery logic before and
    # after the wait; only wait when no result/exception is set yet,
    # then deliver once.
    if self._final_result is _NOT_SET and self._final_exception is None:
        self._event.wait(timeout=timeout)

    if self._final_result is not _NOT_SET:
        # wrap in a PagedResult when the server indicated more pages remain
        if self._paging_state is None:
            return self._final_result
        else:
            return PagedResult(self, self._final_result, timeout)
    elif self._final_exception:
        raise self._final_exception
    else:
        raise OperationTimedOut(errors=self._errors, last_host=self._current_host)
def get_query_trace(self, max_wait=None):
    """
    Return the :class:`~.query.QueryTrace` representing a trace of the
    last attempt for this operation, or :const:`None` if tracing was not
    enabled for this query.  Fetching the trace details from Cassandra
    may itself fail; if the trace is not available after `max_wait`
    seconds, :exc:`cassandra.query.TraceUnavailable` will be raised.
    """
    trace = self._query_trace
    if not trace:
        return None
    trace.populate(max_wait)
    return trace
def add_callback(self, fn, *args, **kwargs):
    """
    Attach `fn` to be invoked when the final results arrive.

    By default `fn` receives the results as its first and only argument;
    any extra `*args` and `**kwargs` given here are forwarded to it as
    additional positional or keyword arguments.

    Callbacks attached this way fire only on success.  Use
    :meth:`.add_errback()` or :meth:`add_callbacks()` to handle errors.

    If the final result has already been seen when this method is
    called, `fn` runs immediately, before this method returns.

    **Important**: if the callback you attach raises an exception,
    **the exception will be ignored**, so make sure your callback
    handles every error case you care about.

    Usage example::

        >>> session = cluster.connect("mykeyspace")

        >>> def handle_results(rows, start_time, should_log=False):
        ...     if should_log:
        ...         log.info("Total time: %f", time.time() - start_time)
        ...     ...

        >>> future = session.execute_async("SELECT * FROM users")
        >>> future.add_callback(handle_results, time.time(), should_log=True)
    """
    with self._callback_lock:
        have_final_result = self._final_result is not _NOT_SET
        if not have_final_result:
            self._callbacks.append((fn, args, kwargs))
    # Invoke outside the lock so a slow callback cannot stall other
    # threads that are registering callbacks.
    if have_final_result:
        fn(self._final_result, *args, **kwargs)
    return self
def add_errback(self, fn, *args, **kwargs):
    """
    Like :meth:`.add_callback()`, but for the failure path: `fn`
    receives the Exception instance as its first positional argument.
    If the operation has already failed when this is called, `fn` runs
    immediately, before this method returns.
    """
    with self._callback_lock:
        failed_already = bool(self._final_exception)
        if not failed_already:
            self._errbacks.append((fn, args, kwargs))
    # Invoke outside the lock so a slow errback cannot stall other
    # threads that are registering errbacks.
    if failed_already:
        fn(self._final_exception, *args, **kwargs)
    return self
def add_callbacks(self, callback, errback,
                  callback_args=(), callback_kwargs=None,
                  errback_args=(), errback_kwargs=None):
    """
    Register a success handler and a failure handler in one call --
    a convenient combination of :meth:`.add_callback()` and
    :meth:`.add_errback()`.  The ``*_args``/``*_kwargs`` pairs are
    forwarded to the corresponding handler.

    Example usage::

        >>> session = cluster.connect()
        >>> query = "SELECT * FROM mycf"
        >>> future = session.execute_async(query)

        >>> def log_results(results, level='debug'):
        ...     for row in results:
        ...         log.log(level, "Result: %s", row)

        >>> def log_error(exc, query):
        ...     log.error("Query '%s' failed: %s", query, exc)

        >>> future.add_callbacks(
        ...     callback=log_results, callback_kwargs={'level': 'info'},
        ...     errback=log_error, errback_args=(query,))
    """
    callback_kwargs = callback_kwargs or {}
    errback_kwargs = errback_kwargs or {}
    self.add_callback(callback, *callback_args, **callback_kwargs)
    self.add_errback(errback, *errback_args, **errback_kwargs)
def clear_callbacks(self):
    """Remove all previously registered callbacks and errbacks."""
    with self._callback_lock:
        # Reset the same attributes that add_callback()/add_errback()
        # append to.  (These were previously misspelled as
        # ``_callback``/``_errback``, so clearing had no effect.)
        self._callbacks = []
        self._errbacks = []
def __str__(self):
    """Human-readable summary of the future's query and current state."""
    if self._final_result is _NOT_SET:
        result = "(no result yet)"
    else:
        result = self._final_result
    template = "<ResponseFuture: query='%s' request_id=%s result=%s exception=%s host=%s>"
    return template % (self.query, self._req_id, result,
                       self._final_exception, self._current_host)
__repr__ = __str__
class QueryExhausted(Exception):
    """
    Raised when :meth:`.ResponseFuture.start_fetching_next_page()` is
    called and there are no more pages to fetch.  Check
    :attr:`.ResponseFuture.has_more_pages` before calling to avoid this.

    .. versionadded:: 2.0.0
    """
class PagedResult(object):
    """
    An iterator over the rows from a paged query result. Whenever the number
    of result rows for a query exceed the :attr:`~.query.Statement.fetch_size`
    (or :attr:`~.Session.default_fetch_size`, if not set) an instance of this
    class will be returned.

    You can treat this as a normal iterator over rows::

        >>> from cassandra.query import SimpleStatement
        >>> statement = SimpleStatement("SELECT * FROM users", fetch_size=10)
        >>> for user_row in session.execute(statement):
        ...     process_user(user_row)

    Whenever there are no more rows in the current page, the next page will
    be fetched transparently. However, note that it *is* possible for
    an :class:`Exception` to be raised while fetching the next page, just
    like you might see on a normal call to ``session.execute()``.

    .. versionadded: 2.0.0
    """

    # The ResponseFuture that produced (and keeps producing) the pages.
    response_future = None

    def __init__(self, response_future, initial_response, timeout=_NOT_SET):
        self.response_future = response_future
        # `initial_response` is the first page of rows; wrap it in an
        # iterator so next() can simply exhaust it.
        self.current_response = iter(initial_response)
        # Timeout (in seconds) applied to each subsequent page fetch.
        self.timeout = timeout

    def __iter__(self):
        return self

    def next(self):
        # Serve rows from the current page until it is exhausted.
        try:
            return next(self.current_response)
        except StopIteration:
            if not self.response_future.has_more_pages:
                # No further pages: let StopIteration propagate to end
                # the iteration normally.
                raise

        # Fetch the next page synchronously (bounded by self.timeout).
        self.response_future.start_fetching_next_page()
        result = self.response_future.result(self.timeout)
        if self.response_future.has_more_pages:
            # result() returned another PagedResult; adopt its page
            # iterator directly instead of nesting PagedResults.
            self.current_response = result.current_response
        else:
            # Final page: result() returned the plain row collection.
            self.current_response = iter(result)

        return next(self.current_response)

    # Python 3 iterator protocol alias.
    __next__ = next
| {
"content_hash": "4f9bde4170bda072bee4e468086ee116",
"timestamp": "",
"source": "github",
"line_count": 3138,
"max_line_length": 176,
"avg_line_length": 40.13734862970045,
"alnum_prop": 0.6021786250208414,
"repo_name": "HackerEarth/cassandra-python-driver",
"id": "c7b2be7b73f45e8249e803976577dba297c61763",
"size": "126531",
"binary": false,
"copies": "1",
"ref": "refs/heads/cqlengine_multiplehosts",
"path": "cassandra/cluster.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "24198"
},
{
"name": "Python",
"bytes": "1428708"
}
],
"symlink_target": ""
} |
'''
Created on 27/08/2013
@author: luke
'''
import unittest
from linked_list import Node, LinkedList
class TestNode(unittest.TestCase):
    """
    A node is one segment of the linked list: it carries a ``data``
    field (get- and settable) and a ``next`` reference (get- and
    settable) to the following node in the list.
    """

    def testData(self):
        """``data`` is readable and writable."""
        node = Node(data="data", next=None)
        self.assertEqual(node.data, "data")
        node.data = "new_data"
        self.assertEqual(node.data, "new_data")

    def testNext(self):
        """``next`` references the following node and can be reassigned."""
        second = Node(data="data", next=None)
        first = Node(data="first", next=second)
        self.assertEqual(first.next, second)
        tail = Node(data="third", next=None)
        second.next = tail
        self.assertEqual(second.next, tail)
class TestLinkedList(unittest.TestCase):
    """Behavioural tests for LinkedList: `first`, insertion, deletion."""

    def _populate_list(self):
        # Fill the list with nodes holding 0..9 in insertion order.
        for i in range(10):
            self._ll.insert(Node(i))

    def setUp(self):
        self._ll = LinkedList()

    def tearDown(self):
        del(self._ll)

    def testFirst(self):
        """
        First returns the first item in the Linked list.
        """
        self.assertIsNone(self._ll.first)
        self._ll.insert(Node("data"))
        self.assertEqual(self._ll.first, Node("data", next=None))
        self._ll.insert(Node("First!"), ref_node=self._ll.first)
        # The expected chain must contain the node actually inserted
        # above ("First!"); the expectation previously said "First" and
        # could never match.
        self.assertEqual(self._ll.first,
                         Node("data", next=Node("First!", next=None)))

    def testInsert(self):
        """
        The list inserts the given node at the given position;
        if position is not given it inserts at the end of the list.
        """
        for i in range(10):
            self._ll.insert(Node(data=i))
        i = 0
        temp = self._ll.first
        while temp.next != None:
            self.assertEqual(temp.data, i)
            temp = temp.next
            i += 1
        self._ll.insert(Node("data"), ref_node=self._ll.first)
        self.assertEqual(Node("data"), self._ll.first.next)
        # NOTE(review): inserting after the first node should leave the
        # original second node (data 1) in third position -- confirm
        # whether Node(2) here is intentional or should be Node(1).
        self.assertEqual(Node(2), self._ll.first.next.next)

    def testDelete(self):
        """
        The list deletes the node after the node referenced by node.
        """
        self._populate_list()
        self._ll.delete(self._ll.first.next)
        temp = self._ll.first
        while temp.next != None:
            self.assertNotEqual(temp.data, 2)
            temp = temp.next
if __name__ == "__main__":
    # Allow running this module directly: unittest discovers and runs
    # the test cases defined above.
    unittest.main()
"content_hash": "641e585d90c8e3448029b56bf17a5354",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 93,
"avg_line_length": 30.674698795180724,
"alnum_prop": 0.5761979575805185,
"repo_name": "Drhealsgood/miniprojects",
"id": "0749f96a39e3f5ae77bbb6f2744272f5f7bee824",
"size": "2546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datastructures_and_algorithms/datastructures/linked_list/linked_list_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2709"
},
{
"name": "Java",
"bytes": "11564"
},
{
"name": "Python",
"bytes": "68698"
}
],
"symlink_target": ""
} |
# Django app-config hook: names the AppConfig class to load when this
# app is listed in INSTALLED_APPS without an explicit config path.
default_app_config = 'kutime.apps.KutimeConfig'
| {
"content_hash": "b839a96c1e8ccbb65d383e82c58dec40",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 47,
"avg_line_length": 48,
"alnum_prop": 0.7916666666666666,
"repo_name": "littmus/kutime_web",
"id": "d58fdfc6b64342e3b11c764d8a33cf135ca8841d",
"size": "73",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kutime/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "745"
},
{
"name": "CoffeeScript",
"bytes": "15158"
},
{
"name": "HTML",
"bytes": "13991"
},
{
"name": "JavaScript",
"bytes": "12145"
},
{
"name": "Python",
"bytes": "22087"
},
{
"name": "Shell",
"bytes": "105"
}
],
"symlink_target": ""
} |
import logging
from itertools import chain
from django.conf import settings
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.core.exceptions import ValidationError
from .parsing import Parser
from .errors import InlineValidationError, create_verbose_inline_errors
__all__ = ('Renderer', 'renderer',)
logger = logging.getLogger(__name__)
class Renderer(object):
    """Renders parsed inline nodes to HTML, collecting parse and
    rendering errors.

    Error behaviour is controlled per call: ``raise_errors`` raises the
    collected errors as a ValidationError, ``log_errors`` logs them, and
    ``verbose_errors`` selects the verbose error formatting.
    """

    def render(self, content, media=None,
               raise_errors=False, log_errors=False, verbose_errors=True):
        # Parse and render; any unexpected exception here is treated as
        # a hard failure of the whole render.
        try:
            nodes, syntax_errors = Parser(media=media).parse(content)
            content, inline_errors = self.render_nodes(nodes, media)
        except Exception as err:
            if log_errors:
                logger.exception(err)
            # settings.INLINE_DEBUG (when defined) overrides
            # raise_errors for unexpected exceptions, so debug setups
            # always surface them.
            if getattr(settings, 'INLINE_DEBUG', raise_errors):
                raise
            return u''

        if bool(syntax_errors or inline_errors):
            # Merge syntax and inline errors, ordered by line number.
            errors = [
                ve[1] for ve in sorted(
                    chain(
                        ((se.lineno, se) for se in syntax_errors),
                        ((ie.lineno, ie) for ie in inline_errors)),
                    key=lambda obj: obj[0])]
            validation_errors = \
                create_verbose_inline_errors(errors) \
                if verbose_errors else ValidationError(errors)
            if log_errors:
                for error_msg in validation_errors.messages:
                    logger.error(error_msg)
            if raise_errors:
                raise validation_errors
            # Errors present but not raised: render nothing.
            return u''

        return mark_safe(content)

    def render_nodes(self, nodes, media):
        """Render each node; return ``(joined_output, inline_errors)``."""
        bits = []
        errors = []
        for node in nodes:
            try:
                bit = node.render(media)
            except ValidationError as err:
                # A failing node contributes empty output plus one
                # InlineValidationError per message.
                bit = u''
                errors.extend(
                    (InlineValidationError(node.lineno, msg)
                     for msg in err.messages))
            bits.append(force_text(bit))
        return u''.join(bits), errors


# Module-level singleton for callers that need no per-instance state.
renderer = Renderer()
| {
"content_hash": "a33603d6c7009a4c4c6a2a1e7dc3b4a6",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 74,
"avg_line_length": 30.6231884057971,
"alnum_prop": 0.5584477046852816,
"repo_name": "acdha/django-inlines",
"id": "c7fedfca809efe5b3c672a548523b2d5f89733e3",
"size": "2113",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_inlines/rendering.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "74374"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from ...tokenizer import Tokenizer
from ...symbols import ORTH, LEMMA, POS
from ...lang.en import English
def test_issue1250_cached_special_cases():
    """A tokenizer special case must survive the tokenizer cache:
    tokenizing the same text twice yields the same lemmas both times."""
    nlp = English()
    nlp.tokenizer.add_special_case(
        u'reimbur', [{ORTH: u'reimbur', LEMMA: u'reimburse', POS: u'VERB'}])
    expected = ['reimburse', ',', 'reimburse', '...']
    for _ in range(2):
        doc = nlp(u'reimbur, reimbur...')
        assert [token.lemma_ for token in doc] == expected
| {
"content_hash": "25803ccd86e1fb17308e84510fc61c58",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 103,
"avg_line_length": 42.84615384615385,
"alnum_prop": 0.6391382405745063,
"repo_name": "recognai/spaCy",
"id": "3b6e0bbf2e37534386b6dd8c60acab9a19e28826",
"size": "557",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "spacy/tests/regression/test_issue1250.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "103274"
},
{
"name": "C++",
"bytes": "161734"
},
{
"name": "CSS",
"bytes": "42943"
},
{
"name": "HTML",
"bytes": "904913"
},
{
"name": "JavaScript",
"bytes": "17993"
},
{
"name": "Python",
"bytes": "199094758"
},
{
"name": "Shell",
"bytes": "1091"
}
],
"symlink_target": ""
} |
""" Set of tools to deals with colors in python
"""
import hashlib
class NotAValidComponentException(Exception):
    """Raised by ``Color.__getitem__`` when the requested component is
    not part of any supported colour model."""
class Color(object):
    """ Object to create, convert and manipulate color in python

    >>> c = Color(string=b'network')
    >>> c.get_rgb()
    (210, 12, 100)
    >>> c.get_hex()
    '#d20c64'
    >>> c.get_hsl()
    (333, 89, 44)
    """

    def __init__(self, rgb=(0, 0, 0), hex_rgb="#000000", string=None):
        """ Color object constructor

        :param: rgb: The RGB int tuple of this color
        :type rgb: :py:class `tuple`
        :param: hex_rgb: The hexadecimal rgb of this color
        :type hex_rgb: :py:class `str`
        :param: string: A string to convert into a color
        :type string: :py:class `bytes`
        """
        if not hex_rgb == "#000000":
            # An explicit hex value wins: derive RGB and HSL from it.
            self._hex_rgb = hex_rgb
            self._rgb = self.convert_hex_to_rgb()
            self._hsl = self.convert_rgb_to_hsl()
        elif string:
            self.set_hex_from_string(string)
        else:
            self._rgb = rgb
            # Bug fix: this previously assigned ``self._hex`` while every
            # accessor reads ``self._hex_rgb``, so colors built from an
            # RGB tuple raised AttributeError on get_hex().
            self._hex_rgb = self.convert_rgb_to_hex()
            self._hsl = self.convert_rgb_to_hsl()

    @property
    def models(self):
        """ Compute the models dict on demand """
        return {
            'RGB': self.get_rgb(),
            'HSL': self.get_hsl()
        }

    def __repr__(self):
        return "<Color r:%s, g:%s, b:%s>" % self.get_rgb()

    def __getitem__(self, component):
        """ Get one component of the color models, e.g. ``color['R']``
        or ``color['L']``.

        :raises NotAValidComponentException: for unknown components
        """
        # uppercase component
        component = component.upper()
        # get a list of available components ("RGBHSL")
        good_components = "".join(self.models.keys())
        # detect bad item
        # NOTE(review): only single-character components are meaningful;
        # a multi-character substring such as "GB" would pass this check.
        if component not in good_components:
            raise NotAValidComponentException()
        # get the index
        component_index = good_components.index(component)
        # get the model from which the component comes
        model = list(self.models.values())[int(component_index / 3)]
        # return the component
        return model[component_index % 3]

    def get_hex(self):
        """ Get the hexadecimal representation of the color. """
        return self._hex_rgb

    def get_rgb(self):
        """ Get the rgb representation of the color. """
        return self._rgb

    def get_hsl(self):
        """ Get the HSL representation of the color. """
        return self._hsl

    def set_hex_from_rgb(self):
        """ set the HEX color value from the RGB value """
        self._hex_rgb = self.convert_rgb_to_hex()

    def set_rgb_from_hex(self):
        """ set the RGB from the HEX value """
        self._rgb = self.convert_hex_to_rgb()

    def set_hsl_from_rgb(self):
        """ set the HSL from the RGB value """
        self._hsl = self.convert_rgb_to_hsl()

    def set_hsl_from_hex(self):
        """ set the HSL (via RGB) from the HEX value """
        self._rgb = self.convert_hex_to_rgb()
        self._hsl = self.convert_rgb_to_hsl()

    def convert_rgb_to_hex(self):
        """ Convert RGB color to HEX color """
        # unpack rgb
        (red, green, blue) = self.get_rgb()
        # Then we get the hex value, keep only the interesting part
        # since hex(int) return 0x??.
        # At the end, fill result with 0 to get a two characters string
        hexred = hex(int(red))[2:].zfill(2)
        # do this for the rest of color parts
        hexgreen = hex(int(green))[2:].zfill(2)
        hexblue = hex(int(blue))[2:].zfill(2)
        # return the concatenation or hex_{r,g,b}, prefixed by `#`
        return "#%s" % "".join((hexred, hexgreen, hexblue))

    def convert_hex_to_rgb(self):
        """ Convert HEX color to RGB color """
        # split hex: two hex digits per channel after the leading '#'
        _ = self.get_hex()[1:]
        return (int(_[:2], 16),
                int(_[2:4], 16),
                int(_[4:], 16))

    def convert_rgb_to_hsl(self):
        """ Convert RGB color to HSL color """
        max_rgb = max(*self.get_rgb())
        min_rgb = min(*self.get_rgb())
        diff_max_min = float(max_rgb - min_rgb)
        # compute H: hue sector depends on which channel is maximal
        if diff_max_min == 0:
            h_hsl = 0
        elif max_rgb == self._rgb[0]:
            h_hsl = ((self._rgb[1] - self._rgb[2]) / diff_max_min) % 6
        elif max_rgb == self._rgb[1]:
            h_hsl = (self._rgb[2] - self._rgb[0]) / diff_max_min + 2
        elif max_rgb == self._rgb[2]:
            h_hsl = (self._rgb[0] - self._rgb[1]) / diff_max_min + 4
        h_hsl = 60 * h_hsl
        # compute L
        # we want the % value, so divide by 255
        l_hsl = 0.5 * (max_rgb + min_rgb) / 255.
        # compute S (undefined for pure black/white, use 0)
        if l_hsl in (0, 1):
            s_hsl = 0
        else:
            # we want the % value, so divide by 255
            s_hsl = diff_max_min / float((1. - abs(2. * l_hsl - 1.))) / 255.
        return (int(round(h_hsl)),
                int(round(s_hsl*100)),
                int(round(l_hsl*100)))

    def set_rgb_from_string(self, string):
        """ Set the rgb color representation of
        a given string.

        :param: string: The string to colorize
        :type string: :py:class `bytes`
        """
        # get the md5 hexdigest of the string
        # its length is always 32
        hash_str = hashlib.md5(string).hexdigest()
        # take 3 slices of 8 bytes in this hash to create
        # red, green and blue values
        red, green, blue = (hash_str[8*i:8*(i+1)] for i in range(0, 3))
        # compute the int representation for red, green and blue
        # We first get the int value of color part with int(red, 16)
        # The color part (red, green or blue) is ranged between 0 and 256
        int_r = int(red, 16) % 256
        # do this for the rest of color parts
        int_g = int(green, 16) % 256
        int_b = int(blue, 16) % 256
        # store the r, g, b tuple
        self._rgb = (int_r, int_g, int_b)

    def set_hex_from_string(self, string):
        """ Set the HEX color representation of
        a given string.

        :param: string: The string to colorize
        :type string: :py:class `bytes`
        """
        # convert string to RGB then RGB to HEX and HEX to HSL
        self.set_rgb_from_string(string)
        self.set_hex_from_rgb()
        self.set_hsl_from_hex()
| {
"content_hash": "571571c2adae86ca466332eee4719840",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 76,
"avg_line_length": 35.67039106145251,
"alnum_prop": 0.5357870007830854,
"repo_name": "avcreation/pyPoppins",
"id": "23185a75969439ad6e0eed39fcb3201a596707f3",
"size": "6431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poppinsbag/colors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10749"
}
],
"symlink_target": ""
} |
import sys
import os
import time
# Feed each line of the file named by argv[2] to a "./scrape<argv[1]>"
# helper binary, one invocation per line.
# NOTE(review): each line is interpolated into a shell command string
# unescaped (shell injection if the input file is untrusted) and still
# carries its trailing newline -- confirm both are intended.
with open(sys.argv[2], "rt") as file:
    for line in file:
        os.system("./scrape"+sys.argv[1]+" "+line)
        time.sleep(1) #sleep between so you can quit and so it doesn't spam the site with requests. You can delete this line if you know the site won't mind, but beware you'll need to hold down control c for each of the go programs because of the way that they run
| {
"content_hash": "564b857db3be09547d6cf2f5b0daf437",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 264,
"avg_line_length": 51.25,
"alnum_prop": 0.7024390243902439,
"repo_name": "loadedice/Go-scrape",
"id": "43ef753ef133683838746862d169700016b17703",
"size": "434",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "go-deeper.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Go",
"bytes": "1642"
},
{
"name": "Python",
"bytes": "434"
},
{
"name": "Shell",
"bytes": "77"
}
],
"symlink_target": ""
} |
from aiohttp.test_utils import AioHTTPTestCase, BaseTestServer, unittest_run_loop
from aiosparql.client import SPARQLClient
from aiosparql.syntax import IRI
__all__ = ["unittest_run_loop", "AioSPARQLTestCase", "TestSPARQLClient"]
class TestSPARQLClient:
    """Wraps an aiohttp test server behind a :class:`SPARQLClient`.

    The SPARQL client is created only in :meth:`start_server`, because
    the relative endpoint paths given at construction time must first be
    rewritten into absolute URLs against the test server's address.
    """

    def __init__(self, server, *, cookie_jar=None, loop=None, **kwargs):
        # `server` must already be wrapped in an aiohttp test server.
        if not isinstance(server, BaseTestServer):
            raise TypeError(
                "server must be web.Application TestServer "
                "instance, found type: %r" % type(server)
            )
        self._server = server
        self._loop = loop
        # Remaining keyword arguments are stored and handed to
        # SPARQLClient in start_server() after URL rewriting.
        self._client_kwargs = kwargs
        self._session = None
        self._closed = False

    async def start_server(self):
        """Start the test server, then build the SPARQL client against it."""
        await self._server.start_server(loop=self._loop)
        # Rewrite relative endpoint paths into absolute URLs now that
        # the server's host/port are known; work on a copy so the stored
        # kwargs stay unchanged.
        kwargs = dict(self._client_kwargs)
        if kwargs.get("endpoint"):
            kwargs["endpoint"] = self.make_url(kwargs["endpoint"])
        if kwargs.get("update_endpoint"):
            kwargs["update_endpoint"] = self.make_url(kwargs["update_endpoint"])
        if kwargs.get("crud_endpoint"):
            kwargs["crud_endpoint"] = self.make_url(kwargs["crud_endpoint"])
        self._session = SPARQLClient(loop=self._loop, **kwargs)

    @property
    def host(self):
        return self._server.host  # pragma nocover

    @property
    def port(self):
        return self._server.port  # pragma nocover

    @property
    def server(self):
        return self._server  # pragma nocover

    @property
    def session(self):
        # Guard against use before start_server() has created the client.
        if self._session is None:
            raise RuntimeError(
                "Trying to access SPARQLClient before the " "server has started"
            )  # pragma nocover
        return self._session

    def make_url(self, path):
        """Return *path* as an absolute URL on the test server."""
        return str(self._server.make_url(path))

    # The methods below simply delegate to the wrapped SPARQLClient.

    def query(self, query, *args, **keywords):
        return self.session.query(query, *args, **keywords)

    def update(self, query, *args, **keywords):
        return self.session.update(query, *args, **keywords)

    def get(self, *args, **kwargs):
        return self.session.get(*args, **kwargs)

    def put(self, *args, **kwargs):
        return self.session.put(*args, **kwargs)

    def delete(self, *args, **kwargs):
        return self.session.delete(*args, **kwargs)

    def post(self, *args, **kwargs):
        return self.session.post(*args, **kwargs)

    async def close(self):
        """Close the client session and the test server (runs at most once)."""
        if not self._closed:
            await self._session.close()
            await self._server.close()
            self._closed = True
class AioSPARQLTestCase(AioHTTPTestCase):
    """AioHTTPTestCase variant whose client is a :class:`TestSPARQLClient`.

    Subclasses may override ``client_kwargs`` to point the client at
    different endpoints or a different default graph.
    """

    client_kwargs = {
        "endpoint": "/sparql",
        "graph": IRI("http://mu.semte.ch/test-application"),
    }

    async def get_client(self, server):
        """Build the SPARQL test client wrapping *server*."""
        kwargs = dict(self.client_kwargs)
        return TestSPARQLClient(server, loop=self.loop, **kwargs)
| {
"content_hash": "ae8595d5d7590e0570ed357f90c0f81d",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 81,
"avg_line_length": 32.59770114942529,
"alnum_prop": 0.610719322990127,
"repo_name": "tenforce/sparql-aiohttp",
"id": "3722e70e7db993c8e5441b56d009ba21c200a811",
"size": "2836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiosparql/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "29122"
}
],
"symlink_target": ""
} |
"""
Example DAG demonstrating the usage of DateTimeBranchOperator with datetime as well as time objects as
targets.
"""
import pendulum
from airflow import DAG
from airflow.operators.datetime import BranchDateTimeOperator
from airflow.operators.empty import EmptyOperator
# DAG 1: branch on absolute datetime bounds.
dag1 = DAG(
    dag_id="example_branch_datetime_operator",
    start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
    catchup=False,
    tags=["example"],
    schedule_interval="@daily",
)

# [START howto_branch_datetime_operator]
empty_task_11 = EmptyOperator(task_id='date_in_range', dag=dag1)
empty_task_21 = EmptyOperator(task_id='date_outside_range', dag=dag1)

cond1 = BranchDateTimeOperator(
    task_id='datetime_branch',
    follow_task_ids_if_true=['date_in_range'],
    follow_task_ids_if_false=['date_outside_range'],
    target_upper=pendulum.datetime(2020, 10, 10, 15, 0, 0),
    target_lower=pendulum.datetime(2020, 10, 10, 14, 0, 0),
    dag=dag1,
)

# Run empty_task_11 if cond1 executes between 2020-10-10 14:00:00 and 2020-10-10 15:00:00
cond1 >> [empty_task_11, empty_task_21]
# [END howto_branch_datetime_operator]


# DAG 2: branch on time-of-day bounds only (no date component).
dag2 = DAG(
    dag_id="example_branch_datetime_operator_2",
    start_date=pendulum.datetime(2021, 1, 1, tz="UTC"),
    catchup=False,
    tags=["example"],
    schedule_interval="@daily",
)
# [START howto_branch_datetime_operator_next_day]
empty_task_12 = EmptyOperator(task_id='date_in_range', dag=dag2)
empty_task_22 = EmptyOperator(task_id='date_outside_range', dag=dag2)

cond2 = BranchDateTimeOperator(
    task_id='datetime_branch',
    follow_task_ids_if_true=['date_in_range'],
    follow_task_ids_if_false=['date_outside_range'],
    target_upper=pendulum.time(0, 0, 0),
    target_lower=pendulum.time(15, 0, 0),
    dag=dag2,
)

# Since target_lower happens after target_upper, target_upper will be moved to the following day
# Run empty_task_12 if cond2 executes between 15:00:00 and 00:00:00 of the following day
cond2 >> [empty_task_12, empty_task_22]
# [END howto_branch_datetime_operator_next_day]
| {
"content_hash": "ee8f53ea3e12c8f764a327d7f6d99178",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 102,
"avg_line_length": 33.78333333333333,
"alnum_prop": 0.7143561914158856,
"repo_name": "danielvdende/incubator-airflow",
"id": "e707514c868a07f40ac35cfaabbde1a4b3492494",
"size": "2815",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/example_dags/example_branch_datetime_operator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
"""Various algorithms for meshes that don't fit anywhere else."""
import itertools as it
import numpy as np
import ap.mesh.parsers as parsers
def sorted_edges(element, argyris=True):
    """
    Return the edges of *element* as sorted ``(start, end)`` tuples.

    Required Arguments
    ------------------
    * element : array-like container of the nodes comprising a finite
      element, assumed to be in GMSH or Argyris order.

    Optional Arguments
    ------------------
    * argyris : Set to True (default) to treat the input as an Argyris
      element.  Other orderings are not implemented.
    """
    if not argyris:
        raise NotImplementedError
    corner_pairs = ((0, 1), (0, 2), (1, 2))
    return [tuple(sorted((element[first], element[second])))
            for (first, second) in corner_pairs]
def extract_boundary_edges(elements):
    """
    Recover boundary edges from a connectivity matrix for meshes that do
    not specify edge information.

    Required Arguments
    ------------------
    * elements : element connectivity matrix.  Assumes Lagrange elements
      and GMSH-style ordering (corner nodes first, then midside nodes).

    Output
    ------
    A list of edge tuples ``(start, end, <midside nodes...>, -1)`` in
    their original orientation, where the trailing -1 stands in for the
    (unknown) GMSH line attribute of a labelled edge.
    """
    nodes_per_side = int((len(elements[0]) - 3) / 3)
    # Sorted versions of edges seen an odd number of times so far.
    odd_occurrences = set()
    # Map each sorted edge back to its most recent original orientation.
    original_orientation = dict()
    for element in elements:
        for (i, j) in [(0, 1), (1, 2), (2, 0)]:
            midside = tuple(element[3 + i*nodes_per_side:3 + (i + 1)*nodes_per_side])
            edge = (element[i], element[j]) + midside + (-1,)
            key = tuple(sorted(edge))
            original_orientation[key] = edge
            # Interior edges appear exactly twice, so toggling membership
            # leaves only boundary edges (seen once) in the set.
            if key in odd_occurrences:
                odd_occurrences.remove(key)
            else:
                odd_occurrences.add(key)
    return [original_orientation[key] for key in odd_occurrences]
def project_nodes(projection, elements, original_nodes,
                  attempt_flatten=False):
    """
    Apply *projection* to every node.  For quadratic (6-node) elements,
    the edge-midpoint nodes are recomputed afterwards as the average of
    their corner nodes (GMSH ordering assumed).

    Required Arguments
    ------------------
    * projection : callable mapping one node's coordinates to its
      projected coordinates.
    * elements : element connectivity matrix.  Assumes Lagrange elements
      and GMSH-style ordering.
    * original_nodes : nodal coordinates corresponding to elements.

    Optional Arguments
    ------------------
    * attempt_flatten : if every node shares the same final coordinate,
      drop that coordinate instead of applying the projection.

    Output
    ------
    A numpy array of the projected nodes.
    """
    if attempt_flatten and np.all(original_nodes[:, -1] == original_nodes[0, -1]):
        return np.ascontiguousarray(original_nodes[:, 0:-1])

    projected = np.array([projection(node) for node in original_nodes])

    if elements.shape[1] == 6:
        # Quadratic: midpoint columns 3..5 must lie halfway between the
        # corners of the edge they subdivide.
        for axis in range(2):
            for (corner, neighbor) in [(0, 1), (1, 2), (2, 0)]:
                projected[elements[:, corner + 3] - 1, axis] = \
                    0.5*(projected[elements[:, corner] - 1, axis] +
                         projected[elements[:, neighbor] - 1, axis])
    # Linear (3-node) elements have no midpoints to fix.
    return projected
def change_order(mesh, order):
    """
    Change the order of the elements in a mesh.

    Only raising linear (3-node) triangles to quadratic (6-node)
    triangles is supported; anything else raises NotImplementedError.
    """
    max_node_num = mesh.elements.max()
    if mesh.elements.shape[1] == 3 and order == 2:
        new_elements = np.zeros((mesh.elements.shape[0], 6),
                                dtype=mesh.elements.dtype)
        # Sorting the corner columns gives every shared edge a canonical
        # (ascending) orientation, so midpoints can be deduplicated by
        # the (corner, corner) pair below.
        new_elements[:, :3] = np.sort(mesh.elements, axis=1)
        if len(mesh.edge_collections.items()) != 1:
            raise NotImplementedError("Cannot handle multiple edge collections")
        # Assign one new node number per unique corner pair; numbering
        # continues after the existing nodes.
        edge_lookup = dict()
        midpoint_number = max_node_num + 1
        for element in new_elements:
            for k, (i, j) in enumerate([(0, 1), (1, 2), (0, 2)]):
                pair = (element[i], element[j])
                if pair not in edge_lookup:
                    edge_lookup[pair] = midpoint_number
                    midpoint_number += 1
                element[k + 3] = edge_lookup[pair]
        # Midpoint coordinates are the average of the edge's two corners.
        # NOTE(review): assumes 2-D nodal coordinates -- confirm whether
        # 3-D meshes need to be supported here.
        new_nodes = np.zeros((new_elements.max(), 2))
        new_nodes[:max_node_num, :] = mesh.nodes
        for edge, new_node in edge_lookup.items():
            new_nodes[new_node - 1] = (
                new_nodes[edge[0] - 1] + new_nodes[edge[1] - 1])/2.0
    else:
        raise NotImplementedError("Unsupported mesh order conversion")
    return parsers.ParseArrays(new_elements, new_nodes)
def organize_edges(edges, borders=None, default_border='land'):
    """
    Split *edges* into named collections (sets) keyed by border name.

    Required Arguments
    ------------------
    * edges : list of edge tuples ``(node, node, ..., edge_type)``;
      the edge type must be the last entry of each tuple.

    Optional Arguments
    ------------------
    * borders : mapping of border name -> collection of edge_type
      labels, e.g. ``{'ocean_in': 1, 'ocean_out': 2}``.
    * default_border : name of the collection that receives every edge
      not claimed by *borders*.  Defaults to ``'land'``.

    Raises ValueError when *default_border* is also a key of *borders*.
    """
    borders = dict() if borders is None else borders
    if default_border in borders:
        raise ValueError("Specific border and default border share same name")
    unclaimed = set(edges)
    collections_by_name = {default_border: unclaimed}
    for name, labels in borders.items():
        claimed = {edge for edge in unclaimed if edge[-1] in labels}
        collections_by_name[name] = claimed
        # Remove claimed edges in place so the default set shrinks.
        unclaimed -= claimed
    return collections_by_name
| {
"content_hash": "7eaf07f843cc7e766e319254c4fae4aa",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 80,
"avg_line_length": 35.310880829015545,
"alnum_prop": 0.5540719002201027,
"repo_name": "VT-ICAM/ArgyrisPack",
"id": "86ae7ec1fdd6a600f0461b260d98d13263f6d722",
"size": "6838",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ap/mesh/meshtools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "74470"
},
{
"name": "Julia",
"bytes": "7273"
},
{
"name": "Matlab",
"bytes": "8547"
},
{
"name": "Python",
"bytes": "88209"
}
],
"symlink_target": ""
} |
"""Constants used in CSIT."""
class Constants(object):
    """Constants used in CSIT.

    Collects file-system locations, binary names, versions and port
    numbers referenced by the test framework on the topology nodes.
    """
    # OpenVPP testing directory location at topology nodes
    REMOTE_FW_DIR = "/tmp/openvpp-testing"
    # shell scripts location (relative to the framework directory)
    RESOURCES_LIB_SH = "resources/libraries/bash"
    # vat templates location
    RESOURCES_TPL_VAT = "resources/templates/vat"
    # OpenVPP VAT binary name
    VAT_BIN_NAME = "vpp_api_test"
    # QEMU version to install
    QEMU_INSTALL_VERSION = "qemu-2.5.0"
    # QEMU install directory
    QEMU_INSTALL_DIR = "/opt/qemu-2.5.0"
    # Honeycomb directory location at topology nodes:
    REMOTE_HC_DIR = "/opt/honeycomb"
    # Honeycomb persistence files location
    REMOTE_HC_PERSIST = "/var/lib/honeycomb/persist"
    # Honeycomb templates location
    RESOURCES_TPL_HC = "resources/templates/honeycomb"
    # ODL Client Restconf listener port
    ODL_PORT = 8181
| {
"content_hash": "0d8db450a459f4a7e121d283f495d2cc",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 58,
"avg_line_length": 25.771428571428572,
"alnum_prop": 0.6807095343680709,
"repo_name": "FDio/vpp",
"id": "63428b0c4d4c4017cd41b7457123feb0c7721780",
"size": "1498",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "extras/vpp_config/vpplib/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "19971"
},
{
"name": "C",
"bytes": "26080388"
},
{
"name": "C++",
"bytes": "1180881"
},
{
"name": "CMake",
"bytes": "229900"
},
{
"name": "Dockerfile",
"bytes": "1075"
},
{
"name": "Emacs Lisp",
"bytes": "111146"
},
{
"name": "Go",
"bytes": "66545"
},
{
"name": "HTML",
"bytes": "636"
},
{
"name": "Jinja",
"bytes": "1135"
},
{
"name": "Lua",
"bytes": "79974"
},
{
"name": "M4",
"bytes": "257"
},
{
"name": "Makefile",
"bytes": "105502"
},
{
"name": "Perl",
"bytes": "6569"
},
{
"name": "Python",
"bytes": "5028232"
},
{
"name": "Ruby",
"bytes": "3865"
},
{
"name": "Shell",
"bytes": "148207"
}
],
"symlink_target": ""
} |
from sqlalchemy.testing import assert_raises, assert_raises_message
import datetime
import sqlalchemy as sa
from sqlalchemy import testing
from sqlalchemy import Integer, String, ForeignKey, MetaData, and_
from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.orm import mapper, relationship, relation, \
backref, create_session, configure_mappers, \
clear_mappers, sessionmaker, attributes,\
Session, composite, column_property, foreign,\
remote, synonym, joinedload, subqueryload
from sqlalchemy.orm.interfaces import ONETOMANY, MANYTOONE
from sqlalchemy.testing import eq_, startswith_, AssertsCompiledSQL, is_
from sqlalchemy.testing import fixtures
from test.orm import _fixtures
from sqlalchemy import exc
from sqlalchemy import inspect
class _RelationshipErrors(object):
    """Mixin supplying assertions for the standard configuration errors
    raised by relationship() during mapper setup.

    Each helper builds the expected error-message pattern, then delegates
    to assert_raises_message.
    """
    def _assert_raises_no_relevant_fks(self, fn, expr, relname,
                                       primary, *arg, **kw):
        # "no relevant foreign key columns" ArgumentError
        expected = (
            "Could not locate any relevant foreign key columns "
            "for %s join condition '%s' on relationship %s. "
            "Ensure that referencing columns are associated with "
            "a ForeignKey or ForeignKeyConstraint, or are annotated "
            r"in the join condition with the foreign\(\) annotation."
        ) % (primary, expr, relname)
        assert_raises_message(
            sa.exc.ArgumentError, expected, fn, *arg, **kw)
    def _assert_raises_no_equality(self, fn, expr, relname,
                                   primary, *arg, **kw):
        # "no simple equality expressions" ArgumentError
        expected = (
            "Could not locate any simple equality expressions "
            "involving locally mapped foreign key columns for %s join "
            "condition '%s' on relationship %s. "
            "Ensure that referencing columns are associated with a "
            "ForeignKey or ForeignKeyConstraint, or are annotated in "
            r"the join condition with the foreign\(\) annotation. "
            "To allow comparison operators other than '==', "
            "the relationship can be marked as viewonly=True."
        ) % (primary, expr, relname)
        assert_raises_message(
            sa.exc.ArgumentError, expected, fn, *arg, **kw)
    def _assert_raises_ambig_join(self, fn, relname, secondary_arg,
                                  *arg, **kw):
        # ambiguous-join ArgumentError; the wording differs depending on
        # whether a secondary (association) table is involved.
        if secondary_arg is not None:
            expected = (
                "Could not determine join condition between "
                "parent/child tables on relationship %s - "
                "there are multiple foreign key paths linking the "
                "tables via secondary table '%s'. "
                "Specify the 'foreign_keys' argument, providing a list "
                "of those columns which should be counted as "
                "containing a foreign key reference from the "
                "secondary table to each of the parent and child tables."
            ) % (relname, secondary_arg)
        else:
            expected = (
                "Could not determine join "
                "condition between parent/child tables on "
                "relationship %s - there are multiple foreign key "
                "paths linking the tables. Specify the "
                "'foreign_keys' argument, providing a list of those "
                "columns which should be counted as containing a "
                "foreign key reference to the parent table."
            ) % (relname,)
        assert_raises_message(
            exc.ArgumentError, expected, fn, *arg, **kw)
    def _assert_raises_no_join(self, fn, relname, secondary_arg,
                               *arg, **kw):
        # NoForeignKeysError; again two phrasings, with/without secondary.
        if secondary_arg is not None:
            expected = (
                "Could not determine join condition between "
                "parent/child tables on relationship %s - "
                "there are no foreign keys linking these tables "
                "via secondary table '%s'. "
                "Ensure that referencing columns are associated with a "
                "ForeignKey "
                "or ForeignKeyConstraint, or specify 'primaryjoin' and "
                "'secondaryjoin' expressions"
            ) % (relname, secondary_arg)
        else:
            expected = (
                "Could not determine join condition between "
                "parent/child tables on relationship %s - "
                "there are no foreign keys linking these tables. "
                "Ensure that referencing columns are associated with a "
                "ForeignKey "
                "or ForeignKeyConstraint, or specify a 'primaryjoin' "
                "expression."
            ) % (relname,)
        assert_raises_message(
            exc.NoForeignKeysError, expected, fn, *arg, **kw)
    def _assert_raises_ambiguous_direction(self, fn, relname, *arg, **kw):
        # "can't determine relationship direction" ArgumentError
        expected = (
            "Can't determine relationship"
            " direction for relationship '%s' - foreign "
            "key columns within the join condition are present "
            "in both the parent and the child's mapped tables. "
            "Ensure that only those columns referring to a parent column "
            r"are marked as foreign, either via the foreign\(\) annotation or "
            "via the foreign_keys argument."
        ) % relname
        assert_raises_message(
            sa.exc.ArgumentError, expected, fn, *arg, **kw)
    def _assert_raises_no_local_remote(self, fn, relname, *arg, **kw):
        # "no unambiguous local/remote column pairs" ArgumentError
        expected = (
            "Relationship %s could not determine "
            "any unambiguous local/remote column "
            "pairs based on join condition and remote_side arguments. "
            r"Consider using the remote\(\) annotation to "
            "accurately mark those elements of the join "
            "condition that are on the remote side of the relationship."
        ) % relname
        assert_raises_message(
            sa.exc.ArgumentError, expected, fn, *arg, **kw)
class DependencyTwoParentTest(fixtures.MappedTest):
    """Test flush() when a mapper is dependent on multiple relationships.

    Schema: tbl_c references tbl_a (NOT NULL) and tbl_d references both
    tbl_c (NOT NULL) and tbl_b; deleting an A or a C must cascade down
    the "all, delete-orphan" chains rather than nulling out the FKs.
    """
    # fixture lifecycle: mappers and rows are built once for the class,
    # and rows are never automatically deleted between tests.
    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None
    @classmethod
    def define_tables(cls, metadata):
        Table("tbl_a", metadata,
              Column("id", Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column("name", String(128)))
        Table("tbl_b", metadata,
              Column("id", Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column("name", String(128)))
        Table("tbl_c", metadata,
              Column("id", Integer, primary_key=True,
                     test_needs_autoincrement=True),
              # NOT NULL: a C row cannot exist without its parent A
              Column("tbl_a_id", Integer, ForeignKey("tbl_a.id"),
                     nullable=False),
              Column("name", String(128)))
        Table("tbl_d", metadata,
              Column("id", Integer, primary_key=True,
                     test_needs_autoincrement=True),
              # NOT NULL: a D row cannot exist without its parent C
              Column("tbl_c_id", Integer, ForeignKey("tbl_c.id"),
                     nullable=False),
              Column("tbl_b_id", Integer, ForeignKey("tbl_b.id")),
              Column("name", String(128)))
    @classmethod
    def setup_classes(cls):
        class A(cls.Basic):
            pass
        class B(cls.Basic):
            pass
        class C(cls.Basic):
            pass
        class D(cls.Basic):
            pass
    @classmethod
    def setup_mappers(cls):
        A, C, B, D, tbl_b, tbl_c, tbl_a, tbl_d = (cls.classes.A,
                                                  cls.classes.C,
                                                  cls.classes.B,
                                                  cls.classes.D,
                                                  cls.tables.tbl_b,
                                                  cls.tables.tbl_c,
                                                  cls.tables.tbl_a,
                                                  cls.tables.tbl_d)
        # deleting an A deletes its C rows (delete-orphan)
        mapper(A, tbl_a, properties=dict(
            c_rows=relationship(C, cascade="all, delete-orphan",
                                backref="a_row")))
        mapper(B, tbl_b)
        # deleting a C deletes its D rows (delete-orphan)
        mapper(C, tbl_c, properties=dict(
            d_rows=relationship(D, cascade="all, delete-orphan",
                                backref="c_row")))
        # plain many-to-one from D to B; no cascade configured here
        mapper(D, tbl_d, properties=dict(
            b_row=relationship(B)))
    @classmethod
    def insert_data(cls):
        A, C, B, D = (cls.classes.A,
                      cls.classes.C,
                      cls.classes.B,
                      cls.classes.D)
        session = create_session()
        a = A(name='a1')
        b = B(name='b1')
        c = C(name='c1', a_row=a)
        # the D rows are reachable via cascade from a/c; only a and b are
        # added to the session explicitly.
        d1 = D(name='d1', b_row=b, c_row=c)  # noqa
        d2 = D(name='d2', b_row=b, c_row=c)  # noqa
        d3 = D(name='d3', b_row=b, c_row=c)  # noqa
        session.add(a)
        session.add(b)
        session.flush()
    def test_DeleteRootTable(self):
        """Deleting the root A flushes cleanly (cascades through C to D)."""
        A = self.classes.A
        session = create_session()
        a = session.query(A).filter_by(name='a1').one()
        session.delete(a)
        session.flush()
    def test_DeleteMiddleTable(self):
        """Deleting the middle C flushes cleanly (cascades to its D rows)."""
        C = self.classes.C
        session = create_session()
        c = session.query(C).filter_by(name='c1').one()
        session.delete(c)
        session.flush()
class M2ODontOverwriteFKTest(fixtures.MappedTest):
    """Test that a directly-assigned FK value (a.bid) is not clobbered by
    the 'b' relationship during flush — unless the relationship attribute
    itself was also assigned, in which case the relationship wins.
    """
    @classmethod
    def define_tables(cls, metadata):
        Table(
            'a', metadata,
            Column('id', Integer, primary_key=True),
            Column('bid', ForeignKey('b.id'))
        )
        Table(
            'b', metadata,
            Column('id', Integer, primary_key=True),
        )
    def _fixture(self, uselist=False):
        # maps A.b either as a scalar (many-to-one) or as a collection,
        # depending on uselist
        a, b = self.tables.a, self.tables.b
        class A(fixtures.BasicEntity):
            pass
        class B(fixtures.BasicEntity):
            pass
        mapper(A, a, properties={
            'b': relationship(B, uselist=uselist)
        })
        mapper(B, b)
        return A, B
    def test_joinedload_doesnt_produce_bogus_event(self):
        """Setting a.bid after a joinedload of 'b' survives the flush."""
        A, B = self._fixture()
        sess = Session()
        b1 = B()
        sess.add(b1)
        sess.flush()
        a1 = A()
        sess.add(a1)
        sess.commit()
        # test that was broken by #3060
        a1 = sess.query(A).options(joinedload("b")).first()
        a1.bid = b1.id
        sess.flush()
        eq_(a1.bid, b1.id)
    def test_init_doesnt_produce_scalar_event(self):
        """Reading the unloaded scalar 'b' (None) doesn't reset a set bid."""
        A, B = self._fixture()
        sess = Session()
        b1 = B()
        sess.add(b1)
        sess.flush()
        a1 = A()
        assert a1.b is None
        a1.bid = b1.id
        sess.add(a1)
        sess.flush()
        assert a1.bid is not None
    def test_init_doesnt_produce_collection_event(self):
        """Reading the empty collection 'b' doesn't reset a set bid."""
        A, B = self._fixture(uselist=True)
        sess = Session()
        b1 = B()
        sess.add(b1)
        sess.flush()
        a1 = A()
        assert a1.b == []
        a1.bid = b1.id
        sess.add(a1)
        sess.flush()
        assert a1.bid is not None
    def test_scalar_relationship_overrides_fk(self):
        """Explicitly assigning a.b = None wins over the direct bid value."""
        A, B = self._fixture()
        sess = Session()
        b1 = B()
        sess.add(b1)
        sess.flush()
        a1 = A()
        a1.bid = b1.id
        a1.b = None
        sess.add(a1)
        sess.flush()
        assert a1.bid is None
    def test_collection_relationship_overrides_fk(self):
        """Assigning an empty collection does NOT blank the direct bid value."""
        A, B = self._fixture(uselist=True)
        sess = Session()
        b1 = B()
        sess.add(b1)
        sess.flush()
        a1 = A()
        a1.bid = b1.id
        a1.b = []
        sess.add(a1)
        sess.flush()
        # this is weird
        assert a1.bid is not None
class DirectSelfRefFKTest(fixtures.MappedTest, AssertsCompiledSQL):
    """Tests the ultimate join condition, a single column
    that points to itself, e.g. within a SQL function or similar.

    The test is against a materialized path setup: a single 'path'
    primary-key column, with "descendants" / "anscestors" relationships
    defined via LIKE-based primaryjoins on that one column.

    This is an **extremely** unusual case::

        Entity
        ------
        path -------+
           ^        |
           +--------+

    In this case, one-to-many and many-to-one are no longer accurate.
    Both relationships return collections.  I'm not sure if this is a
    good idea.
    """
    __dialect__ = 'default'
    @classmethod
    def define_tables(cls, metadata):
        # a single self-referential column; 'path' is the entire PK
        Table('entity', metadata,
              Column('path', String(100), primary_key=True)
              )
    @classmethod
    def setup_classes(cls):
        class Entity(cls.Basic):
            def __init__(self, path):
                self.path = path
    def _descendants_fixture(self, data=True):
        """Map Entity.descendants: remote/foreign path LIKE path || '/%'."""
        Entity = self.classes.Entity
        entity = self.tables.entity
        m = mapper(Entity, entity, properties={
            "descendants": relationship(
                Entity,
                primaryjoin=remote(foreign(entity.c.path)).like(
                    entity.c.path.concat('/%')),
                viewonly=True,
                order_by=entity.c.path)
        })
        configure_mappers()
        assert m.get_property("descendants").direction is ONETOMANY
        if data:
            return self._fixture()
    def _anscestors_fixture(self, data=True):
        """Map Entity.anscestors: path LIKE remote/foreign path || '/%'."""
        Entity = self.classes.Entity
        entity = self.tables.entity
        m = mapper(Entity, entity, properties={
            "anscestors": relationship(
                Entity,
                primaryjoin=entity.c.path.like(
                    remote(foreign(entity.c.path)).concat('/%')),
                viewonly=True,
                order_by=entity.c.path)
        })
        configure_mappers()
        assert m.get_property("anscestors").direction is ONETOMANY
        if data:
            return self._fixture()
    def _fixture(self):
        # a small materialized-path tree rooted at /foo and /bar
        Entity = self.classes.Entity
        sess = Session()
        sess.add_all([
            Entity("/foo"),
            Entity("/foo/bar1"),
            Entity("/foo/bar2"),
            Entity("/foo/bar2/bat1"),
            Entity("/foo/bar2/bat2"),
            Entity("/foo/bar3"),
            Entity("/bar"),
            Entity("/bar/bat1")
        ])
        return sess
    def test_descendants_lazyload_clause(self):
        """Forward/reverse lazy clauses compile as expected for descendants."""
        self._descendants_fixture(data=False)
        Entity = self.classes.Entity
        self.assert_compile(
            Entity.descendants.property.strategy._lazywhere,
            "entity.path LIKE (:param_1 || :path_1)"
        )
        self.assert_compile(
            Entity.descendants.property.strategy._rev_lazywhere,
            ":param_1 LIKE (entity.path || :path_1)"
        )
    def test_ancestors_lazyload_clause(self):
        """Forward/reverse lazy clauses compile as expected for anscestors."""
        self._anscestors_fixture(data=False)
        Entity = self.classes.Entity
        # :param_1 LIKE (:param_1 || :path_1)
        self.assert_compile(
            Entity.anscestors.property.strategy._lazywhere,
            ":param_1 LIKE (entity.path || :path_1)"
        )
        self.assert_compile(
            Entity.anscestors.property.strategy._rev_lazywhere,
            "entity.path LIKE (:param_1 || :path_1)"
        )
    def test_descendants_lazyload(self):
        sess = self._descendants_fixture()
        Entity = self.classes.Entity
        e1 = sess.query(Entity).filter_by(path="/foo").first()
        eq_(
            [e.path for e in e1.descendants],
            ["/foo/bar1", "/foo/bar2", "/foo/bar2/bat1",
             "/foo/bar2/bat2", "/foo/bar3"]
        )
    def test_anscestors_lazyload(self):
        sess = self._anscestors_fixture()
        Entity = self.classes.Entity
        e1 = sess.query(Entity).filter_by(path="/foo/bar2/bat1").first()
        eq_(
            [e.path for e in e1.anscestors],
            ["/foo", "/foo/bar2"]
        )
    def test_descendants_joinedload(self):
        sess = self._descendants_fixture()
        Entity = self.classes.Entity
        e1 = sess.query(Entity).filter_by(path="/foo").\
            options(joinedload(Entity.descendants)).first()
        eq_(
            [e.path for e in e1.descendants],
            ["/foo/bar1", "/foo/bar2", "/foo/bar2/bat1",
             "/foo/bar2/bat2", "/foo/bar3"]
        )
    def test_descendants_subqueryload(self):
        sess = self._descendants_fixture()
        Entity = self.classes.Entity
        e1 = sess.query(Entity).filter_by(path="/foo").\
            options(subqueryload(Entity.descendants)).first()
        eq_(
            [e.path for e in e1.descendants],
            ["/foo/bar1", "/foo/bar2", "/foo/bar2/bat1",
             "/foo/bar2/bat2", "/foo/bar3"]
        )
    def test_anscestors_joinedload(self):
        sess = self._anscestors_fixture()
        Entity = self.classes.Entity
        e1 = sess.query(Entity).filter_by(path="/foo/bar2/bat1").\
            options(joinedload(Entity.anscestors)).first()
        eq_(
            [e.path for e in e1.anscestors],
            ["/foo", "/foo/bar2"]
        )
    def test_plain_join_descendants(self):
        """An aliased join along 'descendants' renders the LIKE ON clause."""
        self._descendants_fixture(data=False)
        Entity = self.classes.Entity
        sess = Session()
        self.assert_compile(
            sess.query(Entity).join(Entity.descendants, aliased=True),
            "SELECT entity.path AS entity_path FROM entity JOIN entity AS "
            "entity_1 ON entity_1.path LIKE (entity.path || :path_1)"
        )
class CompositeSelfRefFKTest(fixtures.MappedTest, AssertsCompiledSQL):
    """Tests a composite FK where, in the relationship(), one column
    points to itself in the same table.

    This is a very unusual case::

        company       employee
        ----------    ----------
        company_id <--- company_id ------+
        name                ^            |
                            +------------+
        emp_id <---------+
        name             |
        reports_to_id ---+

    employee joins to its sub-employees both on reports_to_id, *and on
    company_id to itself* — i.e. employee.company_id participates in two
    foreign key constraints at once.

    The test methods exercise progressively more implicit ways of
    configuring the same 'reports_to'/'employees' self-referential
    relationship; each then runs the shared _test() battery.
    """
    __dialect__ = 'default'
    @classmethod
    def define_tables(cls, metadata):
        Table('company_t', metadata,
              Column('company_id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(30)))
        Table('employee_t', metadata,
              Column('company_id', Integer, primary_key=True),
              Column('emp_id', Integer, primary_key=True),
              Column('name', String(30)),
              Column('reports_to_id', Integer),
              # company_id is part of BOTH foreign key constraints
              sa.ForeignKeyConstraint(
                  ['company_id'],
                  ['company_t.company_id']),
              sa.ForeignKeyConstraint(
                  ['company_id', 'reports_to_id'],
                  ['employee_t.company_id', 'employee_t.emp_id']))
    @classmethod
    def setup_classes(cls):
        class Company(cls.Basic):
            def __init__(self, name):
                self.name = name
        class Employee(cls.Basic):
            def __init__(self, name, company, emp_id, reports_to=None):
                self.name = name
                self.company = company
                self.emp_id = emp_id
                self.reports_to = reports_to
    def test_explicit(self):
        """Fully explicit: primaryjoin + remote_side + foreign_keys."""
        Employee, Company, employee_t, company_t = (self.classes.Employee,
                                                    self.classes.Company,
                                                    self.tables.employee_t,
                                                    self.tables.company_t)
        mapper(Company, company_t)
        mapper(Employee, employee_t, properties={
            'company': relationship(Company,
                                    primaryjoin=employee_t.c.company_id ==
                                    company_t.c.company_id,
                                    backref='employees'),
            'reports_to': relationship(Employee, primaryjoin=sa.and_(
                employee_t.c.emp_id == employee_t.c.reports_to_id,
                employee_t.c.company_id == employee_t.c.company_id
            ),
                remote_side=[employee_t.c.emp_id, employee_t.c.company_id],
                foreign_keys=[
                    employee_t.c.reports_to_id, employee_t.c.company_id],
                backref=backref('employees',
                                foreign_keys=[employee_t.c.reports_to_id,
                                              employee_t.c.company_id]))
        })
        self._test()
    def test_implicit(self):
        """No primaryjoin; remote_side + foreign_keys only."""
        Employee, Company, employee_t, company_t = (self.classes.Employee,
                                                    self.classes.Company,
                                                    self.tables.employee_t,
                                                    self.tables.company_t)
        mapper(Company, company_t)
        mapper(Employee, employee_t, properties={
            'company': relationship(Company, backref='employees'),
            'reports_to': relationship(
                Employee,
                remote_side=[employee_t.c.emp_id, employee_t.c.company_id],
                foreign_keys=[employee_t.c.reports_to_id,
                              employee_t.c.company_id],
                backref=backref(
                    'employees',
                    foreign_keys=[
                        employee_t.c.reports_to_id, employee_t.c.company_id])
            )
        })
        self._test()
    def test_very_implicit(self):
        """Only remote_side given; FKs derived from the table constraints."""
        Employee, Company, employee_t, company_t = (self.classes.Employee,
                                                    self.classes.Company,
                                                    self.tables.employee_t,
                                                    self.tables.company_t)
        mapper(Company, company_t)
        mapper(Employee, employee_t, properties={
            'company': relationship(Company, backref='employees'),
            'reports_to': relationship(
                Employee,
                remote_side=[employee_t.c.emp_id, employee_t.c.company_id],
                backref='employees'
            )
        })
        self._test()
    def test_very_explicit(self):
        """Lowest level: _local_remote_pairs spelled out by hand."""
        Employee, Company, employee_t, company_t = (self.classes.Employee,
                                                    self.classes.Company,
                                                    self.tables.employee_t,
                                                    self.tables.company_t)
        mapper(Company, company_t)
        mapper(Employee, employee_t, properties={
            'company': relationship(Company, backref='employees'),
            'reports_to': relationship(
                Employee,
                _local_remote_pairs=[
                    (employee_t.c.reports_to_id, employee_t.c.emp_id),
                    (employee_t.c.company_id, employee_t.c.company_id)
                ],
                foreign_keys=[
                    employee_t.c.reports_to_id,
                    employee_t.c.company_id],
                backref=backref(
                    'employees',
                    foreign_keys=[
                        employee_t.c.reports_to_id, employee_t.c.company_id])
            )
        })
        self._test()
    def test_annotated(self):
        """primaryjoin annotated with remote() only; foreign side inferred."""
        Employee, Company, employee_t, company_t = (self.classes.Employee,
                                                    self.classes.Company,
                                                    self.tables.employee_t,
                                                    self.tables.company_t)
        mapper(Company, company_t)
        mapper(Employee, employee_t, properties={
            'company': relationship(Company, backref='employees'),
            'reports_to': relationship(
                Employee,
                primaryjoin=sa.and_(
                    remote(employee_t.c.emp_id) == employee_t.c.reports_to_id,
                    remote(employee_t.c.company_id) == employee_t.c.company_id
                ),
                backref=backref('employees')
            )
        })
        self._assert_lazy_clauses()
        self._test()
    def test_overlapping_warning(self):
        """Same config as test_annotated, asserting the overlap warning."""
        Employee, Company, employee_t, company_t = (self.classes.Employee,
                                                    self.classes.Company,
                                                    self.tables.employee_t,
                                                    self.tables.company_t)
        mapper(Company, company_t)
        mapper(Employee, employee_t, properties={
            'company': relationship(Company, backref='employees'),
            'reports_to': relationship(
                Employee,
                primaryjoin=sa.and_(
                    remote(employee_t.c.emp_id) == employee_t.c.reports_to_id,
                    remote(employee_t.c.company_id) == employee_t.c.company_id
                ),
                backref=backref('employees')
            )
        })
        # NOTE(review): the second literal below is not a raw string; the
        # \( sequences rely on Python preserving unrecognized escapes —
        # consider an r"" prefix. Left unchanged here.
        assert_raises_message(
            exc.SAWarning,
            r"relationship .* will copy column .* to column "
            "employee_t.company_id, which conflicts with relationship\(s\)",
            configure_mappers
        )
    def test_annotated_no_overwriting(self):
        """foreign() applied to reports_to_id only — no overlap warning."""
        Employee, Company, employee_t, company_t = (self.classes.Employee,
                                                    self.classes.Company,
                                                    self.tables.employee_t,
                                                    self.tables.company_t)
        mapper(Company, company_t)
        mapper(Employee, employee_t, properties={
            'company': relationship(Company, backref='employees'),
            'reports_to': relationship(
                Employee,
                primaryjoin=sa.and_(
                    remote(employee_t.c.emp_id) ==
                    foreign(employee_t.c.reports_to_id),
                    remote(employee_t.c.company_id) == employee_t.c.company_id
                ),
                backref=backref('employees')
            )
        })
        self._assert_lazy_clauses()
        self._test_no_warning()
    def _test_no_overwrite(self, sess, expect_failure):
        # test [ticket:3230]
        Employee, Company = self.classes.Employee, self.classes.Company
        c1 = sess.query(Company).filter_by(name='c1').one()
        e3 = sess.query(Employee).filter_by(name='emp3').one()
        e3.reports_to = None
        if expect_failure:
            # if foreign() isn't applied specifically to
            # employee_t.c.reports_to_id only, then
            # employee_t.c.company_id goes foreign as well and then
            # this happens
            assert_raises_message(
                AssertionError,
                "Dependency rule tried to blank-out primary key column "
                "'employee_t.company_id'",
                sess.flush
            )
        else:
            sess.flush()
            eq_(e3.company, c1)
    @testing.emits_warning("relationship .* will copy column ")
    def _test(self):
        # configurations without targeted foreign() emit the overlap
        # warning and hit the blank-out failure in _test_no_overwrite
        self._test_no_warning(overwrites=True)
    def _test_no_warning(self, overwrites=False):
        """Run the full battery: relationships, data, lazy loads, joins."""
        configure_mappers()
        self._test_relationships()
        sess = Session()
        self._setup_data(sess)
        self._test_lazy_relations(sess)
        self._test_join_aliasing(sess)
        self._test_no_overwrite(sess, expect_failure=overwrites)
    @testing.emits_warning("relationship .* will copy column ")
    def _assert_lazy_clauses(self):
        # both composite columns appear in the forward and reverse
        # lazy-load criteria
        configure_mappers()
        Employee = self.classes.Employee
        self.assert_compile(
            Employee.employees.property.strategy._lazywhere,
            ":param_1 = employee_t.reports_to_id AND "
            ":param_2 = employee_t.company_id"
        )
        self.assert_compile(
            Employee.employees.property.strategy._rev_lazywhere,
            "employee_t.emp_id = :param_1 AND "
            "employee_t.company_id = :param_2"
        )
    def _test_relationships(self):
        """Both directions resolve the same composite local/remote pairs."""
        Employee = self.classes.Employee
        employee_t = self.tables.employee_t
        eq_(
            set(Employee.employees.property.local_remote_pairs),
            set([
                (employee_t.c.company_id, employee_t.c.company_id),
                (employee_t.c.emp_id, employee_t.c.reports_to_id),
            ])
        )
        eq_(
            Employee.employees.property.remote_side,
            set([employee_t.c.company_id, employee_t.c.reports_to_id])
        )
        eq_(
            set(Employee.reports_to.property.local_remote_pairs),
            set([
                (employee_t.c.company_id, employee_t.c.company_id),
                (employee_t.c.reports_to_id, employee_t.c.emp_id),
            ])
        )
    def _setup_data(self, sess):
        # two companies with identical emp_id sequences, so that only the
        # composite (company_id, emp_id) join keeps them separate
        Employee, Company = self.classes.Employee, self.classes.Company
        c1 = Company('c1')
        c2 = Company('c2')
        e1 = Employee('emp1', c1, 1)
        e2 = Employee('emp2', c1, 2, e1)  # noqa
        e3 = Employee('emp3', c1, 3, e1)
        e4 = Employee('emp4', c1, 4, e3)  # noqa
        e5 = Employee('emp5', c2, 1)
        e6 = Employee('emp6', c2, 2, e5)  # noqa
        e7 = Employee('emp7', c2, 3, e5)  # noqa
        sess.add_all((c1, c2))
        sess.commit()
        sess.close()
    def _test_lazy_relations(self, sess):
        Employee, Company = self.classes.Employee, self.classes.Company
        c1 = sess.query(Company).filter_by(name='c1').one()
        c2 = sess.query(Company).filter_by(name='c2').one()
        e1 = sess.query(Employee).filter_by(name='emp1').one()
        e5 = sess.query(Employee).filter_by(name='emp5').one()
        test_e1 = sess.query(Employee).get([c1.company_id, e1.emp_id])
        assert test_e1.name == 'emp1', test_e1.name
        test_e5 = sess.query(Employee).get([c2.company_id, e5.emp_id])
        assert test_e5.name == 'emp5', test_e5.name
        assert [x.name for x in test_e1.employees] == ['emp2', 'emp3']
        # emp_id 3 exists in both companies; company_id disambiguates
        assert sess.query(Employee).\
            get([c1.company_id, 3]).reports_to.name == 'emp1'
        assert sess.query(Employee).\
            get([c2.company_id, 3]).reports_to.name == 'emp5'
    def _test_join_aliasing(self, sess):
        Employee, Company = self.classes.Employee, self.classes.Company
        eq_(
            [n for n, in sess.query(Employee.name).
                join(Employee.reports_to, aliased=True).
                filter_by(name='emp5').
                reset_joinpoint().
                order_by(Employee.name)],
            ['emp6', 'emp7']
        )
class CompositeJoinPartialFK(fixtures.MappedTest, AssertsCompiledSQL):
    """Test a primaryjoin that includes a column ('z') which is NOT part
    of the composite foreign key constraint; the lazy clause must still
    include all three columns.
    """
    __dialect__ = 'default'
    @classmethod
    def define_tables(cls, metadata):
        Table("parent", metadata,
              Column('x', Integer, primary_key=True),
              Column('y', Integer, primary_key=True),
              Column('z', Integer),
              )
        Table("child", metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('x', Integer),
              Column('y', Integer),
              Column('z', Integer),
              # note 'z' is not here
              sa.ForeignKeyConstraint(
                  ["x", "y"],
                  ["parent.x", "parent.y"]
              )
              )
    @classmethod
    def setup_mappers(cls):
        parent, child = cls.tables.parent, cls.tables.child
        class Parent(cls.Comparable):
            pass
        class Child(cls.Comparable):
            pass
        # primaryjoin compares z as well, even though z is not in the FK
        mapper(Parent, parent, properties={
            'children': relationship(Child, primaryjoin=and_(
                parent.c.x == child.c.x,
                parent.c.y == child.c.y,
                parent.c.z == child.c.z,
            ))
        })
        mapper(Child, child)
    def test_joins_fully(self):
        """The lazy-load clause carries all three comparisons, z included."""
        Parent, Child = self.classes.Parent, self.classes.Child
        self.assert_compile(
            Parent.children.property.strategy._lazywhere,
            ":param_1 = child.x AND :param_2 = child.y AND :param_3 = child.z"
        )
class SynonymsAsFKsTest(fixtures.MappedTest):
    """Test a relationship whose primaryjoin references a synonym-mapped
    attribute (B.a_id, a synonym for the '_a_id' column) instead of the
    raw table column.
    """
    @classmethod
    def define_tables(cls, metadata):
        Table("tableA", metadata,
              Column("id", Integer, primary_key=True),
              Column("foo", Integer,),
              test_needs_fk=True)
        Table("tableB", metadata,
              Column("id", Integer, primary_key=True),
              # column named '_a_id' in the DB, keyed as 'a_id' in metadata
              Column("_a_id", Integer, key='a_id', primary_key=True),
              test_needs_fk=True)
    @classmethod
    def setup_classes(cls):
        class A(cls.Basic):
            pass
        class B(cls.Basic):
            @property
            def a_id(self):
                return self._a_id
    def test_synonym_fk(self):
        """A.b maps via primaryjoin tableA.id == foreign(B.a_id), where
        B.a_id is a synonym; the join works and keeps id/_a_id in sync."""
        tableB, A, B, tableA = (self.tables.tableB,
                                self.classes.A,
                                self.classes.B,
                                self.tables.tableA)
        mapper(B, tableB, properties={
            'a_id': synonym('_a_id', map_column=True)})
        mapper(A, tableA, properties={
            'b': relationship(B, primaryjoin=(tableA.c.id == foreign(B.a_id)),
                              uselist=False)})
        sess = create_session()
        b = B(id=0)
        a = A(id=0, b=b)
        sess.add(a)
        sess.add(b)
        sess.flush()
        sess.expunge_all()
        assert a.b == b
        assert a.id == b.a_id
        assert a.id == b._a_id
class FKsAsPksTest(fixtures.MappedTest):
    """Syncrules on foreign keys that are also primary.

    tableB.id is simultaneously its primary key and a foreign key to
    tableA.id, so de-associating a B from its A would require blanking
    B's primary key — which must raise unless a delete cascade removes
    the row instead.
    """
    @classmethod
    def define_tables(cls, metadata):
        Table("tableA", metadata,
              Column("id", Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column("foo", Integer,),
              test_needs_fk=True)
        Table("tableB", metadata,
              # PK and FK at once: the crux of every test below
              Column("id", Integer, ForeignKey("tableA.id"), primary_key=True),
              test_needs_fk=True)
    @classmethod
    def setup_classes(cls):
        class A(cls.Basic):
            pass
        class B(cls.Basic):
            pass
    def test_onetoone_switch(self):
        """test that active history is enabled on a
        one-to-many/one that has use_get==True"""
        tableB, A, B, tableA = (self.tables.tableB,
                                self.classes.A,
                                self.classes.B,
                                self.tables.tableA)
        mapper(A, tableA, properties={
            'b': relationship(B, cascade="all,delete-orphan", uselist=False)})
        mapper(B, tableB)
        configure_mappers()
        assert A.b.property.strategy.use_get
        sess = create_session()
        a1 = A()
        sess.add(a1)
        sess.flush()
        sess.close()
        a1 = sess.query(A).first()
        a1.b = B()
        sess.flush()
    def test_no_delete_PK_AtoB(self):
        """A cant be deleted without B because B would have no PK value."""
        tableB, A, B, tableA = (self.tables.tableB,
                                self.classes.A,
                                self.classes.B,
                                self.tables.tableA)
        # save-update only: no delete cascade, so the flush must fail
        mapper(A, tableA, properties={
            'bs': relationship(B, cascade="save-update")})
        mapper(B, tableB)
        a1 = A()
        a1.bs.append(B())
        sess = create_session()
        sess.add(a1)
        sess.flush()
        sess.delete(a1)
        try:
            sess.flush()
            assert False
        except AssertionError as e:
            startswith_(str(e),
                        "Dependency rule tried to blank-out "
                        "primary key column 'tableB.id' on instance ")
    def test_no_delete_PK_BtoA(self):
        """De-associating B from A would blank B's PK; flush must fail."""
        tableB, A, B, tableA = (self.tables.tableB,
                                self.classes.A,
                                self.classes.B,
                                self.tables.tableA)
        mapper(B, tableB, properties={
            'a': relationship(A, cascade="save-update")})
        mapper(A, tableA)
        b1 = B()
        a1 = A()
        b1.a = a1
        sess = create_session()
        sess.add(b1)
        sess.flush()
        b1.a = None
        try:
            sess.flush()
            assert False
        except AssertionError as e:
            startswith_(str(e),
                        "Dependency rule tried to blank-out "
                        "primary key column 'tableB.id' on instance ")
    @testing.fails_on_everything_except('sqlite', 'mysql')
    def test_nullPKsOK_BtoA(self):
        """A nullable PK/FK column may legitimately be left NULL."""
        A, tableA = self.classes.A, self.tables.tableA
        # postgresql cant handle a nullable PK column...?
        tableC = Table(
            'tablec', tableA.metadata,
            Column('id', Integer, primary_key=True),
            Column('a_id', Integer, ForeignKey('tableA.id'),
                   primary_key=True, nullable=True))
        tableC.create()
        class C(fixtures.BasicEntity):
            pass
        mapper(C, tableC, properties={
            'a': relationship(A, cascade="save-update")
        })
        mapper(A, tableA)
        c1 = C()
        c1.id = 5
        c1.a = None
        sess = create_session()
        sess.add(c1)
        # test that no error is raised.
        sess.flush()
    def test_delete_cascade_BtoA(self):
        """No 'blank the PK' error when the child is to
        be deleted as part of a cascade"""
        tableB, A, B, tableA = (self.tables.tableB,
                                self.classes.A,
                                self.classes.B,
                                self.tables.tableA)
        for cascade in ("save-update, delete",
                        #"save-update, delete-orphan",
                        "save-update, delete, delete-orphan"):
            mapper(B, tableB, properties={
                'a': relationship(A, cascade=cascade, single_parent=True)
            })
            mapper(A, tableA)
            b1 = B()
            a1 = A()
            b1.a = a1
            sess = create_session()
            sess.add(b1)
            sess.flush()
            sess.delete(b1)
            sess.flush()
            assert a1 not in sess
            assert b1 not in sess
            sess.expunge_all()
            # re-map on the next loop iteration with a different cascade
            sa.orm.clear_mappers()
    def test_delete_cascade_AtoB(self):
        """No 'blank the PK' error when the child is to
        be deleted as part of a cascade"""
        tableB, A, B, tableA = (self.tables.tableB,
                                self.classes.A,
                                self.classes.B,
                                self.tables.tableA)
        for cascade in ("save-update, delete",
                        #"save-update, delete-orphan",
                        "save-update, delete, delete-orphan"):
            mapper(A, tableA, properties={
                'bs': relationship(B, cascade=cascade)
            })
            mapper(B, tableB)
            a1 = A()
            b1 = B()
            a1.bs.append(b1)
            sess = create_session()
            sess.add(a1)
            sess.flush()
            sess.delete(a1)
            sess.flush()
            assert a1 not in sess
            assert b1 not in sess
            sess.expunge_all()
            sa.orm.clear_mappers()
    def test_delete_manual_AtoB(self):
        """Deleting both rows explicitly (cascade='none') flushes cleanly."""
        tableB, A, B, tableA = (self.tables.tableB,
                                self.classes.A,
                                self.classes.B,
                                self.tables.tableA)
        mapper(A, tableA, properties={
            'bs': relationship(B, cascade="none")})
        mapper(B, tableB)
        a1 = A()
        b1 = B()
        a1.bs.append(b1)
        sess = create_session()
        sess.add(a1)
        sess.add(b1)
        sess.flush()
        sess.delete(a1)
        sess.delete(b1)
        sess.flush()
        assert a1 not in sess
        assert b1 not in sess
        sess.expunge_all()
    def test_delete_manual_BtoA(self):
        """Same as above, with the relationship mapped from B's side."""
        tableB, A, B, tableA = (self.tables.tableB,
                                self.classes.A,
                                self.classes.B,
                                self.tables.tableA)
        mapper(B, tableB, properties={
            'a': relationship(A, cascade="none")})
        mapper(A, tableA)
        b1 = B()
        a1 = A()
        b1.a = a1
        sess = create_session()
        sess.add(b1)
        sess.add(a1)
        sess.flush()
        sess.delete(b1)
        sess.delete(a1)
        sess.flush()
        assert a1 not in sess
        assert b1 not in sess
class UniqueColReferenceSwitchTest(fixtures.MappedTest):
    """Test a relationship based on a primary join against a unique,
    non-primary-key column (table_b.a_ident -> table_a.ident)."""
    @classmethod
    def define_tables(cls, metadata):
        Table("table_a", metadata,
              Column("id", Integer, primary_key=True,
                     test_needs_autoincrement=True),
              # unique but not the PK; this is the join target
              Column("ident", String(10), nullable=False,
                     unique=True),
              )
        Table("table_b", metadata,
              Column("id", Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column("a_ident", String(10),
                     ForeignKey('table_a.ident'),
                     nullable=False),
              )
    @classmethod
    def setup_classes(cls):
        class A(cls.Comparable):
            pass
        class B(cls.Comparable):
            pass
    def test_switch_parent(self):
        """Re-parenting B rows to another A, then deleting the old A,
        flushes cleanly with the non-PK join column kept in sync."""
        A, B, table_b, table_a = (self.classes.A,
                                  self.classes.B,
                                  self.tables.table_b,
                                  self.tables.table_a)
        mapper(A, table_a)
        mapper(B, table_b, properties={"a": relationship(A, backref="bs")})
        session = create_session()
        a1, a2 = A(ident="uuid1"), A(ident="uuid2")
        session.add_all([a1, a2])
        a1.bs = [
            B(), B()
        ]
        session.flush()
        session.expire_all()
        a1, a2 = session.query(A).all()
        for b in list(a1.bs):
            b.a = a2
        session.delete(a1)
        session.flush()
class RelationshipToSelectableTest(fixtures.MappedTest):
    """Test a map to a select that relates to a map to the table.

    Container is mapped to a DISTINCT select over part of 'items';
    LineItem is mapped to the full table, joined back to the select on
    the three shared columns.
    """
    @classmethod
    def define_tables(cls, metadata):
        # columns are keyed with friendlier attribute names via key=
        Table('items', metadata,
              Column('item_policy_num', String(10), primary_key=True,
                     key='policyNum'),
              Column('item_policy_eff_date', sa.Date, primary_key=True,
                     key='policyEffDate'),
              Column('item_type', String(20), primary_key=True,
                     key='type'),
              Column('item_id', Integer, primary_key=True,
                     key='id', autoincrement=False))
    def test_basic(self):
        """Container.lineItems loads the LineItem rows matching the
        select's (policyNum, policyEffDate, type) triple."""
        items = self.tables.items
        class Container(fixtures.BasicEntity):
            pass
        class LineItem(fixtures.BasicEntity):
            pass
        # one Container row per distinct (policyNum, policyEffDate, type)
        container_select = sa.select(
            [items.c.policyNum, items.c.policyEffDate, items.c.type],
            distinct=True,
        ).alias('container_select')
        mapper(LineItem, items)
        mapper(
            Container,
            container_select,
            properties=dict(
                lineItems=relationship(
                    LineItem,
                    lazy='select',
                    cascade='all, delete-orphan',
                    order_by=sa.asc(items.c.id),
                    primaryjoin=sa.and_(
                        container_select.c.policyNum == items.c.policyNum,
                        container_select.c.policyEffDate ==
                        items.c.policyEffDate,
                        container_select.c.type == items.c.type),
                    # the items columns are declared foreign explicitly;
                    # there is no real FK between select and table
                    foreign_keys=[
                        items.c.policyNum,
                        items.c.policyEffDate,
                        items.c.type
                    ]
                )
            )
        )
        session = create_session()
        con = Container()
        con.policyNum = "99"
        con.policyEffDate = datetime.date.today()
        con.type = "TESTER"
        session.add(con)
        for i in range(0, 10):
            li = LineItem()
            li.id = i
            con.lineItems.append(li)
            session.add(li)
        session.flush()
        session.expunge_all()
        newcon = session.query(Container).\
            order_by(container_select.c.type).first()
        assert con.policyNum == newcon.policyNum
        assert len(newcon.lineItems) == 10
        for old, new in zip(con.lineItems, newcon.lineItems):
            eq_(old.id, new.id)
class FKEquatedToConstantTest(fixtures.MappedTest):
    """test a relationship with a non-column entity in the primary join,
    is not viewonly, and also has the non-column's clause mentioned in the
    foreign keys list.
    """

    @classmethod
    def define_tables(cls, metadata):
        Table('tags', metadata, Column("id", Integer, primary_key=True,
                                       test_needs_autoincrement=True),
              Column("data", String(50)),
              )
        Table('tag_foo', metadata,
              Column("id", Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('tagid', Integer),
              Column("data", String(50)),
              )

    def test_basic(self):
        """The constant clause (data == 'iplc_case') filters loading,
        but non-matching children are still persisted normally."""
        tag_foo, tags = self.tables.tag_foo, self.tables.tags

        class Tag(fixtures.ComparableEntity):
            pass

        class TagInstance(fixtures.ComparableEntity):
            pass

        mapper(Tag, tags, properties={
            'foo': relationship(
                TagInstance,
                primaryjoin=sa.and_(tag_foo.c.data == 'iplc_case',
                                    tag_foo.c.tagid == tags.c.id),
                foreign_keys=[tag_foo.c.tagid, tag_foo.c.data]),
        })

        mapper(TagInstance, tag_foo)

        sess = create_session()
        t1 = Tag(data='some tag')
        t1.foo.append(TagInstance(data='iplc_case'))
        t1.foo.append(TagInstance(data='not_iplc_case'))
        sess.add(t1)
        sess.flush()
        sess.expunge_all()

        # relationship works
        eq_(
            sess.query(Tag).all(),
            [Tag(data='some tag', foo=[TagInstance(data='iplc_case')])]
        )

        # both TagInstances were persisted
        eq_(
            sess.query(TagInstance).order_by(TagInstance.data).all(),
            [TagInstance(data='iplc_case'), TagInstance(data='not_iplc_case')]
        )
class BackrefPropagatesForwardsArgs(fixtures.MappedTest):
    """primaryjoin/foreign_keys given on the forward relationship must
    carry over to the backref it creates."""

    @classmethod
    def define_tables(cls, metadata):
        # note: no ForeignKey on addresses.user_id; the mapping relies
        # entirely on the explicit primaryjoin/foreign_keys arguments
        Table('users', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(50))
              )
        Table('addresses', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('user_id', Integer),
              Column('email', String(50))
              )

    @classmethod
    def setup_classes(cls):
        class User(cls.Comparable):
            pass

        class Address(cls.Comparable):
            pass

    def test_backref(self):
        User, Address, users, addresses = (self.classes.User,
                                           self.classes.Address,
                                           self.tables.users,
                                           self.tables.addresses)

        mapper(User, users, properties={
            'addresses': relationship(
                Address,
                primaryjoin=addresses.c.user_id == users.c.id,
                foreign_keys=addresses.c.user_id,
                backref='user')
        })
        mapper(Address, addresses)

        sess = sessionmaker()()
        u1 = User(name='u1', addresses=[Address(email='a1')])
        sess.add(u1)
        sess.commit()
        # the 'user' backref loads, proving the join arguments propagated
        eq_(sess.query(Address).all(), [
            Address(email='a1', user=User(name='u1'))
        ])
class AmbiguousJoinInterpretedAsSelfRef(fixtures.MappedTest):
    """test ambiguous joins due to FKs on both sides treated as
    self-referential.

    this mapping is very similar to that of
    test/orm/inheritance/query.py SelfReferentialTestJoinedToBase,
    except that inheritance is not used here.
    """

    @classmethod
    def define_tables(cls, metadata):
        Table(
            'subscriber', metadata,
            Column(
                'id', Integer, primary_key=True,
                test_needs_autoincrement=True))
        Table(
            'address', metadata,
            Column(
                'subscriber_id', Integer,
                ForeignKey('subscriber.id'), primary_key=True),
            Column('type', String(1), primary_key=True),
        )

    @classmethod
    def setup_mappers(cls):
        subscriber, address = cls.tables.subscriber, cls.tables.address

        # Subscriber is mapped to a join, so address.subscriber_id is
        # present on both "sides" of the addresses relationship
        subscriber_and_address = subscriber.join(
            address,
            and_(address.c.subscriber_id == subscriber.c.id,
                 address.c.type.in_(['A', 'B', 'C'])))

        class Address(cls.Comparable):
            pass

        class Subscriber(cls.Comparable):
            pass

        mapper(Address, address)

        mapper(Subscriber, subscriber_and_address, properties={
            'id': [subscriber.c.id, address.c.subscriber_id],
            'addresses': relationship(Address,
                                      backref=backref("customer"))
        })

    def test_mapping(self):
        """Directions resolve to one-to-many / many-to-one rather than
        self-referential, and a round trip succeeds."""
        Subscriber, Address = self.classes.Subscriber, self.classes.Address
        sess = create_session()
        assert Subscriber.addresses.property.direction is ONETOMANY
        assert Address.customer.property.direction is MANYTOONE

        s1 = Subscriber(type='A',
                        addresses=[
                            Address(type='D'),
                            Address(type='E'),
                        ]
                        )

        a1 = Address(type='B', customer=Subscriber(type='C'))

        # in-Python backref wiring is bidirectional
        assert s1.addresses[0].customer is s1
        assert a1.customer.addresses[0] is a1

        sess.add_all([s1, a1])

        sess.flush()
        sess.expunge_all()

        eq_(
            sess.query(Subscriber).order_by(Subscriber.type).all(),
            [
                Subscriber(id=1, type='A'),
                Subscriber(id=2, type='B'),
                Subscriber(id=2, type='C')
            ]
        )
class ManualBackrefTest(_fixtures.FixtureTest):
    """Test explicit relationships that are backrefs to each other,
    wired together with ``back_populates`` instead of ``backref``."""

    run_inserts = None

    def test_o2m(self):
        """Two explicit relationships linked via back_populates behave
        like a backref pair: appending on one side populates the
        other, and the linkage survives a flush/expire round trip."""
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)

        mapper(User, users, properties={
            'addresses': relationship(Address, back_populates='user')
        })

        mapper(Address, addresses, properties={
            'user': relationship(User, back_populates='addresses')
        })

        sess = create_session()

        u1 = User(name='u1')
        a1 = Address(email_address='foo')
        u1.addresses.append(a1)
        # in-Python mutation propagates to the reverse side immediately
        assert a1.user is u1

        sess.add(u1)
        sess.flush()
        sess.expire_all()
        assert sess.query(Address).one() is a1
        assert a1.user is u1
        assert a1 in u1.addresses

    def test_invalid_key(self):
        """back_populates naming a nonexistent property raises at
        mapper configuration time."""
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)

        mapper(User, users, properties={
            'addresses': relationship(Address, back_populates='userr')
        })

        mapper(Address, addresses, properties={
            'user': relationship(User, back_populates='addresses')
        })

        assert_raises(sa.exc.InvalidRequestError, configure_mappers)

    def test_invalid_target(self):
        """back_populates naming a property that references a different
        mapper raises ArgumentError during configuration."""
        addresses, Dingaling, User, dingalings, Address, users = (
            self.tables.addresses,
            self.classes.Dingaling,
            self.classes.User,
            self.tables.dingalings,
            self.classes.Address,
            self.tables.users)

        mapper(User, users, properties={
            'addresses': relationship(Address, back_populates='dingaling'),
        })

        mapper(Dingaling, dingalings)
        mapper(Address, addresses, properties={
            'dingaling': relationship(Dingaling)
        })

        # the expected message is a regex: use raw strings throughout.
        # Previously only the first fragment was raw, so the "\|"
        # sequences in the last fragment were invalid string escapes
        # (a DeprecationWarning, and an error in future Pythons).
        assert_raises_message(sa.exc.ArgumentError,
                              r"reverse_property 'dingaling' on relationship "
                              r"User.addresses references "
                              r"relationship Address.dingaling, which does not "
                              r"reference mapper Mapper\|User\|users",
                              configure_mappers)
class JoinConditionErrorTest(fixtures.TestBase):
    """Errors raised when a relationship's join condition is missing
    or of an invalid form."""

    def test_clauseelement_pj(self):
        # primaryjoin that is a bare column, not a boolean expression
        from sqlalchemy.ext.declarative import declarative_base
        Base = declarative_base()

        class C1(Base):
            __tablename__ = 'c1'
            id = Column('id', Integer, primary_key=True)

        class C2(Base):
            __tablename__ = 'c2'
            id = Column('id', Integer, primary_key=True)
            c1id = Column('c1id', Integer, ForeignKey('c1.id'))
            c2 = relationship(C1, primaryjoin=C1.id)

        assert_raises(sa.exc.ArgumentError, configure_mappers)

    def test_clauseelement_pj_false(self):
        # "x" == "y" evaluates to a plain Python False, not a SQL clause
        from sqlalchemy.ext.declarative import declarative_base
        Base = declarative_base()

        class C1(Base):
            __tablename__ = 'c1'
            id = Column('id', Integer, primary_key=True)

        class C2(Base):
            __tablename__ = 'c2'
            id = Column('id', Integer, primary_key=True)
            c1id = Column('c1id', Integer, ForeignKey('c1.id'))
            c2 = relationship(C1, primaryjoin="x" == "y")

        assert_raises(sa.exc.ArgumentError, configure_mappers)

    def test_only_column_elements(self):
        # a Join construct is not acceptable as a primaryjoin
        m = MetaData()
        t1 = Table('t1', m,
                   Column('id', Integer, primary_key=True),
                   Column('foo_id', Integer, ForeignKey('t2.id')),
                   )
        t2 = Table('t2', m,
                   Column('id', Integer, primary_key=True),
                   )

        class C1(object):
            pass

        class C2(object):
            pass

        mapper(C1, t1, properties={
            'c2': relationship(C2, primaryjoin=t1.join(t2))})
        mapper(C2, t2)
        assert_raises(sa.exc.ArgumentError, configure_mappers)

    def test_invalid_string_args(self):
        # plain string names in these arguments produce an informative
        # "Column-based expression object expected" error
        from sqlalchemy.ext.declarative import declarative_base

        for argname, arg in [
            ('remote_side', ['c1.id']),
            ('remote_side', ['id']),
            ('foreign_keys', ['c1id']),
            ('foreign_keys', ['C2.c1id']),
            ('order_by', ['id']),
        ]:
            clear_mappers()
            kw = {argname: arg}
            Base = declarative_base()

            class C1(Base):
                __tablename__ = 'c1'
                id = Column('id', Integer, primary_key=True)

            class C2(Base):
                __tablename__ = 'c2'
                id_ = Column('id', Integer, primary_key=True)
                c1id = Column('c1id', Integer, ForeignKey('c1.id'))
                c2 = relationship(C1, **kw)

            assert_raises_message(
                sa.exc.ArgumentError,
                "Column-based expression object expected "
                "for argument '%s'; got: '%s', type %r" %
                (argname, arg[0], type(arg[0])),
                configure_mappers)

    def test_fk_error_not_raised_unrelated(self):
        # the broken FK on t1.foo_id is unrelated to the C1->C2
        # relationship (which goes through t3), so configuration succeeds
        m = MetaData()
        t1 = Table('t1', m,
                   Column('id', Integer, primary_key=True),
                   Column('foo_id', Integer, ForeignKey('t2.nonexistent_id')),
                   )
        t2 = Table('t2', m,  # noqa
                   Column('id', Integer, primary_key=True),
                   )
        t3 = Table('t3', m,
                   Column('id', Integer, primary_key=True),
                   Column('t1id', Integer, ForeignKey('t1.id'))
                   )

        class C1(object):
            pass

        class C2(object):
            pass

        mapper(C1, t1, properties={'c2': relationship(C2)})
        mapper(C2, t3)
        assert C1.c2.property.primaryjoin.compare(t1.c.id == t3.c.t1id)

    def test_join_error_raised(self):
        # no FK path exists between t1 and t3 -> ArgumentError
        m = MetaData()
        t1 = Table('t1', m,
                   Column('id', Integer, primary_key=True),
                   )
        t2 = Table('t2', m,  # noqa
                   Column('id', Integer, primary_key=True),
                   )
        t3 = Table('t3', m,
                   Column('id', Integer, primary_key=True),
                   Column('t1id', Integer)
                   )

        class C1(object):
            pass

        class C2(object):
            pass

        mapper(C1, t1, properties={'c2': relationship(C2)})
        mapper(C2, t3)

        assert_raises(sa.exc.ArgumentError, configure_mappers)

    def teardown(self):
        # mappers accumulate globally in these tests; reset after each
        clear_mappers()
class TypeMatchTest(fixtures.MappedTest):
    """test errors raised when trying to add items
    whose type is not handled by a relationship"""

    @classmethod
    def define_tables(cls, metadata):
        Table("a", metadata,
              Column('aid', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('adata', String(30)))
        Table("b", metadata,
              Column('bid', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column("a_id", Integer, ForeignKey("a.aid")),
              Column('bdata', String(30)))
        Table("c", metadata,
              Column('cid', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column("b_id", Integer, ForeignKey("b.bid")),
              Column('cdata', String(30)))
        Table("d", metadata,
              Column('did', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column("a_id", Integer, ForeignKey("a.aid")),
              Column('ddata', String(30)))

    def test_o2m_oncascade(self):
        # C is unrelated to B; appending it fails at add() time, when
        # the save-update cascade traverses the 'bs' collection
        a, c, b = (self.tables.a,
                   self.tables.c,
                   self.tables.b)

        class A(fixtures.BasicEntity):
            pass

        class B(fixtures.BasicEntity):
            pass

        class C(fixtures.BasicEntity):
            pass

        mapper(A, a, properties={'bs': relationship(B)})
        mapper(B, b)
        mapper(C, c)

        a1 = A()
        b1 = B()
        c1 = C()
        a1.bs.append(b1)
        a1.bs.append(c1)
        sess = create_session()
        try:
            sess.add(a1)
            assert False
        except AssertionError as err:
            eq_(str(err),
                "Attribute 'bs' on class '%s' doesn't handle "
                "objects of type '%s'" % (A, C))

    def test_o2m_onflush(self):
        # with cascade="none" the bad item is not caught at add() time;
        # the error surfaces as a FlushError during flush instead
        a, c, b = (self.tables.a,
                   self.tables.c,
                   self.tables.b)

        class A(fixtures.BasicEntity):
            pass

        class B(fixtures.BasicEntity):
            pass

        class C(fixtures.BasicEntity):
            pass

        mapper(A, a, properties={'bs': relationship(B, cascade="none")})
        mapper(B, b)
        mapper(C, c)

        a1 = A()
        b1 = B()
        c1 = C()
        a1.bs.append(b1)
        a1.bs.append(c1)

        sess = create_session()
        sess.add(a1)
        sess.add(b1)
        sess.add(c1)
        assert_raises_message(sa.orm.exc.FlushError,
                              "Attempting to flush an item",
                              sess.flush)

    def test_o2m_nopoly_onflush(self):
        # C subclasses B in Python and is mapped with inherits=B, but
        # flushing it into a B-typed collection still raises
        a, c, b = (self.tables.a,
                   self.tables.c,
                   self.tables.b)

        class A(fixtures.BasicEntity):
            pass

        class B(fixtures.BasicEntity):
            pass

        class C(B):
            pass

        mapper(A, a, properties={'bs': relationship(B, cascade="none")})
        mapper(B, b)
        mapper(C, c, inherits=B)

        a1 = A()
        b1 = B()
        c1 = C()
        a1.bs.append(b1)
        a1.bs.append(c1)

        sess = create_session()
        sess.add(a1)
        sess.add(b1)
        sess.add(c1)
        assert_raises_message(sa.orm.exc.FlushError,
                              "Attempting to flush an item",
                              sess.flush)

    def test_m2o_nopoly_onflush(self):
        # many-to-one flavor: B subclasses A (inherits=A), but assigning
        # a B to the A-typed scalar with cascade="none" fails on flush
        a, b, d = (self.tables.a,
                   self.tables.b,
                   self.tables.d)

        class A(fixtures.BasicEntity):
            pass

        class B(A):
            pass

        class D(fixtures.BasicEntity):
            pass

        mapper(A, a)
        mapper(B, b, inherits=A)
        mapper(D, d, properties={"a": relationship(A, cascade="none")})
        b1 = B()
        d1 = D()
        d1.a = b1
        sess = create_session()
        sess.add(b1)
        sess.add(d1)
        assert_raises_message(sa.orm.exc.FlushError,
                              "Attempting to flush an item",
                              sess.flush)

    def test_m2o_oncascade(self):
        # completely unrelated type on a many-to-one scalar fails
        # immediately at add() time
        a, b, d = (self.tables.a,
                   self.tables.b,
                   self.tables.d)

        class A(fixtures.BasicEntity):
            pass

        class B(fixtures.BasicEntity):
            pass

        class D(fixtures.BasicEntity):
            pass

        mapper(A, a)
        mapper(B, b)
        mapper(D, d, properties={"a": relationship(A)})
        b1 = B()
        d1 = D()
        d1.a = b1
        sess = create_session()
        assert_raises_message(AssertionError,
                              "doesn't handle objects of type",
                              sess.add, d1)
class TypedAssociationTable(fixtures.MappedTest):
    """m2m association where the candidate key columns use a custom
    TypeDecorator."""

    @classmethod
    def define_tables(cls, metadata):
        class MySpecialType(sa.types.TypeDecorator):
            # prepends "lala" going into the DB, strips it coming out
            impl = String

            def process_bind_param(self, value, dialect):
                return "lala" + value

            def process_result_value(self, value, dialect):
                return value[4:]

        Table('t1', metadata,
              Column('col1', MySpecialType(30), primary_key=True),
              Column('col2', String(30)))
        Table('t2', metadata,
              Column('col1', MySpecialType(30), primary_key=True),
              Column('col2', String(30)))
        Table('t3', metadata,
              Column('t1c1', MySpecialType(30), ForeignKey('t1.col1')),
              Column('t2c1', MySpecialType(30), ForeignKey('t2.col1')))

    def test_m2m(self):
        """Many-to-many tables with special types for candidate keys."""
        t2, t3, t1 = (self.tables.t2,
                      self.tables.t3,
                      self.tables.t1)

        class T1(fixtures.BasicEntity):
            pass

        class T2(fixtures.BasicEntity):
            pass

        mapper(T2, t2)
        mapper(T1, t1, properties={
            't2s': relationship(T2, secondary=t3, backref='t1s')})

        a = T1()
        a.col1 = "aid"
        b = T2()
        b.col1 = "bid"
        c = T2()
        c.col1 = "cid"
        a.t2s.append(b)
        a.t2s.append(c)
        sess = create_session()
        sess.add(a)
        sess.flush()

        # two association rows written through the custom type
        assert t3.count().scalar() == 2

        a.t2s.remove(c)
        sess.flush()

        # removing one member deleted exactly one association row
        assert t3.count().scalar() == 1
class CustomOperatorTest(fixtures.MappedTest, AssertsCompiledSQL):
    """test op() in conjunction with join conditions"""

    run_create_tables = run_deletes = None

    __dialect__ = 'default'

    @classmethod
    def define_tables(cls, metadata):
        Table('a', metadata,
              Column('id', Integer, primary_key=True),
              Column('foo', String(50))
              )
        Table('b', metadata,
              Column('id', Integer, primary_key=True),
              Column('foo', String(50))
              )

    def test_join_on_custom_op(self):
        """A custom operator built with op('&*', is_comparison=True)
        is accepted as a relationship primaryjoin and renders in the
        compiled JOIN clause."""
        class A(fixtures.BasicEntity):
            pass

        class B(fixtures.BasicEntity):
            pass

        mapper(A, self.tables.a, properties={
            'bs': relationship(B,
                               primaryjoin=self.tables.a.c.foo.op(
                                   '&*', is_comparison=True
                               )(foreign(self.tables.b.c.foo)),
                               viewonly=True
                               )
        })
        mapper(B, self.tables.b)
        self.assert_compile(
            Session().query(A).join(A.bs),
            "SELECT a.id AS a_id, a.foo AS a_foo "
            "FROM a JOIN b ON a.foo &* b.foo"
        )
class ViewOnlyHistoryTest(fixtures.MappedTest):
    """viewonly relationship attributes generate no change history and
    never populate the foreign key at flush time."""

    @classmethod
    def define_tables(cls, metadata):
        Table("t1", metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(40)))
        Table("t2", metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(40)),
              Column('t1id', Integer, ForeignKey('t1.id')))

    def _assert_fk(self, a1, b1, is_set):
        # flush both objects, check whether t2.t1id was written, and
        # return the session for further assertions by the caller
        s = Session(testing.db)
        s.add_all([a1, b1])
        s.flush()

        if is_set:
            eq_(b1.t1id, a1.id)
        else:
            eq_(b1.t1id, None)

        return s

    def test_o2m_viewonly_oneside(self):
        # only the collection side is viewonly; the m2o backref still
        # records history and sets the FK
        class A(fixtures.ComparableEntity):
            pass

        class B(fixtures.ComparableEntity):
            pass

        mapper(A, self.tables.t1, properties={
            "bs": relationship(B, viewonly=True,
                               backref=backref("a", viewonly=False))
        })
        mapper(B, self.tables.t2)

        a1 = A()
        b1 = B()
        a1.bs.append(b1)
        assert b1.a is a1
        assert not inspect(a1).attrs.bs.history.has_changes()
        assert inspect(b1).attrs.a.history.has_changes()

        sess = self._assert_fk(a1, b1, True)

        # removal through the viewonly side dirties only the other object
        a1.bs.remove(b1)
        assert a1 not in sess.dirty
        assert b1 in sess.dirty

    def test_m2o_viewonly_oneside(self):
        # the scalar side is viewonly; the collection side still records
        # history and sets the FK
        class A(fixtures.ComparableEntity):
            pass

        class B(fixtures.ComparableEntity):
            pass

        mapper(A, self.tables.t1, properties={
            "bs": relationship(B, viewonly=False,
                               backref=backref("a", viewonly=True))
        })
        mapper(B, self.tables.t2)

        a1 = A()
        b1 = B()
        b1.a = a1
        assert b1 in a1.bs
        assert inspect(a1).attrs.bs.history.has_changes()
        assert not inspect(b1).attrs.a.history.has_changes()

        sess = self._assert_fk(a1, b1, True)

        a1.bs.remove(b1)
        assert a1 in sess.dirty
        assert b1 not in sess.dirty

    def test_o2m_viewonly_only(self):
        # no backref at all: a viewonly o2m records nothing and the FK
        # stays NULL after flush
        class A(fixtures.ComparableEntity):
            pass

        class B(fixtures.ComparableEntity):
            pass

        mapper(A, self.tables.t1, properties={
            "bs": relationship(B, viewonly=True)
        })
        mapper(B, self.tables.t2)

        a1 = A()
        b1 = B()
        a1.bs.append(b1)
        assert not inspect(a1).attrs.bs.history.has_changes()

        self._assert_fk(a1, b1, False)

    def test_m2o_viewonly_only(self):
        # a standalone viewonly m2o likewise records nothing and leaves
        # the FK NULL
        class A(fixtures.ComparableEntity):
            pass

        class B(fixtures.ComparableEntity):
            pass

        mapper(A, self.tables.t1)
        mapper(B, self.tables.t2, properties={
            'a': relationship(A, viewonly=True)
        })

        a1 = A()
        b1 = B()
        b1.a = a1
        assert not inspect(b1).attrs.a.history.has_changes()

        self._assert_fk(a1, b1, False)
class ViewOnlyM2MBackrefTest(fixtures.MappedTest):
    """m2m with a viewonly backref: the viewonly side records no
    history, while the writable side persists the association."""

    @classmethod
    def define_tables(cls, metadata):
        Table("t1", metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(40)))
        Table("t2", metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(40)),
              )
        # association table for the m2m
        Table("t1t2", metadata,
              Column('t1id', Integer, ForeignKey('t1.id'), primary_key=True),
              Column('t2id', Integer, ForeignKey('t2.id'), primary_key=True),
              )

    def test_viewonly(self):
        t1t2, t2, t1 = (self.tables.t1t2,
                        self.tables.t2,
                        self.tables.t1)

        class A(fixtures.ComparableEntity):
            pass

        class B(fixtures.ComparableEntity):
            pass

        mapper(A, t1, properties={
            'bs': relationship(B, secondary=t1t2,
                               backref=backref('as_', viewonly=True))
        })
        mapper(B, t2)

        sess = create_session()
        a1 = A()
        b1 = B(as_=[a1])

        # populating through the viewonly backref creates no history
        assert not inspect(b1).attrs.as_.history.has_changes()

        sess.add(a1)
        sess.flush()
        # both directions load correctly after the flush
        eq_(
            sess.query(A).first(), A(bs=[B(id=b1.id)])
        )
        eq_(
            sess.query(B).first(), B(as_=[A(id=a1.id)])
        )
class ViewOnlyOverlappingNames(fixtures.MappedTest):
    """'viewonly' mappings with overlapping PK column names."""

    @classmethod
    def define_tables(cls, metadata):
        # all three tables deliberately share the PK column name 'id'
        Table("t1", metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(40)))
        Table("t2", metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(40)),
              Column('t1id', Integer, ForeignKey('t1.id')))
        Table("t3", metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(40)),
              Column('t2id', Integer, ForeignKey('t2.id')))

    def test_three_table_view(self):
        """A three table join with overlapping PK names.

        A third table is pulled into the primary join condition using
        overlapping PK column names and should not produce 'conflicting column'
        error.
        """
        t2, t3, t1 = (self.tables.t2,
                      self.tables.t3,
                      self.tables.t1)

        class C1(fixtures.BasicEntity):
            pass

        class C2(fixtures.BasicEntity):
            pass

        class C3(fixtures.BasicEntity):
            pass

        mapper(C1, t1, properties={
            't2s': relationship(C2),
            't2_view': relationship(
                C2,
                viewonly=True,
                primaryjoin=sa.and_(t1.c.id == t2.c.t1id,
                                    t3.c.t2id == t2.c.id,
                                    t3.c.data == t1.c.data))})
        mapper(C2, t2)
        mapper(C3, t3, properties={
            't2': relationship(C2)})

        c1 = C1()
        c1.data = 'c1data'
        c2a = C2()
        c1.t2s.append(c2a)
        c2b = C2()
        c1.t2s.append(c2b)
        c3 = C3()
        c3.data = 'c1data'
        c3.t2 = c2b
        sess = create_session()

        sess.add(c1)
        sess.add(c3)
        sess.flush()
        sess.expunge_all()

        c1 = sess.query(C1).get(c1.id)
        # the plain relationship loads both children; the view only sees
        # c2b, which is reachable through a t3 row with matching data
        assert set([x.id for x in c1.t2s]) == set([c2a.id, c2b.id])
        assert set([x.id for x in c1.t2_view]) == set([c2b.id])
class ViewOnlyUniqueNames(fixtures.MappedTest):
    """'viewonly' mappings with unique PK column names."""

    @classmethod
    def define_tables(cls, metadata):
        # unlike ViewOnlyOverlappingNames, every PK column name here is
        # unique across the three tables
        Table("t1", metadata,
              Column('t1id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(40)))
        Table("t2", metadata,
              Column('t2id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(40)),
              Column('t1id_ref', Integer, ForeignKey('t1.t1id')))
        Table("t3", metadata,
              Column('t3id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(40)),
              Column('t2id_ref', Integer, ForeignKey('t2.t2id')))

    def test_three_table_view(self):
        """A three table join with overlapping PK names.

        A third table is pulled into the primary join condition using unique
        PK column names and should not produce 'mapper has no columnX' error.
        """
        t2, t3, t1 = (self.tables.t2,
                      self.tables.t3,
                      self.tables.t1)

        class C1(fixtures.BasicEntity):
            pass

        class C2(fixtures.BasicEntity):
            pass

        class C3(fixtures.BasicEntity):
            pass

        mapper(C1, t1, properties={
            't2s': relationship(C2),
            't2_view': relationship(
                C2,
                viewonly=True,
                primaryjoin=sa.and_(t1.c.t1id == t2.c.t1id_ref,
                                    t3.c.t2id_ref == t2.c.t2id,
                                    t3.c.data == t1.c.data))})
        mapper(C2, t2)
        mapper(C3, t3, properties={
            't2': relationship(C2)})

        c1 = C1()
        c1.data = 'c1data'
        c2a = C2()
        c1.t2s.append(c2a)
        c2b = C2()
        c1.t2s.append(c2b)
        c3 = C3()
        c3.data = 'c1data'
        c3.t2 = c2b
        sess = create_session()
        sess.add_all((c1, c3))
        sess.flush()
        sess.expunge_all()

        c1 = sess.query(C1).get(c1.t1id)
        # the viewonly relationship returns only the t2 row linked via t3
        assert set([x.t2id for x in c1.t2s]) == set([c2a.t2id, c2b.t2id])
        assert set([x.t2id for x in c1.t2_view]) == set([c2b.t2id])
class ViewOnlyLocalRemoteM2M(fixtures.TestBase):
    """test that local-remote is correctly determined for m2m"""

    def test_local_remote(self):
        """A viewonly m2m and a plain m2m over the same secondary table
        must compute identical local/remote column pairs."""
        meta = MetaData()

        left = Table(
            't1', meta,
            Column('id', Integer, primary_key=True),
        )
        right = Table(
            't2', meta,
            Column('id', Integer, primary_key=True),
        )
        secondary = Table(
            'tab', meta,
            Column('t1_id', Integer, ForeignKey('t1.id',)),
            Column('t2_id', Integer, ForeignKey('t2.id',)),
        )

        class A(object):
            pass

        class B(object):
            pass

        mapper(B, right)
        a_mapper = mapper(A, left, properties=dict(
            b_view=relationship(B, secondary=secondary, viewonly=True),
            b_plain=relationship(B, secondary=secondary),
        ))
        configure_mappers()

        # both relationships resolve to the same pairs
        expected = [(left.c.id, secondary.c.t1_id),
                    (right.c.id, secondary.c.t2_id)]
        assert a_mapper.get_property('b_view').local_remote_pairs == expected
        assert a_mapper.get_property('b_plain').local_remote_pairs == expected
class ViewOnlyNonEquijoin(fixtures.MappedTest):
    """'viewonly' mappings based on non-equijoins."""

    @classmethod
    def define_tables(cls, metadata):
        # no ForeignKey between foos and bars; the join is an inequality
        Table('foos', metadata,
              Column('id', Integer, primary_key=True))
        Table('bars', metadata,
              Column('id', Integer, primary_key=True),
              Column('fid', Integer))

    def test_viewonly_join(self):
        """A viewonly relationship may use a non-equijoin (>) as its
        primaryjoin: each Foo loads every Bar whose fid is less than
        the Foo's id."""
        bars, foos = self.tables.bars, self.tables.foos

        class Foo(fixtures.ComparableEntity):
            pass

        class Bar(fixtures.ComparableEntity):
            pass

        mapper(Foo, foos, properties={
            'bars': relationship(Bar,
                                 primaryjoin=foos.c.id > bars.c.fid,
                                 foreign_keys=[bars.c.fid],
                                 viewonly=True)})
        mapper(Bar, bars)

        sess = create_session()
        fixture_rows = [
            Foo(id=4),
            Foo(id=9),
            Bar(id=1, fid=2),
            Bar(id=2, fid=3),
            Bar(id=3, fid=6),
            Bar(id=4, fid=7),
        ]
        sess.add_all(fixture_rows)
        sess.flush()

        # re-query from a fresh session and verify the inequality join
        sess = create_session()
        eq_(sess.query(Foo).filter_by(id=4).one(),
            Foo(id=4, bars=[Bar(fid=2), Bar(fid=3)]))
        eq_(sess.query(Foo).filter_by(id=9).one(),
            Foo(id=9, bars=[Bar(fid=2), Bar(fid=3), Bar(fid=6), Bar(fid=7)]))
class ViewOnlyRepeatedRemoteColumn(fixtures.MappedTest):
    """'viewonly' mappings that contain the same 'remote' column twice"""

    @classmethod
    def define_tables(cls, metadata):
        # foos carries two FKs to bars.id, so bars.id appears twice in
        # the OR'ed primaryjoin below
        Table('foos', metadata,
              Column(
                  'id', Integer, primary_key=True,
                  test_needs_autoincrement=True),
              Column('bid1', Integer, ForeignKey('bars.id')),
              Column('bid2', Integer, ForeignKey('bars.id')))
        Table('bars', metadata,
              Column(
                  'id', Integer, primary_key=True,
                  test_needs_autoincrement=True),
              Column('data', String(50)))

    def test_relationship_on_or(self):
        bars, foos = self.tables.bars, self.tables.foos

        class Foo(fixtures.ComparableEntity):
            pass

        class Bar(fixtures.ComparableEntity):
            pass

        mapper(Foo, foos, properties={
            'bars': relationship(Bar,
                                 primaryjoin=sa.or_(bars.c.id == foos.c.bid1,
                                                    bars.c.id == foos.c.bid2),
                                 uselist=True,
                                 viewonly=True)})
        mapper(Bar, bars)

        sess = create_session()
        b1 = Bar(id=1, data='b1')
        b2 = Bar(id=2, data='b2')
        b3 = Bar(id=3, data='b3')
        f1 = Foo(bid1=1, bid2=2)
        f2 = Foo(bid1=3, bid2=None)

        sess.add_all((b1, b2, b3))
        sess.flush()

        sess.add_all((f1, f2))
        sess.flush()

        sess.expunge_all()
        # f1 matches on both FK columns; f2 matches on bid1 only
        eq_(sess.query(Foo).filter_by(id=f1.id).one(),
            Foo(bars=[Bar(data='b1'), Bar(data='b2')]))
        eq_(sess.query(Foo).filter_by(id=f2.id).one(),
            Foo(bars=[Bar(data='b3')]))
class ViewOnlyRepeatedLocalColumn(fixtures.MappedTest):
    """'viewonly' mappings that contain the same 'local' column twice"""

    @classmethod
    def define_tables(cls, metadata):
        # bars carries two FKs to foos.id, so foos.id appears twice in
        # the OR'ed primaryjoin below
        Table('foos', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(50)))

        Table('bars', metadata, Column('id', Integer, primary_key=True,
                                       test_needs_autoincrement=True),
              Column('fid1', Integer, ForeignKey('foos.id')),
              Column('fid2', Integer, ForeignKey('foos.id')),
              Column('data', String(50)))

    def test_relationship_on_or(self):
        bars, foos = self.tables.bars, self.tables.foos

        class Foo(fixtures.ComparableEntity):
            pass

        class Bar(fixtures.ComparableEntity):
            pass

        mapper(Foo, foos, properties={
            'bars': relationship(Bar,
                                 primaryjoin=sa.or_(bars.c.fid1 == foos.c.id,
                                                    bars.c.fid2 == foos.c.id),
                                 viewonly=True)})
        mapper(Bar, bars)

        sess = create_session()
        f1 = Foo(id=1, data='f1')
        f2 = Foo(id=2, data='f2')
        b1 = Bar(fid1=1, data='b1')
        b2 = Bar(fid2=1, data='b2')
        b3 = Bar(fid1=2, data='b3')
        b4 = Bar(fid1=1, fid2=2, data='b4')
        sess.add_all((f1, f2))
        sess.flush()
        sess.add_all((b1, b2, b3, b4))
        sess.flush()
        sess.expunge_all()
        # b4 references both foos, so it appears in both collections
        eq_(sess.query(Foo).filter_by(id=f1.id).one(),
            Foo(bars=[Bar(data='b1'), Bar(data='b2'), Bar(data='b4')]))
        eq_(sess.query(Foo).filter_by(id=f2.id).one(),
            Foo(bars=[Bar(data='b3'), Bar(data='b4')]))
class ViewOnlyComplexJoin(_RelationshipErrors, fixtures.MappedTest):
    """'viewonly' mappings with a complex join condition."""

    @classmethod
    def define_tables(cls, metadata):
        Table('t1', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(50)))
        Table('t2', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(50)),
              Column('t1id', Integer, ForeignKey('t1.id')))
        Table('t3', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('data', String(50)))
        Table('t2tot3', metadata,
              Column('t2id', Integer, ForeignKey('t2.id')),
              Column('t3id', Integer, ForeignKey('t3.id')))

    @classmethod
    def setup_classes(cls):
        class T1(cls.Comparable):
            pass

        class T2(cls.Comparable):
            pass

        class T3(cls.Comparable):
            pass

    def test_basic(self):
        """T1.t3s spans t1 -> t2 -> t2tot3 -> t3 via an explicit
        primaryjoin; with foreign_keys and remote_side supplied, the
        viewonly load works end to end."""
        T1, t2, T2, T3, t3, t2tot3, t1 = (self.classes.T1,
                                          self.tables.t2,
                                          self.classes.T2,
                                          self.classes.T3,
                                          self.tables.t3,
                                          self.tables.t2tot3,
                                          self.tables.t1)

        mapper(T1, t1, properties={
            't3s': relationship(T3, primaryjoin=sa.and_(
                t1.c.id == t2.c.t1id,
                t2.c.id == t2tot3.c.t2id,
                t3.c.id == t2tot3.c.t3id),
                viewonly=True,
                foreign_keys=t3.c.id, remote_side=t2.c.t1id)
        })
        mapper(T2, t2, properties={
            't1': relationship(T1),
            't3s': relationship(T3, secondary=t2tot3)
        })
        mapper(T3, t3)

        sess = create_session()
        sess.add(T2(data='t2', t1=T1(data='t1'), t3s=[T3(data='t3')]))
        sess.flush()
        sess.expunge_all()

        a = sess.query(T1).first()
        eq_(a.t3s, [T3(data='t3')])

    def test_remote_side_escalation(self):
        """Same mapping but without remote_side: configuration cannot
        determine local/remote pairs and must raise for T1.t3s."""
        T1, t2, T2, T3, t3, t2tot3, t1 = (self.classes.T1,
                                          self.tables.t2,
                                          self.classes.T2,
                                          self.classes.T3,
                                          self.tables.t3,
                                          self.tables.t2tot3,
                                          self.tables.t1)

        mapper(T1, t1, properties={
            't3s': relationship(T3,
                                primaryjoin=sa.and_(t1.c.id == t2.c.t1id,
                                                    t2.c.id == t2tot3.c.t2id,
                                                    t3.c.id == t2tot3.c.t3id
                                                    ),
                                viewonly=True,
                                foreign_keys=t3.c.id)})
        mapper(T2, t2, properties={
            't1': relationship(T1),
            't3s': relationship(T3, secondary=t2tot3)})
        mapper(T3, t3)
        self._assert_raises_no_local_remote(configure_mappers, "T1.t3s")
class RemoteForeignBetweenColsTest(fixtures.DeclarativeMappedTest):
    """test a complex annotation using between().

    Using declarative here as an integration test for the local()
    and remote() annotations in conjunction with already annotated
    instrumented attributes, etc.
    """

    @classmethod
    def setup_classes(cls):
        Base = cls.DeclarativeBasic

        class Network(fixtures.ComparableEntity, Base):
            __tablename__ = "network"

            id = Column(sa.Integer, primary_key=True,
                        test_needs_autoincrement=True)
            ip_net_addr = Column(Integer)
            ip_broadcast_addr = Column(Integer)

            # string primaryjoin using remote()/foreign() annotations on
            # a BETWEEN expression; no FK exists between the tables, so
            # the relationship is viewonly
            addresses = relationship(
                "Address",
                primaryjoin="remote(foreign(Address.ip_addr)).between("
                "Network.ip_net_addr,"
                "Network.ip_broadcast_addr)",
                viewonly=True
            )

        class Address(fixtures.ComparableEntity, Base):
            __tablename__ = "address"

            ip_addr = Column(Integer, primary_key=True)

    @classmethod
    def insert_data(cls):
        Network, Address = cls.classes.Network, cls.classes.Address
        s = Session(testing.db)

        s.add_all([
            Network(ip_net_addr=5, ip_broadcast_addr=10),
            Network(ip_net_addr=15, ip_broadcast_addr=25),
            Network(ip_net_addr=30, ip_broadcast_addr=35),
            Address(ip_addr=17), Address(ip_addr=18), Address(ip_addr=9),
            Address(ip_addr=27)
        ])
        s.commit()

    def test_col_query(self):
        # explicit join along the BETWEEN condition in a column query
        Network, Address = self.classes.Network, self.classes.Address

        session = Session(testing.db)
        eq_(
            session.query(Address.ip_addr).
            select_from(Network).
            join(Network.addresses).
            filter(Network.ip_net_addr == 15).
            all(),
            [(17, ), (18, )]
        )

    def test_lazyload(self):
        # lazy load of the viewonly collection applies the same range
        Network, Address = self.classes.Network, self.classes.Address

        session = Session(testing.db)

        n3 = session.query(Network).filter(Network.ip_net_addr == 5).one()
        eq_([a.ip_addr for a in n3.addresses], [9])
class ExplicitLocalRemoteTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
    # t2.t1id is a plain String with no ForeignKey; each test supplies
    # the join / foreign-keys information explicitly
    Table('t1', metadata,
          Column('id', String(50), primary_key=True),
          Column('data', String(50)))
    Table('t2', metadata,
          Column('id', Integer, primary_key=True,
                 test_needs_autoincrement=True),
          Column('data', String(50)),
          Column('t1id', String(50)))
@classmethod
def setup_classes(cls):
    # bare comparable entities; each test maps them as needed
    class T1(cls.Comparable):
        pass

    class T2(cls.Comparable):
        pass
def test_onetomany_funcfk_oldstyle(self):
    """func-based join expressed via the legacy _local_remote_pairs
    argument plus explicit foreign_keys."""
    T2, T1, t2, t1 = (self.classes.T2,
                      self.classes.T1,
                      self.tables.t2,
                      self.tables.t1)

    # old _local_remote_pairs
    mapper(T1, t1, properties={
        't2s': relationship(
            T2,
            primaryjoin=t1.c.id == sa.func.lower(t2.c.t1id),
            _local_remote_pairs=[(t1.c.id, t2.c.t1id)],
            foreign_keys=[t2.c.t1id]
        )
    })
    mapper(T2, t2)
    self._test_onetomany()
def test_onetomany_funcfk_annotated(self):
    """Same mapping as the oldstyle test, but using the foreign()
    annotation in place of _local_remote_pairs/foreign_keys."""
    T2, T1, t2, t1 = (self.classes.T2,
                      self.classes.T1,
                      self.tables.t2,
                      self.tables.t1)

    # use annotation
    mapper(T1, t1, properties={
        't2s': relationship(T2,
                            primaryjoin=t1.c.id ==
                            foreign(sa.func.lower(t2.c.t1id)),
                            )})
    mapper(T2, t2)
    self._test_onetomany()
def _test_onetomany(self):
    """Shared assertions: direction, local/remote pairs, and a
    case-insensitive round trip through func.lower()."""
    T2, T1, t2, t1 = (self.classes.T2,
                      self.classes.T1,
                      self.tables.t2,
                      self.tables.t1)
    is_(T1.t2s.property.direction, ONETOMANY)
    eq_(T1.t2s.property.local_remote_pairs, [(t1.c.id, t2.c.t1id)])
    sess = create_session()
    a1 = T1(id='number1', data='a1')
    a2 = T1(id='number2', data='a2')
    b1 = T2(data='b1', t1id='NuMbEr1')
    b2 = T2(data='b2', t1id='Number1')
    b3 = T2(data='b3', t1id='Number2')
    sess.add_all((a1, a2, b1, b2, b3))
    sess.flush()
    sess.expunge_all()

    # lower(t1id) equals 'number1' for both mixed-case rows
    eq_(sess.query(T1).first(),
        T1(id='number1', data='a1', t2s=[
            T2(data='b1', t1id='NuMbEr1'),
            T2(data='b2', t1id='Number1')]))
def test_manytoone_funcfk(self):
T2, T1, t2, t1 = (self.classes.T2,
self.classes.T1,
self.tables.t2,
self.tables.t1)
mapper(T1, t1)
mapper(T2, t2, properties={
't1': relationship(T1,
primaryjoin=t1.c.id == sa.func.lower(t2.c.t1id),
_local_remote_pairs=[(t2.c.t1id, t1.c.id)],
foreign_keys=[t2.c.t1id],
uselist=True)})
sess = create_session()
a1 = T1(id='number1', data='a1')
a2 = T1(id='number2', data='a2')
b1 = T2(data='b1', t1id='NuMbEr1')
b2 = T2(data='b2', t1id='Number1')
b3 = T2(data='b3', t1id='Number2')
sess.add_all((a1, a2, b1, b2, b3))
sess.flush()
sess.expunge_all()
eq_(sess.query(T2).filter(T2.data.in_(['b1', 'b2'])).all(),
[T2(data='b1', t1=[T1(id='number1', data='a1')]),
T2(data='b2', t1=[T1(id='number1', data='a1')])])
def test_onetomany_func_referent(self):
T2, T1, t2, t1 = (self.classes.T2,
self.classes.T1,
self.tables.t2,
self.tables.t1)
mapper(T1, t1, properties={
't2s': relationship(
T2,
primaryjoin=sa.func.lower(t1.c.id) == t2.c.t1id,
_local_remote_pairs=[(t1.c.id, t2.c.t1id)],
foreign_keys=[t2.c.t1id])})
mapper(T2, t2)
sess = create_session()
a1 = T1(id='NuMbeR1', data='a1')
a2 = T1(id='NuMbeR2', data='a2')
b1 = T2(data='b1', t1id='number1')
b2 = T2(data='b2', t1id='number1')
b3 = T2(data='b2', t1id='number2')
sess.add_all((a1, a2, b1, b2, b3))
sess.flush()
sess.expunge_all()
eq_(sess.query(T1).first(),
T1(id='NuMbeR1', data='a1', t2s=[
T2(data='b1', t1id='number1'),
T2(data='b2', t1id='number1')]))
def test_manytoone_func_referent(self):
T2, T1, t2, t1 = (self.classes.T2,
self.classes.T1,
self.tables.t2,
self.tables.t1)
mapper(T1, t1)
mapper(T2, t2, properties={
't1': relationship(T1,
primaryjoin=sa.func.lower(t1.c.id) == t2.c.t1id,
_local_remote_pairs=[(t2.c.t1id, t1.c.id)],
foreign_keys=[t2.c.t1id], uselist=True)})
sess = create_session()
a1 = T1(id='NuMbeR1', data='a1')
a2 = T1(id='NuMbeR2', data='a2')
b1 = T2(data='b1', t1id='number1')
b2 = T2(data='b2', t1id='number1')
b3 = T2(data='b3', t1id='number2')
sess.add_all((a1, a2, b1, b2, b3))
sess.flush()
sess.expunge_all()
eq_(sess.query(T2).filter(T2.data.in_(['b1', 'b2'])).all(),
[T2(data='b1', t1=[T1(id='NuMbeR1', data='a1')]),
T2(data='b2', t1=[T1(id='NuMbeR1', data='a1')])])
def test_escalation_1(self):
T2, T1, t2, t1 = (self.classes.T2,
self.classes.T1,
self.tables.t2,
self.tables.t1)
mapper(T1, t1, properties={
't2s': relationship(
T2,
primaryjoin=t1.c.id == sa.func.lower(t2.c.t1id),
_local_remote_pairs=[(t1.c.id, t2.c.t1id)],
foreign_keys=[t2.c.t1id],
remote_side=[t2.c.t1id])})
mapper(T2, t2)
assert_raises(sa.exc.ArgumentError, sa.orm.configure_mappers)
def test_escalation_2(self):
T2, T1, t2, t1 = (self.classes.T2,
self.classes.T1,
self.tables.t2,
self.tables.t1)
mapper(T1, t1, properties={
't2s': relationship(
T2,
primaryjoin=t1.c.id == sa.func.lower(t2.c.t1id),
_local_remote_pairs=[(t1.c.id, t2.c.t1id)])})
mapper(T2, t2)
assert_raises(sa.exc.ArgumentError, sa.orm.configure_mappers)
class InvalidRemoteSideTest(fixtures.MappedTest):
    """Self-referential mappings where both the relationship and its
    backref / back_populates partner resolve to the *same* direction;
    configuration should raise an informative ``ArgumentError``.
    """

    @classmethod
    def define_tables(cls, metadata):
        # single self-referential table: t_id points back at t1.id
        Table('t1', metadata,
              Column('id', Integer, primary_key=True),
              Column('data', String(50)),
              Column('t_id', Integer, ForeignKey('t1.id'))
              )

    @classmethod
    def setup_classes(cls):
        class T1(cls.Comparable):
            pass

    def test_o2m_backref(self):
        T1, t1 = self.classes.T1, self.tables.t1

        # no remote_side anywhere: both sides come out ONETOMANY
        mapper(T1, t1, properties={
            't1s': relationship(T1, backref='parent')
        })

        assert_raises_message(
            sa.exc.ArgumentError,
            "T1.t1s and back-reference T1.parent are "
            r"both of the same direction symbol\('ONETOMANY'\). Did you "
            "mean to set remote_side on the many-to-one side ?",
            configure_mappers)

    def test_m2o_backref(self):
        T1, t1 = self.classes.T1, self.tables.t1

        # remote_side given on *both* sides: both come out MANYTOONE
        mapper(T1, t1, properties={
            't1s': relationship(T1,
                                backref=backref('parent', remote_side=t1.c.id),
                                remote_side=t1.c.id)
        })

        assert_raises_message(
            sa.exc.ArgumentError,
            "T1.t1s and back-reference T1.parent are "
            r"both of the same direction symbol\('MANYTOONE'\). Did you "
            "mean to set remote_side on the many-to-one side ?",
            configure_mappers)

    def test_o2m_explicit(self):
        T1, t1 = self.classes.T1, self.tables.t1

        # same conflict expressed via back_populates instead of backref
        mapper(T1, t1, properties={
            't1s': relationship(T1, back_populates='parent'),
            'parent': relationship(T1, back_populates='t1s'),
        })

        # can't be sure of ordering here
        assert_raises_message(
            sa.exc.ArgumentError,
            r"both of the same direction symbol\('ONETOMANY'\). Did you "
            "mean to set remote_side on the many-to-one side ?",
            configure_mappers)

    def test_m2o_explicit(self):
        T1, t1 = self.classes.T1, self.tables.t1

        mapper(T1, t1, properties={
            't1s': relationship(T1, back_populates='parent',
                                remote_side=t1.c.id),
            'parent': relationship(T1, back_populates='t1s',
                                   remote_side=t1.c.id)
        })

        # can't be sure of ordering here
        assert_raises_message(
            sa.exc.ArgumentError,
            r"both of the same direction symbol\('MANYTOONE'\). Did you "
            "mean to set remote_side on the many-to-one side ?",
            configure_mappers)
class AmbiguousFKResolutionTest(_RelationshipErrors, fixtures.MappedTest):
    """Schemas with multiple (or zero) FK paths between two tables:
    the relationship must either fail with an informative error or be
    resolvable via ``foreign_keys`` / ``primaryjoin`` / ``foreign()``.
    """

    @classmethod
    def define_tables(cls, metadata):
        Table("a", metadata,
              Column('id', Integer, primary_key=True)
              )
        # two FK paths from b to a -> ambiguous without disambiguation
        Table("b", metadata,
              Column('id', Integer, primary_key=True),
              Column('aid_1', Integer, ForeignKey('a.id')),
              Column('aid_2', Integer, ForeignKey('a.id')),
              )
        # association table with no FKs at all
        Table("atob", metadata,
              Column('aid', Integer),
              Column('bid', Integer),
              )
        # association table with two FK paths to *each* side
        Table("atob_ambiguous", metadata,
              Column('aid1', Integer, ForeignKey('a.id')),
              Column('bid1', Integer, ForeignKey('b.id')),
              Column('aid2', Integer, ForeignKey('a.id')),
              Column('bid2', Integer, ForeignKey('b.id')),
              )

    @classmethod
    def setup_classes(cls):
        class A(cls.Basic):
            pass

        class B(cls.Basic):
            pass

    def test_ambiguous_fks_o2m(self):
        # no hints at all: configuration must raise the "ambiguous join"
        # error naming the relationship
        A, B = self.classes.A, self.classes.B
        a, b = self.tables.a, self.tables.b
        mapper(A, a, properties={
            'bs': relationship(B)
        })
        mapper(B, b)
        self._assert_raises_ambig_join(
            configure_mappers,
            "A.bs",
            None
        )

    def test_with_fks_o2m(self):
        # foreign_keys selects aid_1 from the two candidate columns
        A, B = self.classes.A, self.classes.B
        a, b = self.tables.a, self.tables.b
        mapper(A, a, properties={
            'bs': relationship(B, foreign_keys=b.c.aid_1)
        })
        mapper(B, b)
        sa.orm.configure_mappers()
        assert A.bs.property.primaryjoin.compare(
            a.c.id == b.c.aid_1
        )
        eq_(
            A.bs.property._calculated_foreign_keys,
            set([b.c.aid_1])
        )

    def test_with_pj_o2m(self):
        # an explicit primaryjoin against a real FK column also resolves it
        A, B = self.classes.A, self.classes.B
        a, b = self.tables.a, self.tables.b
        mapper(A, a, properties={
            'bs': relationship(B, primaryjoin=a.c.id == b.c.aid_1)
        })
        mapper(B, b)
        sa.orm.configure_mappers()
        assert A.bs.property.primaryjoin.compare(
            a.c.id == b.c.aid_1
        )
        eq_(
            A.bs.property._calculated_foreign_keys,
            set([b.c.aid_1])
        )

    def test_with_annotated_pj_o2m(self):
        # ... and so does a foreign() annotation inside the primaryjoin
        A, B = self.classes.A, self.classes.B
        a, b = self.tables.a, self.tables.b
        mapper(A, a, properties={
            'bs': relationship(B, primaryjoin=a.c.id == foreign(b.c.aid_1))
        })
        mapper(B, b)
        sa.orm.configure_mappers()
        assert A.bs.property.primaryjoin.compare(
            a.c.id == b.c.aid_1
        )
        eq_(
            A.bs.property._calculated_foreign_keys,
            set([b.c.aid_1])
        )

    def test_no_fks_m2m(self):
        # secondary table with no FKs: "could not determine join" error
        A, B = self.classes.A, self.classes.B
        a, b, a_to_b = self.tables.a, self.tables.b, self.tables.atob
        mapper(A, a, properties={
            'bs': relationship(B, secondary=a_to_b)
        })
        mapper(B, b)
        self._assert_raises_no_join(
            sa.orm.configure_mappers,
            "A.bs", a_to_b,
        )

    def test_ambiguous_fks_m2m(self):
        # secondary table with duplicate FK paths: ambiguous-join error
        A, B = self.classes.A, self.classes.B
        a, b, a_to_b = self.tables.a, self.tables.b, self.tables.atob_ambiguous
        mapper(A, a, properties={
            'bs': relationship(B, secondary=a_to_b)
        })
        mapper(B, b)
        self._assert_raises_ambig_join(
            configure_mappers,
            "A.bs",
            "atob_ambiguous"
        )

    def test_with_fks_m2m(self):
        # explicit foreign_keys picks one FK pair; configuration succeeds
        A, B = self.classes.A, self.classes.B
        a, b, a_to_b = self.tables.a, self.tables.b, self.tables.atob_ambiguous
        mapper(A, a, properties={
            'bs': relationship(B, secondary=a_to_b,
                               foreign_keys=[a_to_b.c.aid1, a_to_b.c.bid1])
        })
        mapper(B, b)
        sa.orm.configure_mappers()
class SecondaryNestedJoinTest(fixtures.MappedTest, AssertsCompiledSQL,
                              testing.AssertsExecutionResults):
    """test support for a relationship where the 'secondary' table is a
    compound join().

    join() and joinedload() should use a "flat" alias, lazyloading needs
    to ensure the join renders.

    """
    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        # a -> b -> d, plus c referencing both a and d; A.d is routed
        # through the join of b/d/c used as "secondary" below.
        Table(
            'a', metadata,
            Column(
                'id', Integer, primary_key=True,
                test_needs_autoincrement=True),
            Column('name', String(30)),
            Column('b_id', ForeignKey('b.id'))
        )
        Table('b', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(30)),
              Column('d_id', ForeignKey('d.id'))
              )
        Table('c', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(30)),
              Column('a_id', ForeignKey('a.id')),
              Column('d_id', ForeignKey('d.id'))
              )
        Table('d', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('name', String(30)),
              )

    @classmethod
    def setup_classes(cls):
        class A(cls.Comparable):
            pass

        class B(cls.Comparable):
            pass

        class C(cls.Comparable):
            pass

        class D(cls.Comparable):
            pass

    @classmethod
    def setup_mappers(cls):
        A, B, C, D = cls.classes.A, cls.classes.B, cls.classes.C, cls.classes.D
        a, b, c, d = cls.tables.a, cls.tables.b, cls.tables.c, cls.tables.d
        # the "secondary" is itself a compound join of b, d and c
        j = sa.join(b, d, b.c.d_id == d.c.id).join(c, c.c.d_id == d.c.id)
        #j = join(b, d, b.c.d_id == d.c.id).join(c, c.c.d_id == d.c.id).alias()
        mapper(A, a, properties={
            "b": relationship(B),
            "d": relationship(
                D, secondary=j,
                primaryjoin=and_(a.c.b_id == b.c.id, a.c.id == c.c.a_id),
                secondaryjoin=d.c.id == b.c.d_id,
                #primaryjoin=and_(a.c.b_id == j.c.b_id, a.c.id == j.c.c_a_id),
                #secondaryjoin=d.c.id == j.c.b_d_id,
                uselist=False,
                viewonly=True
            )
        })
        mapper(B, b, properties={
            "d": relationship(D)
        })
        mapper(C, c, properties={
            "a": relationship(A),
            "d": relationship(D)
        })
        mapper(D, d)

    @classmethod
    def insert_data(cls):
        A, B, C, D = cls.classes.A, cls.classes.B, cls.classes.C, cls.classes.D
        sess = Session()
        a1, a2, a3, a4 = A(name='a1'), A(name='a2'), A(name='a3'), A(name='a4')
        b1, b2, b3, b4 = B(name='b1'), B(name='b2'), B(name='b3'), B(name='b4')
        c1, c2, c3, c4 = C(name='c1'), C(name='c2'), C(name='c3'), C(name='c4')
        d1, d2 = D(name='d1'), D(name='d2')

        a1.b = b1
        a2.b = b2
        a3.b = b3
        a4.b = b4

        c1.a = a1
        c2.a = a2
        c3.a = a2
        c4.a = a4

        c1.d = d1
        c2.d = d2
        c3.d = d1
        c4.d = d2

        b1.d = d1
        b2.d = d1
        b3.d = d2
        b4.d = d2

        # NOTE(review): c3 is never added here and c4 appears twice —
        # looks like a typo, but the class-level ``mapping`` expectations
        # below (a2/a3 -> None) depend on c3 being absent from the
        # database; confirm intent before "fixing".
        sess.add_all([a1, a2, a3, a4, b1, b2, b3, b4, c1, c2, c4, c4, d1, d2])
        sess.commit()

    def test_render_join(self):
        # join() must render the secondary join() as a flat alias
        A, D = self.classes.A, self.classes.D
        sess = Session()
        self.assert_compile(
            sess.query(A).join(A.d),
            "SELECT a.id AS a_id, a.name AS a_name, a.b_id AS a_b_id "
            "FROM a JOIN (b AS b_1 JOIN d AS d_1 ON b_1.d_id = d_1.id "
            "JOIN c AS c_1 ON c_1.d_id = d_1.id) ON a.b_id = b_1.id "
            "AND a.id = c_1.a_id JOIN d ON d.id = b_1.d_id",
            dialect="postgresql"
        )

    def test_render_joinedload(self):
        # joinedload() likewise aliases the nested secondary join
        A, D = self.classes.A, self.classes.D
        sess = Session()
        self.assert_compile(
            sess.query(A).options(joinedload(A.d)),
            "SELECT a.id AS a_id, a.name AS a_name, a.b_id AS a_b_id, "
            "d_1.id AS d_1_id, d_1.name AS d_1_name FROM a LEFT OUTER JOIN "
            "(b AS b_1 JOIN d AS d_2 ON b_1.d_id = d_2.id JOIN c AS c_1 "
            "ON c_1.d_id = d_2.id JOIN d AS d_1 ON d_1.id = b_1.d_id) "
            "ON a.b_id = b_1.id AND a.id = c_1.a_id",
            dialect="postgresql"
        )

    def test_render_lazyload(self):
        from sqlalchemy.testing.assertsql import CompiledSQL

        A, D = self.classes.A, self.classes.D
        sess = Session()
        a1 = sess.query(A).filter(A.name == 'a1').first()

        def go():
            a1.d

        # here, the "lazy" strategy has to ensure the "secondary"
        # table is part of the "select_from()", since it's a join().
        # referring to just the columns wont actually render all those
        # join conditions.
        self.assert_sql_execution(
            testing.db,
            go,
            CompiledSQL(
                "SELECT d.id AS d_id, d.name AS d_name FROM b "
                "JOIN d ON b.d_id = d.id JOIN c ON c.d_id = d.id "
                "WHERE :param_1 = b.id AND :param_2 = c.a_id "
                "AND d.id = b.d_id",
                {'param_1': a1.id, 'param_2': a1.id}
            )
        )

    # expected A.name -> D.name resolution for the loader-strategy tests
    mapping = {
        "a1": "d1",
        "a2": None,
        "a3": None,
        "a4": "d2"
    }

    def test_join(self):
        A, D = self.classes.A, self.classes.D
        sess = Session()
        for a, d in sess.query(A, D).outerjoin(A.d):
            eq_(self.mapping[a.name], d.name if d is not None else None)

    def test_joinedload(self):
        A, D = self.classes.A, self.classes.D
        sess = Session()
        for a in sess.query(A).options(joinedload(A.d)):
            d = a.d
            eq_(self.mapping[a.name], d.name if d is not None else None)

    def test_lazyload(self):
        A, D = self.classes.A, self.classes.D
        sess = Session()
        for a in sess.query(A):
            d = a.d
            eq_(self.mapping[a.name], d.name if d is not None else None)
class InvalidRelationshipEscalationTest(
        _RelationshipErrors, fixtures.MappedTest):
    """Under-determined (non-FK, non-equality, ambiguous) join
    conditions must escalate to the specific configuration errors
    provided by the ``_RelationshipErrors`` mixin; ``viewonly=True``
    relaxes the equality requirement when real FKs are present.
    """

    @classmethod
    def define_tables(cls, metadata):
        # foos/bars have *no* FK between them; the *_with_fks variants do
        Table('foos', metadata,
              Column('id', Integer, primary_key=True),
              Column('fid', Integer))
        Table('bars', metadata,
              Column('id', Integer, primary_key=True),
              Column('fid', Integer))

        Table('foos_with_fks', metadata,
              Column('id', Integer, primary_key=True),
              Column('fid', Integer, ForeignKey('foos_with_fks.id')))
        Table('bars_with_fks', metadata,
              Column('id', Integer, primary_key=True),
              Column('fid', Integer, ForeignKey('foos_with_fks.id')))

    @classmethod
    def setup_classes(cls):
        class Foo(cls.Basic):
            pass

        class Bar(cls.Basic):
            pass

    def test_no_join(self):
        # no FKs, no primaryjoin: "could not determine join condition"
        bars, Foo, Bar, foos = (self.tables.bars,
                                self.classes.Foo,
                                self.classes.Bar,
                                self.tables.foos)
        mapper(Foo, foos, properties={
            'bars': relationship(Bar)})
        mapper(Bar, bars)
        self._assert_raises_no_join(sa.orm.configure_mappers,
                                    "Foo.bars", None
                                    )

    def test_no_join_self_ref(self):
        bars, Foo, Bar, foos = (self.tables.bars,
                                self.classes.Foo,
                                self.classes.Bar,
                                self.tables.foos)
        mapper(Foo, foos, properties={
            'foos': relationship(Foo)})
        mapper(Bar, bars)
        self._assert_raises_no_join(
            configure_mappers,
            "Foo.foos",
            None
        )

    def test_no_equated(self):
        # primaryjoin present but no FK columns are involved
        bars, Foo, Bar, foos = (self.tables.bars,
                                self.classes.Foo,
                                self.classes.Bar,
                                self.tables.foos)
        mapper(Foo, foos, properties={
            'bars': relationship(Bar,
                                 primaryjoin=foos.c.id > bars.c.fid)})
        mapper(Bar, bars)
        self._assert_raises_no_relevant_fks(
            configure_mappers,
            "foos.id > bars.fid", "Foo.bars", "primary"
        )

    def test_no_equated_fks(self):
        # FKs declared but the join has no equality comparison
        bars, Foo, Bar, foos = (self.tables.bars,
                                self.classes.Foo,
                                self.classes.Bar,
                                self.tables.foos)
        mapper(Foo, foos, properties={
            'bars': relationship(Bar,
                                 primaryjoin=foos.c.id > bars.c.fid,
                                 foreign_keys=bars.c.fid)})
        mapper(Bar, bars)
        self._assert_raises_no_equality(
            sa.orm.configure_mappers,
            "foos.id > bars.fid", "Foo.bars", "primary"
        )

    def test_no_equated_wo_fks_works_on_relaxed(self):
        foos_with_fks, Foo, Bar, bars_with_fks, foos = (
            self.tables.foos_with_fks,
            self.classes.Foo,
            self.classes.Bar,
            self.tables.bars_with_fks,
            self.tables.foos)

        # very unique - the join between parent/child
        # has no fks, but there is an fk join between two other
        # tables in the join condition, for those users that try creating
        # these big-long-string-of-joining-many-tables primaryjoins.
        # in this case we don't get eq_pairs, but we hit the
        # "works if viewonly" rule. so here we add another clause regarding
        # "try foreign keys".
        mapper(Foo, foos, properties={
            'bars': relationship(Bar,
                                 primaryjoin=and_(
                                     bars_with_fks.c.fid == foos_with_fks.c.id,
                                     foos_with_fks.c.id == foos.c.id,
                                 )
                                 )})
        mapper(Bar, bars_with_fks)
        self._assert_raises_no_equality(
            sa.orm.configure_mappers,
            "bars_with_fks.fid = foos_with_fks.id "
            "AND foos_with_fks.id = foos.id",
            "Foo.bars", "primary"
        )

    def test_ambiguous_fks(self):
        # columns from *both* sides named as foreign -> direction unclear
        bars, Foo, Bar, foos = (self.tables.bars,
                                self.classes.Foo,
                                self.classes.Bar,
                                self.tables.foos)
        mapper(Foo, foos, properties={
            'bars': relationship(Bar,
                                 primaryjoin=foos.c.id == bars.c.fid,
                                 foreign_keys=[foos.c.id, bars.c.fid])})
        mapper(Bar, bars)
        self._assert_raises_ambiguous_direction(
            sa.orm.configure_mappers,
            "Foo.bars"
        )

    def test_ambiguous_remoteside_o2m(self):
        # remote_side containing columns from both sides -> no
        # local/remote pairs can be determined
        bars, Foo, Bar, foos = (self.tables.bars,
                                self.classes.Foo,
                                self.classes.Bar,
                                self.tables.foos)
        mapper(Foo, foos, properties={
            'bars': relationship(Bar,
                                 primaryjoin=foos.c.id == bars.c.fid,
                                 foreign_keys=[bars.c.fid],
                                 remote_side=[foos.c.id, bars.c.fid],
                                 viewonly=True
                                 )})
        mapper(Bar, bars)
        self._assert_raises_no_local_remote(
            configure_mappers,
            "Foo.bars",
        )

    def test_ambiguous_remoteside_m2o(self):
        bars, Foo, Bar, foos = (self.tables.bars,
                                self.classes.Foo,
                                self.classes.Bar,
                                self.tables.foos)
        mapper(Foo, foos, properties={
            'bars': relationship(Bar,
                                 primaryjoin=foos.c.id == bars.c.fid,
                                 foreign_keys=[foos.c.id],
                                 remote_side=[foos.c.id, bars.c.fid],
                                 viewonly=True
                                 )})
        mapper(Bar, bars)
        self._assert_raises_no_local_remote(
            configure_mappers,
            "Foo.bars",
        )

    def test_no_equated_self_ref_no_fks(self):
        bars, Foo, Bar, foos = (self.tables.bars,
                                self.classes.Foo,
                                self.classes.Bar,
                                self.tables.foos)
        mapper(Foo, foos, properties={
            'foos': relationship(Foo,
                                 primaryjoin=foos.c.id > foos.c.fid)})
        mapper(Bar, bars)
        self._assert_raises_no_relevant_fks(
            configure_mappers,
            "foos.id > foos.fid", "Foo.foos", "primary"
        )

    def test_no_equated_self_ref_no_equality(self):
        bars, Foo, Bar, foos = (self.tables.bars,
                                self.classes.Foo,
                                self.classes.Bar,
                                self.tables.foos)
        mapper(Foo, foos, properties={
            'foos': relationship(Foo,
                                 primaryjoin=foos.c.id > foos.c.fid,
                                 foreign_keys=[foos.c.fid])})
        mapper(Bar, bars)
        self._assert_raises_no_equality(configure_mappers,
                                        "foos.id > foos.fid", "Foo.foos", "primary"
                                        )

    def test_no_equated_viewonly(self):
        # viewonly relaxes the equality requirement, but only when the
        # columns involved carry real FKs (second mapping succeeds)
        bars, Bar, bars_with_fks, foos_with_fks, Foo, foos = (
            self.tables.bars,
            self.classes.Bar,
            self.tables.bars_with_fks,
            self.tables.foos_with_fks,
            self.classes.Foo,
            self.tables.foos)
        mapper(Foo, foos, properties={
            'bars': relationship(Bar,
                                 primaryjoin=foos.c.id > bars.c.fid,
                                 viewonly=True)})
        mapper(Bar, bars)
        self._assert_raises_no_relevant_fks(
            sa.orm.configure_mappers,
            "foos.id > bars.fid", "Foo.bars", "primary"
        )
        sa.orm.clear_mappers()
        mapper(Foo, foos_with_fks, properties={
            'bars': relationship(
                Bar,
                primaryjoin=foos_with_fks.c.id > bars_with_fks.c.fid,
                viewonly=True)})
        mapper(Bar, bars_with_fks)
        sa.orm.configure_mappers()

    def test_no_equated_self_ref_viewonly(self):
        bars, Bar, bars_with_fks, foos_with_fks, Foo, foos = (
            self.tables.bars,
            self.classes.Bar,
            self.tables.bars_with_fks,
            self.tables.foos_with_fks,
            self.classes.Foo,
            self.tables.foos)
        mapper(Foo, foos, properties={
            'foos': relationship(Foo,
                                 primaryjoin=foos.c.id > foos.c.fid,
                                 viewonly=True)})
        mapper(Bar, bars)
        self._assert_raises_no_relevant_fks(
            sa.orm.configure_mappers,
            "foos.id > foos.fid", "Foo.foos", "primary"
        )
        sa.orm.clear_mappers()
        mapper(Foo, foos_with_fks, properties={
            'foos': relationship(
                Foo,
                primaryjoin=foos_with_fks.c.id > foos_with_fks.c.fid,
                viewonly=True)})
        mapper(Bar, bars_with_fks)
        sa.orm.configure_mappers()

    def test_no_equated_self_ref_viewonly_fks(self):
        # explicit foreign_keys also satisfies the viewonly case
        Foo, foos = self.classes.Foo, self.tables.foos
        mapper(Foo, foos, properties={
            'foos': relationship(Foo,
                                 primaryjoin=foos.c.id > foos.c.fid,
                                 viewonly=True,
                                 foreign_keys=[foos.c.fid])})
        sa.orm.configure_mappers()
        eq_(Foo.foos.property.local_remote_pairs, [(foos.c.id, foos.c.fid)])

    def test_equated(self):
        # equality join but no FKs -> error; with FKs it configures
        bars, Bar, bars_with_fks, foos_with_fks, Foo, foos = (
            self.tables.bars,
            self.classes.Bar,
            self.tables.bars_with_fks,
            self.tables.foos_with_fks,
            self.classes.Foo,
            self.tables.foos)
        mapper(Foo, foos, properties={
            'bars': relationship(Bar,
                                 primaryjoin=foos.c.id == bars.c.fid)})
        mapper(Bar, bars)
        self._assert_raises_no_relevant_fks(
            configure_mappers,
            "foos.id = bars.fid", "Foo.bars", "primary"
        )
        sa.orm.clear_mappers()
        mapper(Foo, foos_with_fks, properties={
            'bars': relationship(
                Bar,
                primaryjoin=foos_with_fks.c.id == bars_with_fks.c.fid)})
        mapper(Bar, bars_with_fks)
        sa.orm.configure_mappers()

    def test_equated_self_ref(self):
        Foo, foos = self.classes.Foo, self.tables.foos
        mapper(Foo, foos, properties={
            'foos': relationship(Foo,
                                 primaryjoin=foos.c.id == foos.c.fid)})
        self._assert_raises_no_relevant_fks(
            configure_mappers,
            "foos.id = foos.fid", "Foo.foos", "primary"
        )

    def test_equated_self_ref_wrong_fks(self):
        # foreign_keys pointing at an unrelated table's column is as
        # useless as no foreign_keys at all
        bars, Foo, foos = (self.tables.bars,
                           self.classes.Foo,
                           self.tables.foos)
        mapper(Foo, foos, properties={
            'foos': relationship(Foo,
                                 primaryjoin=foos.c.id == foos.c.fid,
                                 foreign_keys=[bars.c.id])})
        self._assert_raises_no_relevant_fks(
            configure_mappers,
            "foos.id = foos.fid", "Foo.foos", "primary"
        )
class InvalidRelationshipEscalationTestM2M(
        _RelationshipErrors, fixtures.MappedTest):
    """Many-to-many variants of the escalation tests: under-determined
    primaryjoin/secondaryjoin conditions across a ``secondary`` table
    must raise the specific errors from ``_RelationshipErrors``.
    """

    @classmethod
    def define_tables(cls, metadata):
        Table('foos', metadata,
              Column('id', Integer, primary_key=True))
        # association table with no FKs
        Table('foobars', metadata,
              Column('fid', Integer), Column('bid', Integer))
        Table('bars', metadata,
              Column('id', Integer, primary_key=True))

        Table('foobars_with_fks', metadata,
              Column('fid', Integer, ForeignKey('foos.id')),
              Column('bid', Integer, ForeignKey('bars.id'))
              )

        # association table with extra unused columns and no FKs
        Table('foobars_with_many_columns', metadata,
              Column('fid', Integer),
              Column('bid', Integer),
              Column('fid1', Integer),
              Column('bid1', Integer),
              Column('fid2', Integer),
              Column('bid2', Integer),
              )

    @classmethod
    def setup_classes(cls):
        class Foo(cls.Basic):
            pass

        class Bar(cls.Basic):
            pass

    def test_no_join(self):
        # FK-less secondary, no joins given: "could not determine join"
        foobars, bars, Foo, Bar, foos = (self.tables.foobars,
                                         self.tables.bars,
                                         self.classes.Foo,
                                         self.classes.Bar,
                                         self.tables.foos)
        mapper(Foo, foos, properties={
            'bars': relationship(Bar, secondary=foobars)})
        mapper(Bar, bars)
        self._assert_raises_no_join(
            configure_mappers,
            "Foo.bars",
            "foobars"
        )

    def test_no_secondaryjoin(self):
        # a primaryjoin alone doesn't help; the secondaryjoin is missing
        foobars, bars, Foo, Bar, foos = (self.tables.foobars,
                                         self.tables.bars,
                                         self.classes.Foo,
                                         self.classes.Bar,
                                         self.tables.foos)
        mapper(Foo, foos, properties={
            'bars': relationship(Bar,
                                 secondary=foobars,
                                 primaryjoin=foos.c.id > foobars.c.fid)})
        mapper(Bar, bars)
        self._assert_raises_no_join(
            configure_mappers,
            "Foo.bars",
            "foobars"
        )

    def test_no_fks(self):
        # explicit equality primaryjoin + secondaryjoin make the FK-less
        # secondary usable; synchronize pairs are derived from the joins
        foobars_with_many_columns, bars, Bar, foobars, Foo, foos = (
            self.tables.foobars_with_many_columns,
            self.tables.bars,
            self.classes.Bar,
            self.tables.foobars,
            self.classes.Foo,
            self.tables.foos)
        mapper(Foo, foos, properties={
            'bars': relationship(Bar, secondary=foobars,
                                 primaryjoin=foos.c.id == foobars.c.fid,
                                 secondaryjoin=foobars.c.bid == bars.c.id)})
        mapper(Bar, bars)
        sa.orm.configure_mappers()
        eq_(
            Foo.bars.property.synchronize_pairs,
            [(foos.c.id, foobars.c.fid)]
        )
        eq_(
            Foo.bars.property.secondary_synchronize_pairs,
            [(bars.c.id, foobars.c.bid)]
        )

        sa.orm.clear_mappers()
        mapper(Foo, foos, properties={
            'bars': relationship(
                Bar,
                secondary=foobars_with_many_columns,
                primaryjoin=foos.c.id ==
                foobars_with_many_columns.c.fid,
                secondaryjoin=foobars_with_many_columns.c.bid ==
                bars.c.id)})
        mapper(Bar, bars)
        sa.orm.configure_mappers()
        eq_(
            Foo.bars.property.synchronize_pairs,
            [(foos.c.id, foobars_with_many_columns.c.fid)]
        )
        eq_(
            Foo.bars.property.secondary_synchronize_pairs,
            [(bars.c.id, foobars_with_many_columns.c.bid)]
        )

    def test_local_col_setup(self):
        foobars_with_fks, bars, Bar, Foo, foos = (
            self.tables.foobars_with_fks,
            self.tables.bars,
            self.classes.Bar,
            self.classes.Foo,
            self.tables.foos)

        # ensure m2m backref is set up with correct annotations
        # [ticket:2578]
        mapper(Foo, foos, properties={
            'bars': relationship(Bar, secondary=foobars_with_fks, backref="foos")
        })
        mapper(Bar, bars)
        sa.orm.configure_mappers()
        eq_(
            Foo.bars.property._join_condition.local_columns,
            set([foos.c.id])
        )
        eq_(
            Bar.foos.property._join_condition.local_columns,
            set([bars.c.id])
        )

    def test_bad_primaryjoin(self):
        # non-equality primaryjoin: error without viewonly; with real
        # FKs and viewonly=True the last mapping configures
        foobars_with_fks, bars, Bar, foobars, Foo, foos = (
            self.tables.foobars_with_fks,
            self.tables.bars,
            self.classes.Bar,
            self.tables.foobars,
            self.classes.Foo,
            self.tables.foos)
        mapper(Foo, foos, properties={
            'bars': relationship(Bar,
                                 secondary=foobars,
                                 primaryjoin=foos.c.id > foobars.c.fid,
                                 secondaryjoin=foobars.c.bid <= bars.c.id)})
        mapper(Bar, bars)
        self._assert_raises_no_equality(
            configure_mappers,
            'foos.id > foobars.fid',
            "Foo.bars",
            "primary")

        sa.orm.clear_mappers()
        mapper(Foo, foos, properties={
            'bars': relationship(
                Bar,
                secondary=foobars_with_fks,
                primaryjoin=foos.c.id > foobars_with_fks.c.fid,
                secondaryjoin=foobars_with_fks.c.bid <= bars.c.id)})
        mapper(Bar, bars)
        self._assert_raises_no_equality(
            configure_mappers,
            'foos.id > foobars_with_fks.fid',
            "Foo.bars",
            "primary")

        sa.orm.clear_mappers()
        mapper(Foo, foos, properties={
            'bars': relationship(
                Bar,
                secondary=foobars_with_fks,
                primaryjoin=foos.c.id > foobars_with_fks.c.fid,
                secondaryjoin=foobars_with_fks.c.bid <= bars.c.id,
                viewonly=True)})
        mapper(Bar, bars)
        sa.orm.configure_mappers()

    def test_bad_secondaryjoin(self):
        # secondaryjoin references no foreign column
        foobars, bars, Foo, Bar, foos = (self.tables.foobars,
                                         self.tables.bars,
                                         self.classes.Foo,
                                         self.classes.Bar,
                                         self.tables.foos)
        mapper(Foo, foos, properties={
            'bars': relationship(Bar,
                                 secondary=foobars,
                                 primaryjoin=foos.c.id == foobars.c.fid,
                                 secondaryjoin=foobars.c.bid <= bars.c.id,
                                 foreign_keys=[foobars.c.fid])})
        mapper(Bar, bars)
        self._assert_raises_no_relevant_fks(
            configure_mappers,
            "foobars.bid <= bars.id",
            "Foo.bars",
            "secondary"
        )

    def test_no_equated_secondaryjoin(self):
        # foreign columns given, but the secondaryjoin has no equality
        foobars, bars, Foo, Bar, foos = (self.tables.foobars,
                                         self.tables.bars,
                                         self.classes.Foo,
                                         self.classes.Bar,
                                         self.tables.foos)
        mapper(Foo, foos, properties={
            'bars': relationship(Bar,
                                 secondary=foobars,
                                 primaryjoin=foos.c.id == foobars.c.fid,
                                 secondaryjoin=foobars.c.bid <= bars.c.id,
                                 foreign_keys=[foobars.c.fid, foobars.c.bid])})
        mapper(Bar, bars)
        self._assert_raises_no_equality(
            configure_mappers,
            "foobars.bid <= bars.id",
            "Foo.bars",
            "secondary"
        )
class ActiveHistoryFlagTest(_fixtures.FixtureTest):
    """``active_history=True`` must load the prior value of an attribute
    before replacing it, so ``attributes.get_history()`` reports the old
    value even on an expired instance.
    """

    run_inserts = None
    run_deletes = None

    def _test_attribute(self, obj, attrname, newvalue):
        """Commit obj (expiring its state), set ``attrname`` to
        ``newvalue`` and verify history is ([new], (), [old])."""
        sess = Session()
        sess.add(obj)
        oldvalue = getattr(obj, attrname)
        sess.commit()

        # expired
        assert attrname not in obj.__dict__

        setattr(obj, attrname, newvalue)
        eq_(
            attributes.get_history(obj, attrname),
            ([newvalue, ], (), [oldvalue, ])
        )

    def test_column_property_flag(self):
        User, users = self.classes.User, self.tables.users

        mapper(User, users, properties={
            'name': column_property(users.c.name,
                                    active_history=True)
        })
        u1 = User(name='jack')
        self._test_attribute(u1, 'name', 'ed')

    def test_relationship_property_flag(self):
        Address, addresses, users, User = (self.classes.Address,
                                           self.tables.addresses,
                                           self.tables.users,
                                           self.classes.User)

        mapper(Address, addresses, properties={
            'user': relationship(User, active_history=True)
        })
        mapper(User, users)
        u1 = User(name='jack')
        u2 = User(name='ed')
        a1 = Address(email_address='a1', user=u1)
        self._test_attribute(a1, 'user', u2)

    def test_composite_property_flag(self):
        Order, orders = self.classes.Order, self.tables.orders

        class MyComposite(object):
            def __init__(self, description, isopen):
                self.description = description
                self.isopen = isopen

            def __composite_values__(self):
                return [self.description, self.isopen]

            def __eq__(self, other):
                # equality deliberately compares description only;
                # the two composites in the test differ in description
                return isinstance(other, MyComposite) and \
                    other.description == self.description
        mapper(Order, orders, properties={
            'composite': composite(
                MyComposite,
                orders.c.description,
                orders.c.isopen,
                active_history=True)
        })
        o1 = Order(composite=MyComposite('foo', 1))
        self._test_attribute(o1, "composite", MyComposite('bar', 1))
class RelationDeprecationTest(fixtures.MappedTest):
    """test usage of the old 'relation' function."""

    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def define_tables(cls, metadata):
        Table('users_table', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String(64)))

        Table('addresses_table', metadata,
              Column('id', Integer, primary_key=True),
              Column('user_id', Integer, ForeignKey('users_table.id')),
              Column('email_address', String(128)),
              Column('purpose', String(16)),
              Column('bounces', Integer, default=0))

    @classmethod
    def setup_classes(cls):
        class User(cls.Basic):
            pass

        class Address(cls.Basic):
            pass

    @classmethod
    def fixtures(cls):
        return dict(
            users_table=(
                ('id', 'name'),
                (1, 'jack'),
                (2, 'ed'),
                (3, 'fred'),
                (4, 'chuck')),

            addresses_table=(
                ('id', 'user_id', 'email_address', 'purpose', 'bounces'),
                (1, 1, 'jack@jack.home', 'Personal', 0),
                (2, 1, 'jack@jack.bizz', 'Work', 1),
                (3, 2, 'ed@foo.bar', 'Personal', 0),
                (4, 3, 'fred@the.fred', 'Personal', 10)))

    def test_relation(self):
        # relation() is the legacy spelling of relationship(); it must
        # still configure and query correctly
        addresses_table, User, users_table, Address = (
            self.tables.addresses_table,
            self.classes.User,
            self.tables.users_table,
            self.classes.Address)

        mapper(User, users_table, properties=dict(
            addresses=relation(Address, backref='user'),
        ))
        mapper(Address, addresses_table)

        session = create_session()

        session.query(User).filter(User.addresses.any(
            Address.email_address == 'ed@foo.bar')).one()
| {
"content_hash": "22909fd92f1ebb3ca4ac87fd343ba1b0",
"timestamp": "",
"source": "github",
"line_count": 3988,
"max_line_length": 83,
"avg_line_length": 32.86935807422267,
"alnum_prop": 0.4940915297940999,
"repo_name": "sandan/sqlalchemy",
"id": "00d41604ca234b1cd2c440bae34a7cf1b0abb463",
"size": "131083",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/orm/test_relationships.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46256"
},
{
"name": "Python",
"bytes": "9110689"
}
],
"symlink_target": ""
} |
import os
import unittest
import json
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
import numpy
from molml.base import BaseFeature, SetMergeMixin, InputTypeMixin, _func_star
from molml.base import EncodedFeature, FormMixin
from .constants import METHANE_ELEMENTS, METHANE_COORDS, METHANE_PATH
from .constants import METHANE, METHANE_NUMBERS, METHANE_CONNECTIONS
# Per-molecule element counts for methane (presumably [C, H] == [1, 4] —
# TODO confirm ordering against the features under test).
METHANE_ATOMS = numpy.array([[1, 4]])
# NOTE(review): the docstring below is fixture data (presumably parsed by
# BaseFeature for reference extraction) — do not edit it.
class TestFeature1(BaseFeature):
    '''
    Some example doc string.
    References
    ----------
    Doe, J. Nature. (2016).
    '''
    # attribute names exposed as labels / saved as fitted state
    LABELS = ('labels', )
    ATTRIBUTES = ('data', )

    def __init__(self, input_type='list', n_jobs=1, data=None, value=None):
        super(TestFeature1, self).__init__(input_type, n_jobs)
        # labels deliberately not sorted, to exercise label handling
        self.labels = ('C', 'B', 'A')
        self.data = data
        self.value = value

    def fit(self, X, y=None):
        # "fitting" just records sentinel state
        self.data = [1]

    def _para_transform(self, X):
        # constant per-molecule feature
        return [1]
# NOTE(review): docstring is fixture data — do not edit.
class TestFeature2(BaseFeature):
    '''
    Some example doc string.
    References
    ----------
    Doe, J. Nature. (2016).
    Smith, J. Science. (2010).
    '''
    # two label attributes and two state attributes
    LABELS = ('labels1', 'labels2')
    ATTRIBUTES = ('data1', 'data2')

    def __init__(self, value=None):
        # deliberately does not call super().__init__
        self.labels1 = ('A', 'B', 'C')
        self.labels2 = ('DD', 'CC')
        self.data1 = value
        self.data2 = value
# NOTE(review): docstring is fixture data — do not edit.
class TestFeature3(BaseFeature):
    '''
    Some example doc string.
    References
    ----------
    Doe, J. Nature. (2016).
    Smith, J. Science. (2010).
    Other
    -----
    Something else.
    '''
    # feature declaring no labels and no fitted attributes
    LABELS = None
    ATTRIBUTES = None
# NOTE(review): docstring is fixture data — do not edit.
class TestFeature4(BaseFeature):
    '''
    Some example doc string.
    References
    ----------
    Doe, J. Nature. (2016).
    Smith, J. Science. (2010).
    '''
    # label entry as a (method_name, attribute_name) pair: the method
    # 'func' is applied to self.labels
    LABELS = (('func', 'labels'), )
    ATTRIBUTES = ('data1', 'data2')

    def __init__(self, value=None):
        self.labels = ('A', 'B', 'C')
        self.data1 = value
        self.data2 = value

    def func(self, labels):
        # identity transform over the labels
        return labels
# NOTE(review): empty docstring is part of the fixture — do not edit.
class TestFeature5(BaseFeature):
    '''
    '''
    # (method_name, None): 'func' is called with no attribute argument
    LABELS = (('func', None), )
    ATTRIBUTES = ('data1', 'data2')

    def __init__(self, value=None):
        self.labels = ('A', 'B', 'C')
        self.data1 = value
        self.data2 = value

    def func(self):
        return ['A', 'B', 'C']
# NOTE(review): empty docstring is part of the fixture — do not edit.
class TestFeature6(BaseFeature):
    ''''''
    LABELS = (('func', None), )

    def __init__(self, value=None):
        pass

    def func(self):
        # label accessor that always fails, for error-path tests
        raise ValueError
#################################################
class OtherTest(unittest.TestCase):
    def test__func_star(self):
        """_func_star unpacks a (callable, *args) tuple and applies it."""
        add = lambda x, y: x + y
        self.assertEqual(_func_star((add, 2, 3)), 5)
class BaseFeatureTest(unittest.TestCase):
def test_map_n_jobs_negative(self):
    """map() with n_jobs=-1 (all cores) matches a serial computation."""
    feat = BaseFeature(n_jobs=-1)
    expected = [value ** 2 for value in range(10)]
    self.assertEqual(feat.map(lambda value: value ** 2, range(10)), expected)
def test_map_n_jobs_one(self):
    """map() with a single job matches a serial computation."""
    feat = BaseFeature(n_jobs=1)
    expected = [value ** 2 for value in range(10)]
    self.assertEqual(feat.map(lambda value: value ** 2, range(10)), expected)
def test_map_n_jobs_greater(self):
    """map() with multiple jobs matches a serial computation."""
    feat = BaseFeature(n_jobs=2)
    expected = [value ** 2 for value in range(10)]
    self.assertEqual(feat.map(lambda value: value ** 2, range(10)), expected)
def test_reduce_n_jobs_negative(self):
    """reduce() with n_jobs=-1 sums correctly."""
    feat = BaseFeature(n_jobs=-1)
    total = feat.reduce(lambda acc, value: acc + value, range(10))
    self.assertEqual(total, sum(range(10)))
def test_reduce_n_jobs_one(self):
    """reduce() with a single job sums correctly."""
    feat = BaseFeature(n_jobs=1)
    total = feat.reduce(lambda acc, value: acc + value, range(10))
    self.assertEqual(total, sum(range(10)))
def test_reduce_n_jobs_greater(self):
    """reduce() with multiple jobs sums correctly."""
    feat = BaseFeature(n_jobs=2)
    total = feat.reduce(lambda acc, value: acc + value, range(10))
    self.assertEqual(total, sum(range(10)))
def test_convert_input_list(self):
a = BaseFeature(input_type="list")
data = a.convert_input(METHANE)
self.assertEqual(data.connections, METHANE_CONNECTIONS)
self.assertEqual(data.elements.tolist(), METHANE_ELEMENTS)
self.assertEqual(data.coords.tolist(), METHANE_COORDS.tolist())
def test_convert_input_list_numbers(self):
a = BaseFeature(input_type="list")
data = a.convert_input([METHANE_NUMBERS, METHANE_COORDS])
self.assertEqual(data.numbers.tolist(), METHANE_NUMBERS)
self.assertEqual(data.connections, METHANE_CONNECTIONS)
self.assertEqual(data.coords.tolist(), METHANE_COORDS.tolist())
def test_convert_input_list_connections(self):
a = BaseFeature(input_type="list")
data = a.convert_input([METHANE_ELEMENTS, METHANE_COORDS,
METHANE_CONNECTIONS])
self.assertEqual(data.connections, METHANE_CONNECTIONS)
self.assertEqual(data.elements.tolist(), METHANE_ELEMENTS)
self.assertEqual(data.coords.tolist(), METHANE_COORDS.tolist())
def test_convert_input_filename(self):
a = BaseFeature(input_type="filename")
base_path = os.path.join(os.path.dirname(__file__), "data", "methane")
for ending in ('.xyz', '.out'):
path = base_path + ending
data = a.convert_input(path)
self.assertEqual(data.elements.tolist(), METHANE_ELEMENTS)
self.assertEqual(data.connections, METHANE_CONNECTIONS)
try:
numpy.testing.assert_array_almost_equal(
data.coords, METHANE_COORDS)
except AssertionError as e:
self.fail(e)
def test_convert_input_ele_coords(self):
a = BaseFeature(input_type=["elements", "coords"])
data = a.convert_input([METHANE_ELEMENTS, METHANE_COORDS])
self.assertEqual(data.elements.tolist(), METHANE_ELEMENTS)
try:
numpy.testing.assert_array_almost_equal(
data.coords, METHANE_COORDS)
except AssertionError as e:
self.fail(e)
def test_convert_input_num_ele(self):
a = BaseFeature(input_type=["numbers", "elements"])
data = a.convert_input([METHANE_NUMBERS, METHANE_ELEMENTS])
self.assertEqual(data.elements.tolist(), METHANE_ELEMENTS)
self.assertEqual(data.numbers.tolist(), METHANE_NUMBERS)
def test_convert_input_invalid_list(self):
a = BaseFeature(input_type=["error"])
with self.assertRaises(TypeError):
a.convert_input("bad data")
def test_convert_input_error(self):
a = BaseFeature(input_type="error")
with self.assertRaises(ValueError):
a.convert_input("bad data")
def test_convert_input_callable(self):
a = BaseFeature(input_type=lambda x: (x, x ** 2))
res = a.convert_input(10)
self.assertEqual(res, (10, 100))
def test_slugify(self):
a = TestFeature1()
expected = [
'TestFeature1',
'data=None',
'value=None',
]
self.assertEqual(a.slugify(), '__'.join(expected))
def test_get_params(self):
a = BaseFeature(n_jobs=10)
expected = {"input_type": "list", "n_jobs": 10}
self.assertEqual(a.get_params(), expected)
def test_set_params(self):
a = BaseFeature(n_jobs=10)
new = {
"input_type": "coords",
"n_jobs": 100,
"fake": None,
}
a.set_params(**new)
self.assertEqual(a.input_type, "coords")
self.assertEqual(a.n_jobs, 100)
with self.assertRaises(AttributeError):
a.fake
def test_get_labels(self):
a = TestFeature1()
self.assertEqual(a.get_labels(), ('C', 'B', 'A'))
b = TestFeature2()
self.assertEqual(b.get_labels(), ('A', 'B', 'C', 'DD', 'CC'))
c = TestFeature3()
self.assertEqual(c.get_labels(), tuple())
d = TestFeature4()
self.assertEqual(d.get_labels(), ('A', 'B', 'C'))
e = TestFeature5()
self.assertEqual(e.get_labels(), ('A', 'B', 'C'))
f = TestFeature6()
with self.assertRaises(ValueError):
f.get_labels()
def test_check_fit(self):
a = TestFeature1(data=1)
self.assertIsNone(a.check_fit())
b = TestFeature2(value=1)
self.assertIsNone(b.check_fit())
c = TestFeature3()
self.assertIsNone(c.check_fit())
with self.assertRaises(ValueError):
a = TestFeature1()
a.check_fit()
with self.assertRaises(ValueError):
b = TestFeature2()
b.check_fit()
def test_get_citation(self):
citation = "MolML https://github.com/crcollins/molml"
self.assertEqual(citation, BaseFeature.get_citation())
self.assertEqual("Doe, J. Nature. (2016).",
TestFeature1.get_citation())
expected = "Doe, J. Nature. (2016).\n"
expected += "Smith, J. Science. (2010)."
self.assertEqual(expected, TestFeature2.get_citation())
self.assertEqual(expected, TestFeature3.get_citation())
def test_save_json(self):
a = TestFeature1()
f = StringIO()
a.save_json(f)
string = f.getvalue()
data = json.loads(string)
base = a.__module__
expected = {'parameters': {'n_jobs': 1,
'input_type': 'list',
'data': None,
'value': None},
'attributes': {'data': None},
'transformer': base + '.TestFeature1'}
self.assertEqual(data, expected)
path = '/tmp/somefile.json'
a.save_json(path)
with open(path, 'r') as f:
data = json.load(f)
self.assertEqual(data, expected)
def test_to_json_no_attributes(self):
a = TestFeature3()
data = a.to_json()
base = a.__module__
expected = {'parameters': {'n_jobs': 1,
'input_type': 'list'},
'attributes': {},
'transformer': base + '.TestFeature3'}
self.assertEqual(data, expected)
def test_save_json_nested_obj(self):
a = TestFeature1(value=TestFeature1())
data = a.to_json()
base = a.__module__
expected = {
'attributes': {'data': None},
'parameters': {
'n_jobs': 1,
'input_type': 'list',
'value': {
'parameters': {
'n_jobs': 1,
'input_type': 'list',
'value': None,
'data': None,
},
'attributes': {'data': None},
'transformer': base + '.TestFeature1',
},
'data': None,
},
'transformer': base + '.TestFeature1'
}
self.assertEqual(data, expected)
def test_transform(self):
a = TestFeature1()
a.fit([1])
res = a.transform([1, 2, 3])
expected = numpy.array([[1], [1], [1]])
try:
numpy.testing.assert_array_almost_equal(res, expected)
except AssertionError as e:
self.fail(e)
def test_fit_transform(self):
a = TestFeature1()
res = a.fit_transform([1, 2, 3])
expected = numpy.array([[1], [1], [1]])
try:
numpy.testing.assert_array_almost_equal(res, expected)
except AssertionError as e:
self.fail(e)
class TestSetMergeMixin(unittest.TestCase):
    """Tests for SetMergeMixin: the set(s) returned by _para_fit for each
    input are merged across inputs and exposed as tuples on the named
    ATTRIBUTES after fit()."""
    def test_multiple_attributes(self):
        # _para_fit returns one set per ATTRIBUTES entry.
        class TestFeature(SetMergeMixin, BaseFeature):
            ATTRIBUTES = ("test1", "test2")
            def __init__(self, *args, **kwargs):
                super(TestFeature, self).__init__(*args, **kwargs)
            def _para_fit(self, X):
                return (set([1, 2, 3]), set([2, 3, 4]))
        a = TestFeature(input_type="filename")
        a.fit([METHANE_PATH, METHANE_PATH])
        self.assertEqual((1, 2, 3), a.test1)
        self.assertEqual((2, 3, 4), a.test2)
    def test_fit(self):
        # Single-attribute case: _para_fit returns a bare set.
        class TestFeature(SetMergeMixin, BaseFeature):
            ATTRIBUTES = ("test1", )
            def __init__(self, *args, **kwargs):
                super(TestFeature, self).__init__(*args, **kwargs)
            def _para_fit(self, X):
                return set([1, 2, 3])
        a = TestFeature(input_type="filename")
        a.fit([METHANE_PATH, METHANE_PATH])
        self.assertEqual((1, 2, 3), a.test1)
class Feature(InputTypeMixin, BaseFeature):
    """Minimal InputTypeMixin subclass used by TestInputTypeMixin to
    exercise transformer/input_type validation."""
    def __init__(self, input_type=None, transformer=None, *args, **kwargs):
        super(Feature, self).__init__(input_type=input_type, *args, **kwargs)
        # Validates the transformer against input_type before storing it
        # (raises on mismatch — see test_input_type_mismatch).
        self.check_transformer(transformer)
        self.transformer = transformer
class TestInputTypeMixin(unittest.TestCase):
    """Tests for InputTypeMixin input_type resolution via `Feature`."""
    def test_input_type_default(self):
        a = Feature()
        self.assertEqual("list", a.input_type)
    def test_input_type_mismatch(self):
        # An explicit input_type conflicting with the transformer's is an
        # error.
        trans = BaseFeature(input_type="filename")
        with self.assertRaises(ValueError):
            Feature(input_type="list", transformer=trans)
    def test_input_type_match(self):
        trans = BaseFeature(input_type="filename")
        a = Feature(input_type="filename", transformer=trans)
        self.assertEqual("filename", a.input_type)
    def test_input_type_normal(self):
        a = Feature(input_type="filename")
        self.assertEqual("filename", a.input_type)
    def test_input_type_from_param(self):
        # With no explicit input_type, inherit the transformer's.
        trans = BaseFeature(input_type="filename")
        a = Feature(transformer=trans)
        self.assertEqual("filename", a.input_type)
class FormFeature(FormMixin, BaseFeature):
    """Minimal FormMixin subclass with fixed pair data, used by
    TestFormMixin for label and index-map tests."""
    ATTRIBUTES = ('data', )
    LABELS = ('data', )
    def __init__(self, input_type=None, form=1, add_unknown=False,
                 *args, **kwargs):
        super(FormFeature, self).__init__(input_type=input_type,
                                          *args, **kwargs)
        # Fixed "fitted" data so tests need no real fit() call.
        self.data = (('A', 'B'), ('B', 'C'), ('C', 'D'))
        self.form = form
        self.add_unknown = add_unknown
    def _para_transform(self, X):
        # No per-input work is needed for these tests.
        pass
class TestFormMixin(unittest.TestCase):
    """Tests for FormMixin label generation and index-map caching."""
    def test_get_encoded_labels_unknown(self):
        a = FormFeature(form=1)
        labels = a.get_labels()
        self.assertEqual(labels, a.data)
    def test_get_idx_map(self):
        # The index map is built lazily and cached between calls.
        a = FormFeature(form=1)
        b = a.get_idx_map()
        self.assertIsNotNone(a._idx_map)
        c = a.get_idx_map()
        self.assertIs(b, c)
    def test_rebuild_idx_map_on_change(self):
        a = FormFeature(form=1)
        b = a.get_idx_map()
        self.assertIsNotNone(a._idx_map)
        c = a.get_idx_map()
        self.assertIs(b, c)
        # Changing the underlying data must invalidate the cached map.
        a.data = (('D', 'B'), )
        d = a.get_idx_map()
        self.assertIsNot(d, c)
    def test_get_group_order(self):
        a = FormFeature(form=1)
        self.assertEqual(a.get_group_order(None),
                         [('A', ), ('B', ), ('C', ), ('D', )])
    def test_transform(self):
        # transform() should build the index map as a side effect.
        a = FormFeature(form=1)
        a.transform([None, None])
        self.assertTrue(hasattr(a, '_idx_map'))
class TestEncodedFeature(unittest.TestCase):
    """Tests for EncodedFeature value encoding and label expansion."""
    def test_encode_values(self):
        a = EncodedFeature(segments=5)
        # Each entry is (index tuple or None, value, scale); the expected
        # output is `segments` slots per index in the (2, ) layout.
        data = [((0, ), 3, 1), (None, 1, 1), ((1, ), 3, 2)]
        res = a.encode_values(data, (2, ))
        expected = [
            0, 1.997889e-159, 5.399097e-2, 8.363952e-210, 0,
            0, 3.995779e-159, 1.079819e-1, 1.672790e-209, 0]
        try:
            numpy.testing.assert_array_almost_equal(
                res,
                expected)
        except AssertionError as e:
            self.fail(e)
    def test_encode_values_saved_length(self):
        # saved_lengths=1 keeps the first axis of the (2, 2) layout as a
        # separate output row instead of flattening it.
        a = EncodedFeature(segments=5)
        data = [((0, 1), 3, 1), ((1, 0), 3, 2), (None, 1, 1)]
        res = a.encode_values(data, (2, 2), saved_lengths=1)
        expected = numpy.zeros((2, 10))
        expected = numpy.array([
            [0, 0, 0, 0, 0, 0, 1.997889e-159, 5.399097e-002, 8.363952e-210, 0],
            [0, 3.995779e-159, 1.079819e-1, 1.672790e-209, 0, 0, 0, 0, 0, 0],
        ])
        try:
            numpy.testing.assert_array_almost_equal(
                res,
                expected)
        except AssertionError as e:
            self.fail(e)
    def test_get_encoded_labels(self):
        # Each base label expands to one label per segment, suffixed with
        # the evenly spaced position in [start, end].
        a = EncodedFeature(segments=3, start=1., end=3.)
        labels = a.get_encoded_labels([('A', 'B'), ('C', 'D')])
        expected = [
            'A-B_1.0', 'A-B_2.0', 'A-B_3.0',
            'C-D_1.0', 'C-D_2.0', 'C-D_3.0',
        ]
        self.assertEqual(labels, expected)
# Allow running this test module directly (python tests/test_base.py).
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "0493c2b23280d9d43155bdd04a7cd066",
"timestamp": "",
"source": "github",
"line_count": 541,
"max_line_length": 79,
"avg_line_length": 31.168207024029574,
"alnum_prop": 0.5419879017910094,
"repo_name": "crcollins/molml",
"id": "2573cbbee3166a4365c8369f15e876ead88e6d0f",
"size": "16862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "297285"
}
],
"symlink_target": ""
} |
"""
Code to test the deltmail.campaign module
This test case sends the following mails:
1. "BulkMail : Greetings from Festember" to mojo@jojo.com and phineas@ferb.com
Both email ids appear in the mail (like CC).
2. "TransactionMail : Greetings from Festember" to job@bob.com and pop@bob.com
Two *separate* mails are sent. Unlike CC.
3. "CampaignFactoryBulk : Greetings from Festember" to FOB@bob.com, pop@bob.com
All email ids appear in the mail (like CC).
4. "CampaignFactoryTransaction : Greetings from Festember" to job@bob.com, pop@bob.com,
sop@bob.com. Three *separate* mails are sent. Unlike CC.
In #3, The email body will look incomplete, because the name and message
fields in the template.mmtmpl file haven't been filled (because it's a BulkMail).
Other than these, the test suite also creates preview mails for each of the
above mails in the preview-mails directory. Each Test class creates preview
mails under the /preview-mails/<Test-class-name> directory.
"""
import sys
import os
import os.path as path
# Prefer the stdlib mock (Python 3.3+); fall back to the external `mock`
# package on older interpreters. Catch ImportError specifically so that
# unrelated startup errors are not silently swallowed by a bare except.
try:
    from unittest.mock import patch
    from unittest.mock import Mock
except ImportError:
    from mock import patch
    from mock import Mock
from deltamail.campaign import BulkMailCampaign, TransactionMailCampaign
from deltamail.campaign import CampaignFactory
from deltamail.campaign import MAX_PREVIEW_FILE_LEN
from deltamail.mail import MailFactory
class TestBulkMailCampaign(object):
    """Class to test deltamail.campaign.BulkMailCampaign class"""
    # Shared constructor arguments: one bulk mail to all recipients,
    # template placeholders filled from global_vars only.
    args = {
        "from_addr": "sender@example.com",
        "subject": "BulkMail : Greetings from {{company}}",
        "mailing_list": ["mojo@jojo.com", "phineas@ferb.com"],
        "template_str": "Hello Human, greetings from {{company}}.\nCopyright @ {{year}}",
        "global_vars": {"company": "Festember", "year": 2015}
    }
    # Expected subject/body after template evaluation.
    subject_evaled = "BulkMail : Greetings from Festember"
    body_evaled = "Hello Human, greetings from Festember.\nCopyright @ 2015"
    @patch('deltamail.campaign.MailFactory', autospec=True)
    def __init__(self, mock_mf):
        """Create an instance of BulkMailCampaign for testing.
        Also test if it is initialized correctly."""
        self.bmc = BulkMailCampaign(**self.args)
        # A bulk campaign produces exactly one mail for all recipients.
        assert len(self.bmc._mails) == 1
        # MailFactory calls it `variables` and not `global_vars`
        # Rename it. Laziness
        self.args["variables"] = self.args["global_vars"]
        self.args.pop("global_vars", None)
        mock_mf.assert_called_once_with(**self.args)
        # acquire the actual Envelope object
        self.bmc._mails = [MailFactory(**self.args)]
        # Rename it back
        self.args["global_vars"] = self.args["variables"]
        self.args.pop("variables", None)
    @patch('deltamail.campaign.os.path.isdir', autospec=True)
    @patch('deltamail.campaign.os.mkdir', autospec=True)
    @patch('deltamail.campaign.open')
    def test_preview_default(self, mk_open, mock_mkdir, mock_isdir):
        """Test BulkMailCampaign.preview() with no args"""
        newdirpath = path.join(os.getcwd(), "email-preview", "")
        # Preview filename is "<subject>-<recipients>", truncated to
        # MAX_PREVIEW_FILE_LEN, with path-hostile characters mapped to "-".
        preview_file_name = self.subject_evaled + "-" + ",".join(self.args["mailing_list"])
        preview_file_name = preview_file_name[:MAX_PREVIEW_FILE_LEN] + ".html"
        preview_file_name = preview_file_name.replace(r"/", "-")
        preview_file_name = preview_file_name.replace("\\", "-")
        preview_file_name = preview_file_name.replace(r":", "-")
        preview_file_path = path.join(newdirpath, preview_file_name)
        open_return_mock = Mock()
        mk_open.return_value = open_return_mock
        mock_isdir.return_value = True
        self.bmc.preview()
        mock_mkdir.assert_called_once_with(newdirpath)
        mk_open.assert_called_once_with(preview_file_path, "w")
        open_return_mock.write.assert_called_once_with(self.body_evaled)
        open_return_mock.close.assert_called_once_with()
    @patch('deltamail.campaign.os.path.isdir', autospec=True)
    @patch('deltamail.campaign.os.mkdir', autospec=True)
    @patch('deltamail.campaign.open')
    def test_preview_with_location(self, mk_open, mock_mkdir, mock_isdir):
        """Test BulkMailCampaign.preview() with location argument"""
        newdirpath = "./tests/preview-mails/TestBulkMailCampaign/"
        preview_file_name = self.subject_evaled + "-" + ",".join(self.args["mailing_list"])
        preview_file_name = preview_file_name[:MAX_PREVIEW_FILE_LEN] + ".html"
        preview_file_name = preview_file_name.replace(r"/", "-")
        preview_file_name = preview_file_name.replace("\\", "-")
        preview_file_name = preview_file_name.replace(r":", "-")
        preview_file_path = path.join(newdirpath, preview_file_name)
        open_return_mock = Mock()
        mk_open.return_value = open_return_mock
        self.bmc.preview(newdirpath)
        # An explicitly supplied location is not created via mkdir here.
        mock_mkdir.assert_not_called()
        mk_open.assert_called_once_with(preview_file_path, "w")
        open_return_mock.write.assert_called_once_with(self.body_evaled)
        open_return_mock.close.assert_called_once_with()
    def test_send(self):
        """Test the BulkMailCampaign.send() method"""
        mock_mailer = Mock(spec=['send'])
        self.bmc.send(mock_mailer)
        mock_mailer.send.assert_called_once_with(self.bmc._mails[0])
    def test_preview_in_browser(self):
        """Test the preview_in_browser() method"""
        # UNTESTED!
        pass
class TestTransactionMailCampaign(object):
    """Class to test deltamail.campaign.TransactionMailCampaign class"""
    # One separate mail per recipient; each recipient carries its own
    # template variables in addition to global_vars.
    args = {
        "from_addr": "sender@example.com",
        "subject": "TransactionMail : Hey {{name}}, Greetings from {{company}}",
        "mailing_list": [
            {
                "email": 'job@bob.com',
                "variables": {
                    "name": "Job",
                    "msg": "You got a job!"
                }
            },
            {
                "email": 'pop@bob.com',
                "variables": {
                    "name": "Pop",
                    "msg": "You got popped!"
                }
            }
        ],
        "template_str": "Hello {{name}}, greetings from {{company}}.\n{{msg}}\nCopyright @ {{year}}",
        "global_vars": {"company": "Festember", "year": 2015}
    }
    # Expected evaluated subject/body for each recipient, in order.
    evaled = [
        {
            "subject": "TransactionMail : Hey Job, Greetings from Festember",
            "body": "Hello Job, greetings from Festember.\nYou got a job!\nCopyright @ 2015",
        },
        {
            "subject": "TransactionMail : Hey Pop, Greetings from Festember",
            "body": "Hello Pop, greetings from Festember.\nYou got popped!\nCopyright @ 2015",
        }
    ]
    # Per-recipient variables merged with global_vars, in mailing-list order.
    variables = [
        {
            "name": "Job",
            "msg": "You got a job!",
            "company": "Festember",
            "year": 2015
        },
        {
            "name": "Pop",
            "msg": "You got popped!",
            "company": "Festember",
            "year": 2015
        }
    ]
    @patch('deltamail.campaign.MailFactory', autospec=True)
    def __init__(self, mock_mf):
        """Create an instance of TransactionMailCampaign for testing.
        Also test if it is initialized correctly."""
        self.tmc = TransactionMailCampaign(**self.args)
        # One mail per recipient.
        assert len(self.tmc._mails) == 2
        # MailFactory calls it `variables` and not `global_vars`
        # Rename it. Laziness
        global_vars = self.args["global_vars"]
        mailing_list = self.args["mailing_list"]
        self.args.pop("global_vars", None)
        self.args["variables"] = self.variables[0]
        self.args["mailing_list"] = [mailing_list[0]["email"]]
        # acquire the actual Envelope object
        self.tmc._mails[0] = MailFactory(**self.args)
        mock_mf.assert_any_call(**self.args)
        self.args["variables"] = self.variables[1]
        self.args["mailing_list"] = [mailing_list[1]["email"]]
        # acquire the actual Envelope object
        self.tmc._mails[1] = MailFactory(**self.args)
        mock_mf.assert_any_call(**self.args)
        # Undo the changes
        self.args["mailing_list"] = mailing_list
        self.args["global_vars"] = global_vars
        self.args.pop("variables", None)
    @patch('deltamail.campaign.os.path.isdir', autospec=True)
    @patch('deltamail.campaign.os.mkdir', autospec=True)
    @patch('deltamail.campaign.open')
    def test_preview_default(self, mk_open, mock_mkdir, mock_isdir):
        """Test TransactionMailCampaign.preview() with no args"""
        newdirpath = path.join(os.getcwd(), "email-preview", "")
        open_return_mock = Mock()
        mk_open.return_value = open_return_mock
        mock_isdir.return_value = True
        self.tmc.preview()
        mock_mkdir.assert_called_once_with(newdirpath)
        # One preview file per recipient, named like the bulk previews.
        for i in range(len(self.evaled)):
            fname = self.evaled[i]["subject"] + "-" + self.args["mailing_list"][i]["email"]
            fname = fname[:MAX_PREVIEW_FILE_LEN] + ".html"
            fname = fname.replace(r"/", "-")
            fname = fname.replace("\\", "-")
            fname = fname.replace(r":", "-")
            fpath = path.join(newdirpath, fname)
            mk_open.assert_any_call(fpath, "w")
            open_return_mock.write.assert_any_call(self.evaled[i]["body"])
        assert mk_open.call_count == 2
        assert open_return_mock.write.call_count == 2
        assert open_return_mock.close.call_count == 2
    @patch('deltamail.campaign.os.path.isdir', autospec=True)
    @patch('deltamail.campaign.os.mkdir', autospec=True)
    @patch('deltamail.campaign.open')
    def test_preview_with_location(self, mk_open, mock_mkdir, mock_isdir):
        """Test TransactionMailCampaign.preview() with location argument"""
        newdirpath = "./tests/preview-mails/TestBulkMailCampaign/"
        open_return_mock = Mock()
        mk_open.return_value = open_return_mock
        self.tmc.preview(newdirpath)
        mock_mkdir.assert_not_called()
        for i in range(len(self.evaled)):
            fname = self.evaled[i]["subject"] + "-" + self.args["mailing_list"][i]["email"]
            fname = fname[:MAX_PREVIEW_FILE_LEN] + ".html"
            fname = fname.replace(r"/", "-")
            fname = fname.replace("\\", "-")
            fname = fname.replace(r":", "-")
            fpath = path.join(newdirpath, fname)
            mk_open.assert_any_call(fpath, "w")
            open_return_mock.write.assert_any_call(self.evaled[i]["body"])
        assert mk_open.call_count == 2
        assert open_return_mock.write.call_count == 2
        assert open_return_mock.close.call_count == 2
    def test_send(self):
        """Test the TransactionMailCampaign.send() method"""
        mock_mailer = Mock(spec=['send'])
        self.tmc.send(mock_mailer)
        # BUGFIX: the second assertion previously re-checked _mails[0];
        # verify that *both* generated mails were handed to the mailer.
        mock_mailer.send.assert_any_call(self.tmc._mails[0])
        mock_mailer.send.assert_any_call(self.tmc._mails[1])
        assert mock_mailer.send.call_count == 2
    def test_preview_in_browser(self):
        """Test the preview_in_browser() method"""
        # UNTESTED!
        pass
class TestCampaignFactory(object):
    """Class to test deltamail.campaign.CampaignFactory class"""
    # We just check if the appropriate *-Campaign constructors are called
    # with correct arguments
    #
    # Cases to consider:
    # a. MailingList is a string (filename), file doesn't exist
    # b. MailingList file exists, but is not well formed
    # c. MailingList file exists, and is well formed
    # d. MailingList is a list of email ids
    #
    # For each of those:
    # 1. Template file doesn't exist
    # 2. Template file exists:
    # 2.1. Global Vars file doesn't exist, path not given
    # 2.2. Global Vars file doesn't exist, path given
    # 2.3. Global Vars file exists, path given.
    from_addr = "sender@example.com"
    subject = "Subject"
    correct_paths = {
        "fmailing_list": "tests/testCampaignFactory-files/mailingList.ml",
        "fmailing_list_bad": "tests/testCampaignFactory-files/mailingList_bad.ml",
        "ftemplate": "tests/testCampaignFactory-files/template.mmtmpl",
        "fglobal_vars": "tests/testCampaignFactory-files/globalVars.mvar"
    }
    def _path_settings(self, attr, val):
        """Point `attr` at a known-good path (True), a missing path
        (False), or the correct_paths entry named by `val` (string key).
        """
        # Identity comparisons (PEP 8): `val` may also be a string key,
        # so only the actual True/False singletons take these branches.
        if val is True:
            setattr(self, attr, self.correct_paths[attr])
        elif val is False:
            setattr(self, attr, "NON-EXISTING")
        else:
            setattr(self, attr, self.correct_paths[val])
    def _run(self, exception_expected):
        """Invoke CampaignFactory with the configured paths, assert
        whether it raised, and return the created campaign (or None)."""
        exception_raised = False
        ret = None
        try:
            ret = CampaignFactory(self.from_addr, self.subject,
                                  self.fmailing_list, self.ftemplate,
                                  self.fglobal_vars)
        # Catch Exception, not a bare `except:`, so KeyboardInterrupt and
        # SystemExit still propagate; a bare `raise` re-raises unexpected
        # errors with their original traceback (the old `raise e` lost it).
        except Exception:
            exception_raised = True
            if not exception_expected:
                raise
        assert exception_raised == exception_expected
        return ret
    def test_non_existing_fmailing_list(self):
        """Test CampaignFactory if mailing_list is a string, but the file
        doesn't exist"""
        self._path_settings("fmailing_list", False)
        # 1. Template file doesn't exist
        self._path_settings("ftemplate", False)
        self._path_settings("fglobal_vars", False)
        self._run(True)
        # 2. Template file exists
        # 2.1. Global Vars file doesn't exist, path not given
        self.fglobal_vars = ""
        self._path_settings("ftemplate", True)
        self._path_settings("fglobal_vars", False)
        self._run(True)
        self.fglobal_vars = "fglobal_vars"
        # 2.2. Global Vars file doesn't exists, path given
        self._path_settings("ftemplate", True)
        self._path_settings("fglobal_vars", False)
        self._run(True)
        # 2.3. Global Vars file exists, path given.
        self._path_settings("ftemplate", True)
        self._path_settings("fglobal_vars", True)
        self._run(True)
    def test_existing_malformed_fmailing_list(self):
        """Test CampaignFactory if mailing_list is a string, the file
        exists, but is malformed"""
        self._path_settings("fmailing_list", "fmailing_list_bad")
        # 1. Template file doesn't exist
        self._path_settings("ftemplate", False)
        self._path_settings("fglobal_vars", False)
        self._run(True)
        # 2. Template file exists
        # 2.1. Global Vars file doesn't exist, path not given
        self.fglobal_vars = ""
        self._path_settings("ftemplate", True)
        self._path_settings("fglobal_vars", False)
        self._run(True)
        self.fglobal_vars = "fglobal_vars"
        # 2.2. Global Vars file doesn't exists, path given
        self._path_settings("ftemplate", True)
        self._path_settings("fglobal_vars", False)
        self._run(True)
        # 2.3. Global Vars file exists, path given.
        self._path_settings("ftemplate", True)
        self._path_settings("fglobal_vars", True)
        self._run(True)
    def test_existing_well_formed_fmailing_list(self):
        """Test CampaignFactory if mailing_list is a string, the file
        exists, and is wellformed"""
        self._path_settings("fmailing_list", "fmailing_list")
        # 1. Template file doesn't exist
        self._path_settings("ftemplate", False)
        self._path_settings("fglobal_vars", False)
        self._run(True)
        # 2. Template file exists
        # 2.1. Global Vars file doesn't exist, path not given
        self._path_settings("ftemplate", True)
        # self._path_settings("fglobal_vars", True)
        self.fglobal_vars = ""
        ret = self._run(False)
        self.fglobal_vars = "fglobal_vars"
        assert isinstance(ret, TransactionMailCampaign)
        # 2.2. Global Vars file doesn't exists, path given
        self._path_settings("ftemplate", True)
        self._path_settings("fglobal_vars", False)
        self._run(True)
        # 2.3. Global Vars file exists, path given.
        self._path_settings("ftemplate", True)
        self._path_settings("fglobal_vars", True)
        ret = self._run(False)
        assert isinstance(ret, TransactionMailCampaign)
    def test_mailing_list(self):
        """Test CampaignFactory if mailing_list is a list of bad email-ids"""
        # self._path_settings("fmailing_list", "fmailing_list")
        self.fmailing_list = ["job@foo", "bofoo.com", "mo@fo.com"]
        # 1. Template file doesn't exist
        self._path_settings("ftemplate", False)
        self._path_settings("fglobal_vars", False)
        self._run(True)
        # 2. Template file exists
        # 2.1. Global Vars file doesn't exist, path not given
        self._path_settings("ftemplate", True)
        # self._path_settings("fglobal_vars", True)
        self.fglobal_vars = ""
        ret = self._run(False)
        self.fglobal_vars = "fglobal_vars"
        assert isinstance(ret, BulkMailCampaign)
        # 2.2. Global Vars file doesn't exists, path given
        self._path_settings("ftemplate", True)
        self._path_settings("fglobal_vars", False)
        self._run(True)
        # 2.3. Global Vars file exists, path given.
        self._path_settings("ftemplate", True)
        self._path_settings("fglobal_vars", True)
        ret = self._run(False)
        assert isinstance(ret, BulkMailCampaign)
| {
"content_hash": "0e685f431b7768dc8c272a7647dc76c9",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 101,
"avg_line_length": 36.514462809917354,
"alnum_prop": 0.6001244836756634,
"repo_name": "delta/mailer",
"id": "597e91b2895636097ec13c5b44588b84cd0d326f",
"size": "17673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_campaign.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "OCaml",
"bytes": "274"
},
{
"name": "Python",
"bytes": "118345"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: alter Aparcamiento.Num_Comentario to a
    plain IntegerField."""
    dependencies = [
        ('aparcamientos', '0003_auto_20170522_1853'),
    ]
    operations = [
        migrations.AlterField(
            model_name='aparcamiento',
            name='Num_Comentario',
            field=models.IntegerField(),
        ),
    ]
| {
"content_hash": "94f6954921cea7f8b6300bfa69c9137c",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 53,
"avg_line_length": 21.555555555555557,
"alnum_prop": 0.6030927835051546,
"repo_name": "feer14agui/X-Serv-Practica-Aparcamientos",
"id": "c825c3c820bc45c6a597c29062647d8a96516adc",
"size": "412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myproject/aparcamientos/migrations/0004_auto_20170522_1854.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4515"
},
{
"name": "HTML",
"bytes": "4032"
},
{
"name": "Python",
"bytes": "39383"
}
],
"symlink_target": ""
} |
import httplib
import json
import logging
import logging.config
import logging.handlers
import optparse
import os
import re
import sys
import urllib
import uuid
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
# (lets this script run from a source checkout without installing glance)
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                   os.pardir,
                                   os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
    sys.path.insert(0, possible_topdir)
COMMANDS = """Commands:
help <command> Output help for one of the commands below
compare What is missing from the slave glance?
dump Dump the contents of a glance instance to local disk.
livecopy Load the contents of one glance instance into another.
load Load the contents of a local directory into glance.
size Determine the size of a glance instance if dumped to disk.
"""
# Warning text for when an image unexpectedly already exists on the slave.
# NOTE(review): `_` is presumably installed as the gettext translation
# function before this module is imported — confirm.
IMAGE_ALREADY_PRESENT_MESSAGE = _('The image %s is already present on '
                                  'the slave, but our check for it did '
                                  'not find it. This indicates that we '
                                  'do not have permissions to see all '
                                  'the images on the slave server.')
# Matches "<server>:<port>" arguments. Raw string literal so the \w escapes
# are passed to `re` verbatim (a plain string relies on Python leaving
# unknown escapes alone, which is deprecated). Note re.match is unanchored
# at the end, so only a leading "word:word" prefix is required.
SERVER_PORT_REGEX = r'\w+:\w+'
class AuthenticationException(Exception):
    """Raised when the glance server rejects our credentials (HTTP 401/403)."""
class ImageAlreadyPresentException(Exception):
    """Raised when the server reports the image already exists (HTTP 409)."""
class ServerErrorException(Exception):
    """Raised on a generic server-side failure (HTTP 400/500)."""
class UploadException(Exception):
    """Exception type for image upload failures."""
class ImageService(object):
def __init__(self, conn, auth_token):
""" Initialize the ImageService.
conn: a httplib.HTTPConnection to the glance server
auth_token: authentication token to pass in the x-auth-token header
"""
self.auth_token = auth_token
self.conn = conn
def _http_request(self, method, url, headers, body,
ignore_result_body=False):
"""Perform an HTTP request against the server.
method: the HTTP method to use
url: the URL to request (not including server portion)
headers: headers for the request
body: body to send with the request
ignore_result_body: the body of the result will be ignored
Returns: a httplib response object
"""
if self.auth_token:
headers.setdefault('x-auth-token', self.auth_token)
logging.debug(_('Request: %(method)s http://%(server)s:%(port)s'
'%(url)s with headers %(headers)s')
% {'method': method,
'server': self.conn.host,
'port': self.conn.port,
'url': url,
'headers': repr(headers)})
self.conn.request(method, url, body, headers)
response = self.conn.getresponse()
headers = self._header_list_to_dict(response.getheaders())
code = response.status
code_description = httplib.responses[code]
logging.debug(_('Response: %(code)s %(status)s %(headers)s')
% {'code': code,
'status': code_description,
'headers': repr(headers)})
if code in [400, 500]:
raise ServerErrorException(response.read())
if code in [401, 403]:
raise AuthenticationException(response.read())
if code == 409:
raise ImageAlreadyPresentException(response.read())
if ignore_result_body:
# NOTE: because we are pipelining requests through a single HTTP
# connection, httplib requires that we read the response body
# before we can make another request. If the caller knows they
# don't care about the body, they can ask us to do that for them.
response.read()
return response
def get_images(self):
"""Return a detailed list of images.
Yields a series of images as dicts containing metadata.
"""
params = {'is_public': None}
while True:
url = '/v1/images/detail'
query = urllib.urlencode(params)
if query:
url += '?%s' % query
response = self._http_request('GET', url, {}, '')
result = json.loads(response.read())
if not result or not 'images' in result or not result['images']:
return
for image in result.get('images', []):
params['marker'] = image['id']
yield image
def get_image(self, image_uuid):
"""Fetch image data from glance.
image_uuid: the id of an image
Returns: a httplib Response object where the body is the image.
"""
url = '/v1/images/%s' % image_uuid
return self._http_request('GET', url, {}, '')
@staticmethod
def _header_list_to_dict(headers):
"""Expand a list of headers into a dictionary.
headers: a list of [(key, value), (key, value), (key, value)]
Returns: a dictionary representation of the list
"""
d = {}
for (header, value) in headers:
if header.startswith('x-image-meta-property-'):
prop = header.replace('x-image-meta-property-', '')
d.setdefault('properties', {})
d['properties'][prop] = value
else:
d[header.replace('x-image-meta-', '')] = value
return d
def get_image_meta(self, image_uuid):
"""Return the metadata for a single image.
image_uuid: the id of an image
Returns: image metadata as a dictionary
"""
url = '/v1/images/%s' % image_uuid
response = self._http_request('HEAD', url, {}, '',
ignore_result_body=True)
return self._header_list_to_dict(response.getheaders())
@staticmethod
def _dict_to_headers(d):
"""Convert a dictionary into one suitable for a HTTP request.
d: a dictionary
Returns: the same dictionary, with x-image-meta added to every key
"""
h = {}
for key in d:
if key == 'properties':
for subkey in d[key]:
if d[key][subkey] is None:
h['x-image-meta-property-%s' % subkey] = ''
else:
h['x-image-meta-property-%s' % subkey] = d[key][subkey]
else:
h['x-image-meta-%s' % key] = d[key]
return h
def add_image(self, image_meta, image_data):
"""Upload an image.
image_meta: image metadata as a dictionary
image_data: image data as a object with a read() method
Returns: a tuple of (http response headers, http response body)
"""
url = '/v1/images'
headers = self._dict_to_headers(image_meta)
headers['Content-Type'] = 'application/octet-stream'
headers['Content-Length'] = int(image_meta['size'])
response = self._http_request('POST', url, headers, image_data)
headers = self._header_list_to_dict(response.getheaders())
logging.debug(_('Image post done'))
body = response.read()
return headers, body
def add_image_meta(self, image_meta):
    """Update image metadata.

    image_meta: image metadata as a dictionary
    Returns: a tuple of (http response headers, http response body)
    """
    request_headers = self._dict_to_headers(image_meta)
    request_headers['Content-Type'] = 'application/octet-stream'
    response = self._http_request('PUT',
                                  '/v1/images/%s' % image_meta['id'],
                                  request_headers, '')
    response_headers = self._header_list_to_dict(response.getheaders())
    logging.debug(_('Image post done'))
    return response_headers, response.read()
def get_image_service():
    """Get a copy of the image service.
    This is done like this to make it easier to mock out ImageService.
    """
    # Returns the ImageService *class* (not an instance); callers
    # instantiate it with an HTTP connection and an auth token.
    return ImageService
def replication_size(options, args):
    """%(prog)s size <server:port>
    Determine the size of a glance instance if dumped to disk.
    server:port: the location of the glance instance.
    """
    # Make sure server info is provided
    if len(args) < 1:
        raise TypeError(_("Too few arguments."))
    server_port = args.pop()
    if not re.match(SERVER_PORT_REGEX, server_port):
        raise ValueError(_("Bad format of the given arguments."))
    server, port = server_port.split(':')
    total_size = 0
    count = 0
    imageservice = get_image_service()
    client = imageservice(httplib.HTTPConnection(server, port),
                          options.slavetoken)
    # Sum the reported sizes of active images only; other statuses
    # (queued, killed, ...) would not be dumped and are excluded.
    for image in client.get_images():
        logging.debug(_('Considering image: %(image)s') % {'image': image})
        if image['status'] == 'active':
            total_size += int(image['size'])
            count += 1
    print _('Total size is %d bytes across %d images') % (total_size, count)
def replication_dump(options, args):
    """%(prog)s dump <server:port> <path>
    Dump the contents of a glance instance to local disk.
    server:port: the location of the glance instance.
    path: a directory on disk to contain the data.
    """
    # Make sure server and path are provided
    if len(args) < 2:
        raise TypeError(_("Too few arguments."))
    path = args.pop()
    server_port = args.pop()
    if not re.match(SERVER_PORT_REGEX, server_port):
        raise ValueError(_("Bad format of the given arguments."))
    server, port = server_port.split(':')
    imageservice = get_image_service()
    client = imageservice(httplib.HTTPConnection(server, port),
                          options.mastertoken)
    for image in client.get_images():
        logging.info(_('Considering: %s') % image['id'])
        # Metadata goes to <path>/<id>; image data to <path>/<id>.img.
        # Images already dumped (metadata file exists) are skipped.
        data_path = os.path.join(path, image['id'])
        if not os.path.exists(data_path):
            logging.info(_('... storing'))
            # Dump glance information
            with open(data_path, 'w') as f:
                f.write(json.dumps(image))
            if image['status'] == 'active' and not options.metaonly:
                # Now fetch the image. The metadata returned in headers here
                # is the same as that which we got from the detailed images
                # request earlier, so we can ignore it here. Note that we also
                # only dump active images.
                logging.info(_('... image is active'))
                image_response = client.get_image(image['id'])
                # Stream the body in chunksize pieces so large images are
                # never held in memory all at once.
                with open(data_path + '.img', 'wb') as f:
                    while True:
                        chunk = image_response.read(options.chunksize)
                        if not chunk:
                            break
                        f.write(chunk)
def _dict_diff(a, b):
"""A one way dictionary diff.
a: a dictionary
b: a dictionary
Returns: True if the dictionaries are different
"""
# Only things the master has which the slave lacks matter
if set(a.keys()) - set(b.keys()):
logging.debug(_('metadata diff -- master has extra keys: %(keys)s')
% {'keys': ' '.join(set(a.keys()) - set(b.keys()))})
return True
for key in a:
if str(a[key]) != str(b[key]):
logging.debug(_('metadata diff -- value differs for key '
'%(key)s: master "%(master_value)s" vs '
'slave "%(slave_value)s"') %
{'key': key, 'master_value': a[key],
'slave_value': b[key]})
return True
return False
# This is lifted from openstack-common, but copied here to reduce dependencies
def is_uuid_like(value):
    """Return True if value parses as a UUID, False otherwise."""
    # uuid.UUID raises ValueError for malformed strings and
    # TypeError/AttributeError for non-string input.  The original caught
    # bare Exception, which would also hide unrelated programming errors.
    try:
        uuid.UUID(value)
        return True
    except (ValueError, TypeError, AttributeError):
        return False
def replication_load(options, args):
    """%(prog)s load <server:port> <path>
    Load the contents of a local directory into glance.
    server:port: the location of the glance instance.
    path: a directory on disk containing the data.
    Returns: the list of image ids created or updated.
    """
    # Make sure server and path are provided
    if len(args) < 2:
        raise TypeError(_("Too few arguments."))
    path = args.pop()
    server_port = args.pop()
    if not re.match(SERVER_PORT_REGEX, server_port):
        raise ValueError(_("Bad format of the given arguments."))
    server, port = server_port.split(':')
    imageservice = get_image_service()
    client = imageservice(httplib.HTTPConnection(server, port),
                          options.slavetoken)
    updated = []
    for ent in os.listdir(path):
        # Only files named like a UUID are metadata dumps; everything else
        # (including the companion .img data files) is skipped here.
        if is_uuid_like(ent):
            image_uuid = ent
            logging.info(_('Considering: %s') % image_uuid)
            meta_file_name = os.path.join(path, image_uuid)
            with open(meta_file_name) as meta_file:
                meta = json.loads(meta_file.read())
            # Remove keys which don't make sense for replication
            for key in options.dontreplicate.split(' '):
                if key in meta:
                    logging.debug(_('Stripping %(header)s from saved '
                                    'metadata'), {'header': key})
                    del meta[key]
            if _image_present(client, image_uuid):
                # NOTE(mikal): Perhaps we just need to update the metadata?
                # Note that we don't attempt to change an image file once it
                # has been uploaded.
                logging.debug(_('Image %s already present'), image_uuid)
                headers = client.get_image_meta(image_uuid)
                for key in options.dontreplicate.split(' '):
                    if key in headers:
                        logging.debug(_('Stripping %(header)s from slave '
                                        'metadata'), {'header': key})
                        del headers[key]
                # Push metadata only when it actually differs
                if _dict_diff(meta, headers):
                    logging.info(_('... metadata has changed'))
                    headers, body = client.add_image_meta(meta)
                    _check_upload_response_headers(headers, body)
                    updated.append(meta['id'])
            else:
                # A metadata dump without image data cannot be uploaded
                if not os.path.exists(os.path.join(path, image_uuid + '.img')):
                    logging.info(_('... dump is missing image data, skipping'))
                    continue
                # Upload the image itself
                with open(os.path.join(path, image_uuid + '.img')) as img_file:
                    try:
                        headers, body = client.add_image(meta, img_file)
                        _check_upload_response_headers(headers, body)
                        updated.append(meta['id'])
                    except ImageAlreadyPresentException:
                        logging.error(IMAGE_ALREADY_PRESENT_MESSAGE
                                      % image_uuid)
    return updated
def replication_livecopy(options, args):
    """%(prog)s livecopy <fromserver:port> <toserver:port>
    Load the contents of one glance instance into another.
    fromserver:port: the location of the master glance instance.
    toserver:port: the location of the slave glance instance.
    Returns: the list of image ids created or updated on the slave.
    """
    # Make sure from-server and to-server are provided
    if len(args) < 2:
        raise TypeError(_("Too few arguments."))
    slave_server_port = args.pop()
    master_server_port = args.pop()
    if not re.match(SERVER_PORT_REGEX, slave_server_port) or \
            not re.match(SERVER_PORT_REGEX, master_server_port):
        raise ValueError(_("Bad format of the given arguments."))
    # One authenticated client per glance endpoint
    imageservice = get_image_service()
    slave_server, slave_port = slave_server_port.split(':')
    slave_conn = httplib.HTTPConnection(slave_server, slave_port)
    slave_client = imageservice(slave_conn, options.slavetoken)
    master_server, master_port = master_server_port.split(':')
    master_conn = httplib.HTTPConnection(master_server, master_port)
    master_client = imageservice(master_conn, options.mastertoken)
    updated = []
    for image in master_client.get_images():
        logging.info(_('Considering %(id)s') % {'id': image['id']})
        # Remove fields that don't make sense to replicate
        for key in options.dontreplicate.split(' '):
            if key in image:
                logging.debug(_('Stripping %(header)s from master metadata'),
                              {'header': key})
                del image[key]
        if _image_present(slave_client, image['id']):
            # NOTE(mikal): Perhaps we just need to update the metadata?
            # Note that we don't attempt to change an image file once it
            # has been uploaded.
            headers = slave_client.get_image_meta(image['id'])
            if headers['status'] == 'active':
                # NOTE(review): `image` was already stripped in the loop
                # above, so the `key in image` branch here looks redundant
                # -- confirm whether only the headers strip is needed.
                for key in options.dontreplicate.split(' '):
                    if key in image:
                        logging.debug(_('Stripping %(header)s from master '
                                        'metadata'), {'header': key})
                        del image[key]
                    if key in headers:
                        logging.debug(_('Stripping %(header)s from slave '
                                        'metadata'), {'header': key})
                        del headers[key]
                # Push metadata only when it actually differs
                if _dict_diff(image, headers):
                    logging.info(_('... metadata has changed'))
                    headers, body = slave_client.add_image_meta(image)
                    _check_upload_response_headers(headers, body)
                    updated.append(image['id'])
        elif image['status'] == 'active':
            # Image is missing from the slave: stream it across, unless we
            # were asked to replicate metadata only
            logging.info(_('%s is being synced') % image['id'])
            if not options.metaonly:
                image_response = master_client.get_image(image['id'])
                try:
                    headers, body = slave_client.add_image(image,
                                                           image_response)
                    _check_upload_response_headers(headers, body)
                    updated.append(image['id'])
                except ImageAlreadyPresentException:
                    logging.error(IMAGE_ALREADY_PRESENT_MESSAGE % image['id'])
    return updated
def replication_compare(options, args):
    """%(prog)s compare <fromserver:port> <toserver:port>
    Compare the contents of fromserver with those of toserver.
    fromserver:port: the location of the master glance instance.
    toserver:port: the location of the slave glance instance.
    Returns: a dict mapping image id -> 'diff' (metadata differs) or
    'missing' (absent from the destination); identical images are omitted.
    """
    # Make sure from-server and to-server are provided
    if len(args) < 2:
        raise TypeError(_("Too few arguments."))
    slave_server_port = args.pop()
    master_server_port = args.pop()
    if not re.match(SERVER_PORT_REGEX, slave_server_port) or \
            not re.match(SERVER_PORT_REGEX, master_server_port):
        raise ValueError(_("Bad format of the given arguments."))
    # One authenticated client per glance endpoint
    imageservice = get_image_service()
    slave_server, slave_port = slave_server_port.split(':')
    slave_conn = httplib.HTTPConnection(slave_server, slave_port)
    slave_client = imageservice(slave_conn, options.slavetoken)
    master_server, master_port = master_server_port.split(':')
    master_conn = httplib.HTTPConnection(master_server, master_port)
    master_client = imageservice(master_conn, options.mastertoken)
    differences = {}
    for image in master_client.get_images():
        if _image_present(slave_client, image['id']):
            headers = slave_client.get_image_meta(image['id'])
            # Ignore fields that legitimately differ between deployments
            for key in options.dontreplicate.split(' '):
                if key in image:
                    logging.debug(_('Stripping %(header)s from master '
                                    'metadata'), {'header': key})
                    del image[key]
                if key in headers:
                    logging.debug(_('Stripping %(header)s from slave '
                                    'metadata'), {'header': key})
                    del headers[key]
            # Field-by-field comparison of the remaining metadata
            for key in image:
                if image[key] != headers.get(key, None):
                    logging.info(_('%(image_id)s: field %(key)s differs '
                                   '(source is %(master_value)s, destination '
                                   'is %(slave_value)s)')
                                 % {'image_id': image['id'],
                                    'key': key,
                                    'master_value': image[key],
                                    'slave_value': headers.get(key,
                                                               'undefined')})
                    differences[image['id']] = 'diff'
                else:
                    # NOTE(review): this logs "identical" once per matching
                    # *field*, not once per image -- confirm intended.
                    logging.debug(_('%(image_id)s is identical')
                                  % {'image_id': image['id']})
        elif image['status'] == 'active':
            # Active on the master but entirely absent from the slave
            logging.info(_('%s: entirely missing from the destination')
                         % image['id'])
            differences[image['id']] = 'missing'
    return differences
def _check_upload_response_headers(headers, body):
"""Check that the headers of an upload are reasonable.
headers: the headers from the upload
body: the body from the upload
"""
if 'status' not in headers:
try:
d = json.loads(body)
if 'image' in d and 'status' in d['image']:
return
except:
raise UploadException('Image upload problem: %s' % body)
def _image_present(client, image_uuid):
"""Check if an image is present in glance.
client: the ImageService
image_uuid: the image uuid to check
Returns: True if the image is present
"""
headers = client.get_image_meta(image_uuid)
return 'status' in headers
def parse_options(parser, cli_args):
    """Returns the parsed CLI options, command to run and its arguments, merged
    with any same-named options found in a configuration file

    parser: the option parser
    cli_args: the arguments passed on the command line
    Returns: a tuple of (the parsed options, the command, the command name)
    """
    if not cli_args:
        cli_args.append('-h')  # Show options in usage output...

    options, args = parser.parse_args(cli_args)

    # HACK(sirp): Make the parser available to the print_help method
    # print_help is a command, so it only accepts (options, args); we could
    # one-off have it take (parser, options, args), however, for now, I think
    # this little hack will suffice
    options.__parser = parser

    if not args:
        parser.print_usage()
        sys.exit(0)

    command = lookup_command(parser, args.pop(0))
    return (options, command, args)
def print_help(options, args):
    """Print help specific to a command.
    options: the parsed command line options
    args: the command line
    """
    # Exactly one command name must be supplied; otherwise show the
    # top-level command summary and exit with an error status
    if len(args) != 1:
        print COMMANDS
        sys.exit(1)
    # parse_options stashed the parser on the options object (see the
    # HACK note in parse_options)
    parser = options.__parser
    command_name = args.pop()
    command = lookup_command(parser, command_name)
    # Each command's docstring doubles as its usage text
    print command.__doc__ % {'prog': os.path.basename(sys.argv[0])}
def lookup_command(parser, command_name):
    """Lookup a command.

    parser: the command parser
    command_name: the command name
    Returns: a method which implements that command
    """
    # base commands plus the replication command set
    commands = {'help': print_help,
                'compare': replication_compare,
                'dump': replication_dump,
                'livecopy': replication_livecopy,
                'load': replication_load,
                'size': replication_size}
    if command_name not in commands:
        parser.print_usage()
        sys.exit(_("Unknown command: %s") % command_name)
    return commands[command_name]
def logging_excepthook(exc_type, value, tb):
    """sys.excepthook replacement: log uncaught exceptions as critical.

    exc_type: the exception class
    value: the exception instance
    tb: the traceback object

    The first parameter was originally named `type`, shadowing the
    builtin; sys invokes the hook positionally, so renaming is safe.
    """
    extra = {'exc_info': (exc_type, value, tb)}
    logging.critical(str(value), **extra)
def main():
    """Entry point: parse options, configure logging, run the command."""
    # Top-level usage message embeds the COMMANDS summary text
    usage = """
%%prog <command> [options] [args]
%s
""" % COMMANDS
    oparser = optparse.OptionParser(usage=usage.strip())
    # Options
    oparser.add_option('-c', '--chunksize', action="store", default=65536,
                       help="Amount of data to transfer per HTTP write")
    oparser.add_option('-d', '--debug', action="store_true", default=False,
                       help="Print debugging information")
    oparser.add_option('-D', '--dontreplicate', action="store",
                       default=('created_at date deleted_at location '
                                'updated_at'),
                       help="List of fields to not replicate")
    oparser.add_option('-m', '--metaonly', action="store_true", default=False,
                       help="Only replicate metadata, not images")
    oparser.add_option('-l', '--logfile', action="store", default='',
                       help="Path of file to log to")
    oparser.add_option('-s', '--syslog', action="store_true", default=False,
                       help="Log to syslog instead of a file")
    oparser.add_option('-t', '--token', action="store", default='',
                       help=("Pass in your authentication token if you have "
                             "one. If you use this option the same token is "
                             "used for both the master and the slave."))
    oparser.add_option('-M', '--mastertoken', action="store", default='',
                       help=("Pass in your authentication token if you have "
                             "one. This is the token used for the master."))
    oparser.add_option('-S', '--slavetoken', action="store", default='',
                       help=("Pass in your authentication token if you have "
                             "one. This is the token used for the slave."))
    oparser.add_option('-v', '--verbose', action="store_true", default=False,
                       help="Print more verbose output")
    (options, command, args) = parse_options(oparser, sys.argv[1:])
    # Setup logging: -d beats -v beats the default WARNING level
    root_logger = logging.root
    if options.debug:
        root_logger.setLevel(logging.DEBUG)
    elif options.verbose:
        root_logger.setLevel(logging.INFO)
    else:
        root_logger.setLevel(logging.WARNING)
    formatter = logging.Formatter()
    # Destination: syslog, then a watched log file, then stdout
    if options.syslog:
        handler = logging.handlers.SysLogHandler(address='/dev/log')
    elif options.logfile:
        handler = logging.handlers.WatchedFileHandler(options.logfile)
    else:
        handler = logging.StreamHandler(sys.stdout)
    # Route any uncaught exception through logging as well
    sys.excepthook = logging_excepthook
    # -t is shorthand for passing the same token as both -M and -S
    if options.token:
        options.slavetoken = options.token
        options.mastertoken = options.token
    handler.setFormatter(formatter)
    root_logger.addHandler(handler)
    # Commands signal bad arguments with TypeError/ValueError; show the
    # command's own usage docstring and exit non-zero in that case.
    try:
        command(options, args)
    except TypeError as e:
        logging.error(command.__doc__ % {'prog': command.__name__})
        sys.exit("ERROR: %s" % e)
    except ValueError as e:
        logging.error(command.__doc__ % {'prog': command.__name__})
        sys.exit("ERROR: %s" % e)
    except Exception as e:
        # NOTE(review): this handler is a no-op (re-raises unchanged);
        # confirm whether it was meant to log before propagating.
        raise
# Script entry point: parse options, configure logging, run the command.
if __name__ == '__main__':
    main()
| {
"content_hash": "423dc75e851e54d00bce9a1e3b3d16bf",
"timestamp": "",
"source": "github",
"line_count": 784,
"max_line_length": 79,
"avg_line_length": 35.140306122448976,
"alnum_prop": 0.559092558983666,
"repo_name": "SUSE-Cloud/glance",
"id": "c04262b645950067dab0ccb02ffdb2303cf068c2",
"size": "28264",
"binary": false,
"copies": "3",
"ref": "refs/heads/stable/havana",
"path": "glance/cmd/replicator.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2557049"
},
{
"name": "Shell",
"bytes": "3488"
}
],
"symlink_target": ""
} |
"""
Routines to generate synthetic elections of "type 1".
Called from syn.py.
"""
"""
e is of type multi.Election
synpar is an object of type syn.Syn_Parameters
"""
import copy
import numpy as np
import audit_orders
import election_spec
import outcomes
import reported
import syn
import utils
import write_csv
"""
Routines work with the following parameters (defaults in brackets):
cids = # number of contests [2]
n_cids_wrong = # number of contests with wrong reported outcome [0]
min_n_selids_per_cid = minimum number of selids per contest [2]
max_n_selids_per_cid = maximum number of selids per contest [5]
n_pbcids = # number of pbcids [2]
n_pbcids_nocvr = # number of collections with no CVRs [0]
min_n_bids_per_pbcid = minimum number of bids per pbcid [10]
max_n_bids_per_pbcid = maximum number of bids per pbcid [20]
box_size = max number of ballots in a box [100]
min_pbcids_per_cid = minimum number of pbcids per contest [1]
max_pbcids_per_cid = maximum number of pbcids per contest [n_pbcids]
dropoff = rate at which votes drop off with selection (geometric) [0.9]
error_rate = rate at which reported votes != actual votes [0.005]
seed = random number seed (for reproducibility) [1]
RandomState = state for random number generator
### following are then computed ###
### in e:
cids = list of cids (of length n_cids)
cids_wrong = list of cids that will have wrong output
pbcids = list of pbcids (of length syn_n_pbcids)
cvr_type_p = mapping of pbcid to "CVR" or "noCVR"
### in syn:
n_bids_p = mapping from pbcid to number of bids in that pbcid
We fill in the values of the fields of election e as if they
had been read in, or else we (optionally) output the values as csv files.
"""
def default_parameters(synpar):
    """Set the default generation parameters on synpar.

    (See the parameter list in the module comment above for meanings.)
    """
    defaults = {
        'n_cids': 2,
        'n_cids_wrong': 0,
        'min_n_selids_per_cid': 2,
        'max_n_selids_per_cid': 5,
        'n_pbcids': 2,
        'n_pbcids_nocvr': 0,
        'min_n_bids_per_pbcid': 200,
        'max_n_bids_per_pbcid': 200,
        'box_size': 100,
        'min_pbcids_per_cid': 1,
        'dropoff': 0.9,
        'error_rate': 0.005,
        'seed': 1,
        'margin': 0.01,
    }
    for name, value in defaults.items():
        setattr(synpar, name, value)
    # by default every collection may carry every contest
    synpar.max_pbcids_per_cid = synpar.n_pbcids
    # reproducible randomness, derived from the seed
    synpar.RandomState = np.random.RandomState(synpar.seed)
##############################################################################
## election specification
def generate_election_spec(e, synpar):
    """
    e = multi.Election()
    synpar = Syn_Params()
    synpar supplies additional parameters as noted above;
    add to e values that would be otherwise read in,
    e.g. via election_spec.py
    (read_election_spec_general,
     read_election_spec_contests,
     read_election_spec_contest_groups,
     read_election_spec_collections)
    """
    # generation steps, in the same order the spec files would be read
    steps = (generate_election_spec_general,
             generate_election_spec_contests,
             generate_election_spec_contest_groups,
             generate_election_spec_collections)
    for step in steps:
        step(e, synpar)
    election_spec.finish_election_spec(e)
    election_spec.check_election_spec(e)
def generate_election_spec_general(e, synpar):
    """Fill in the 'general' election-spec fields (name, dirname, date, url)."""
    # reset syn.RandomState from synpar.seed so generation is reproducible
    synpar.RandomState = np.random.RandomState(synpar.seed)
    stamp = utils.datetime_string()
    e.election_name = "TestElection-" + stamp
    if e.election_dirname == "":
        e.election_dirname = "TestElection-" + stamp
    e.election_date = stamp
    e.election_url = "None"
def generate_election_spec_contests(e, synpar):
    """Fill in contest ids, types, selections and valid votes on e."""
    # check number of contests
    assert isinstance(synpar.n_cids, int) and synpar.n_cids >= 1

    # make cid for each contest
    e.cids = {"con{}".format(i + 1) for i in range(synpar.n_cids)}

    # every contest is plurality, with no extra params and no write-ins
    for cid in e.cids:
        e.contest_type_c[cid] = "plurality"
        e.params_c[cid] = ""
        e.write_ins_c[cid] = "no"

    # check number of cids with wrong reported outcome
    assert isinstance(synpar.n_cids_wrong, int)
    assert 0 <= synpar.n_cids_wrong <= synpar.n_cids

    # determine which, if any, cids have wrong reported outcome
    shuffled_cids = list(e.cids)
    synpar.RandomState.shuffle(shuffled_cids)        # in-place
    synpar.cids_wrong = shuffled_cids[:synpar.n_cids_wrong]

    # generate selids for each cid
    e.n_selids_c = {}
    e.selids_c = {}
    for cid in e.cids:
        e.n_selids_c[cid] = syn.geospace_choice(e,
                                                synpar,
                                                synpar.min_n_selids_per_cid,
                                                synpar.max_n_selids_per_cid)
        e.selids_c[cid] = {"sel{}".format(i): True
                           for i in range(1, e.n_selids_c[cid] + 1)}

    # generate possible votes for each cid (plurality only)
    for cid in e.cids:
        if e.contest_type_c[cid] != "plurality":
            utils.myerror(("Contest {} is not plurality---"
                           "Can't generate votes for it.")
                          .format(cid))
            continue
        for selid in e.selids_c[cid]:
            utils.nested_set(e.votes_c, [cid, (selid,)], True)
def generate_election_spec_contest_groups(e, synpar):
    """
    Create synpar.n_cids-1 'random' contest groups.
    They get ids like 'gid2-6' meaning they cover cids 2 to 6 inclusive.
    """
    # NOTE(review): e.gids is initialized here but never appended to in this
    # module, so later `len(e.gids) > 0` checks appear to always be False --
    # confirm whether finish_election_spec_contest_groups populates it or
    # whether gids should be appended in the loop below.
    e.gids = []
    cids_list = sorted(list(e.cids))
    # each random (low, high) segment becomes one group covering the cids
    # at those positions of the sorted cid list, inclusive
    for (low, high) in syn.generate_segments(e, synpar, 1, synpar.n_cids):
        gid = "gid{}-{}".format(low, high)
        # NOTE(review): other routines in this module read e.cids_g;
        # confirm the cgids_g vs cids_g naming here is intended.
        e.cgids_g[gid] = cids_list[low:high+1]
def generate_election_spec_collections(e, synpar):
    """Fill in the paper-ballot-collection (pbcid) part of the spec.

    Creates synpar.n_pbcids collections, marks synpar.n_pbcids_nocvr of
    them (chosen at random) as "noCVR" and the rest as "CVR", then picks
    random required/possible contest groups for each collection.
    """
    # generate list of pbcids
    assert isinstance(synpar.n_pbcids, int) and synpar.n_pbcids >= 1
    e.pbcids = ["pbc{}".format(i) for i in range(1, synpar.n_pbcids + 1)]

    # add managers
    for pbcid in e.pbcids:
        e.manager_p[pbcid] = "Nobody"

    # number of pbcids with no CVR
    assert isinstance(synpar.n_pbcids_nocvr, int) and \
        0 <= synpar.n_pbcids_nocvr <= synpar.n_pbcids

    # identify which pbcids have types CVR or noCVR
    e.cvr_type_p = {}
    while len(e.cvr_type_p) < synpar.n_pbcids_nocvr:
        # BUG FIX: RandomState.choice is a method; the original subscripted
        # it (`choice[e.pbcids]`), which raised TypeError whenever
        # n_pbcids_nocvr > 0.
        e.cvr_type_p[synpar.RandomState.choice(e.pbcids)] = "noCVR"
    for pbcid in e.pbcids:
        if pbcid not in e.cvr_type_p:
            e.cvr_type_p[pbcid] = "CVR"

    # record randomly chosen required and possible contest groups for each
    # pbcid ("" means none required / all possible, respectively)
    for pbcid in e.pbcids:
        if len(e.gids) > 0:
            e.required_gid_p[pbcid] = synpar.RandomState.choice(e.gids)
            e.possible_gid_p[pbcid] = synpar.RandomState.choice(e.gids)
        else:
            e.required_gid_p[pbcid] = ""
            e.possible_gid_p[pbcid] = ""

    election_spec.finish_election_spec_contest_groups(e)
##############################################################################
## reported results
def generate_reported(e, synpar):
    """Generate all reported (as-cast) data for the synthetic election."""
    # steps must run in this order; later steps consume earlier results
    for step in (generate_n_bids_p,
                 generate_bids_p,
                 generate_cids_b,
                 generate_rv_cpb,
                 generate_reported_ballot_manifests,
                 compute_reported_stats):
        step(e, synpar)
def generate_n_bids_p(e, synpar):
    """ Generate number of bids for each pbcid. """
    synpar.n_bids_p = {
        pbcid: syn.geospace_choice(e,
                                   synpar,
                                   synpar.min_n_bids_per_pbcid,
                                   synpar.max_n_bids_per_pbcid)
        for pbcid in e.pbcids}
def generate_bids_p(e, synpar):
    """
    Generate list of ballot ids for each pbcid: bid1, bid2, ... .

    Note that these need only be unique within a pbcid, not globally.
    """
    counter = 0
    e.bids_p = {}
    for pbcid in e.pbcids:
        first = counter + 1
        counter += synpar.n_bids_p[pbcid]
        e.bids_p[pbcid] = ["bid{}".format(i)
                           for i in range(first, counter + 1)]
    # total number of bids across all collections
    synpar.n_bids = counter
def generate_cids_b(e, synpar):
    """
    Determine what contest(s) are on the ballot for each bid and pbcid
    Determine if contest is CVR or not
    draw from selection
    Also sets: synpar.required_gid_b
               synpar.possible_gid_b
    Assumes we already have the bids that correspond to the given paper ballot
    collections. What we want to do is assign contests to those ballot
    ids based on what contests are in the given pbcids as well as assign
    selections based on the possible selections for each contest.
    """
    # synpar.cids_b
    synpar.cids_b = {}
    for pbcid in e.pbcids:
        # NOTE(review): these dicts are re-created for every pbcid,
        # discarding entries for earlier pbcids' bids -- harmless only if
        # nothing reads them across pbcids; confirm.
        synpar.required_gid_b = {}
        synpar.possible_gid_b = {}
        for bid in e.bids_p[pbcid]:
            if len(e.gids) > 0:
                synpar.required_gid_b[bid] = synpar.RandomState.choice(e.gids)
                synpar.possible_gid_b[bid] = synpar.RandomState.choice(e.gids)
                # NOTE(review): the assignments above are to synpar.*, but
                # these reads are from e.required_gid_b / e.possible_gid_b
                # -- looks like a bug masked by e.gids being empty in
                # practice; confirm the intended object.
                required_cids_b = set(e.cids_g[e.required_gid_b[bid]])
                possible_cids_b = set(e.cids_g[e.possible_gid_b[bid]])
            else:
                synpar.required_gid_b[bid] = ""   # means no contests required
                synpar.possible_gid_b[bid] = ""   # means any contest is possible
                required_cids_b = set()
                possible_cids_b = set(e.cids)
            # now determine cids for this ballot, i.e. synpar.cids_b[bid]:
            # all required contests, plus a fair-coin subset of the
            # possible ones
            synpar.cids_b[bid] = set()
            required_cids_p = set(e.required_cid_p[pbcid])
            required_cids = required_cids_p.union(required_cids_b)
            for cid in required_cids:
                synpar.cids_b[bid].add(cid)
            possible_cids_p = set(e.possible_cid_p[pbcid])
            possible_cids = possible_cids_p.intersection(possible_cids_b)
            for cid in possible_cids:
                if synpar.RandomState.choice([True, False]):
                    synpar.cids_b[bid].add(cid)
            synpar.cids_b[bid] = list(synpar.cids_b[bid])
def generate_rv_cpb(e, synpar):
    """ Generate the reported selection for each contest and ballot.

    That is, populate rv_cpb, by drawing from selids_c[cid] for each cid.
    """
    e.rv_cpb = {}
    for pbcid in e.pbcids:
        for bid in e.bids_p[pbcid]:
            for cid in synpar.cids_b[bid]:
                choices = list(e.selids_c[cid])
                if e.contest_type_c[cid] == 'plurality':
                    # give min(choices) an "edge" (expected margin) for winning
                    if synpar.RandomState.uniform() <= synpar.margin:
                        winner = min(choices)
                    else:
                        winner = synpar.RandomState.choice(choices)
                    reported_vote = (winner,)
                else:
                    # assume otherwise that vote is permutation of selids
                    # (This will need refinement later presumably.)
                    reported_vote = list(choices)
                    synpar.RandomState.shuffle(reported_vote)
                utils.nested_set(e.rv_cpb, [cid, pbcid, bid], reported_vote)
def compute_reported_stats(e, synpar):
    """Derive reported tallies and outcomes from the generated votes."""
    for compute in (reported.compute_rn_cpr,
                    reported.compute_rn_c,
                    reported.compute_rn_p,
                    reported.compute_rn_cr,
                    outcomes.compute_ro_c):
        compute(e)
def generate_reported_ballot_manifests(e, synpar):
    """
    Generate synthetic ballot manifest data.
    This procedure must be run *after* generate_reported.
    """
    for pbcid in e.pbcids:
        for i, bid in enumerate(e.bids_p[pbcid]):
            # NOTE(review): boxid is computed from i+1 while position uses
            # i, so the last ballot of each box gets the *next* box's id
            # (e.g. i=99, box_size=100 -> box2, position 100); confirm
            # whether boxid should be 1+(i//synpar.box_size).
            boxid = 1+((i+1)//synpar.box_size)
            position = 1+(i%synpar.box_size)
            # stamps form an arbitrary, strictly increasing sequence
            stamp = "stmp"+"{:06d}".format((i+1)*17)
            utils.nested_set(e.boxid_pb, [pbcid, bid], "box{}".format(boxid))
            utils.nested_set(e.position_pb, [pbcid, bid], position)
            utils.nested_set(e.stamp_pb, [pbcid, bid], stamp)
            # no group restrictions or comments for synthetic manifests
            utils.nested_set(e.required_gid_pb, [pbcid, bid], "")
            utils.nested_set(e.possible_gid_pb, [pbcid, bid], "")
            utils.nested_set(e.comments_pb, [pbcid, bid], "")
##############################################################################
## audit
def generate_audit(e, synpar):
    """Generate the audit specification, audit orders, and audited votes."""
    for step in (generate_audit_spec,
                 generate_audit_orders,
                 generate_audited_votes):
        step(e, synpar)
    # (audit stages will be generated by audit itself)
def generate_audit_spec(e, synpar):
    """Generate the global, per-contest, per-collection and seed parts
    of the audit specification."""
    for step in (generate_audit_spec_global,
                 generate_audit_spec_contest,
                 generate_audit_spec_collection,
                 generate_audit_spec_seed):
        step(e, synpar)
def generate_audit_spec_global(e, synpar):
    """Set global audit parameters."""
    # Effectively "no deadline": audit stages may run until year 9999.
    e.max_stage_time = "9999-12-31-23-59-59"
def generate_audit_spec_contest(e, synpar):
    """Create one risk measurement per contest; audit all contests."""
    # fixed measurement settings applied to every contest
    measurement_defaults = (("risk_method_m", "Bayes"),
                            ("risk_limit_m", 0.05),
                            ("risk_upset_m", 0.98),
                            ("sampling_mode_m", "Active"),
                            ("initial_status_m", "Open"),
                            ("risk_measurement_parameters_m", ()))
    for i, cid in enumerate(e.cids):
        mid = "M{}-{}".format(i, cid)
        e.mids.append(mid)
        e.cid_m[mid] = cid
        for attr, value in measurement_defaults:
            getattr(e, attr)[mid] = value
def generate_audit_spec_collection(e, synpar):
    """Cap each collection at DEFAULT_MAX_AUDIT_RATE ballots per stage."""
    DEFAULT_MAX_AUDIT_RATE = 40
    e.max_audit_rate_p.update(
        (pbcid, DEFAULT_MAX_AUDIT_RATE) for pbcid in e.pbcids)
def generate_audit_spec_seed(e, synpar):
    """
    Generate a pseudo-random audit_seed.
    Here audit_seed has limited range (2**32 possible values)
    but this is only for synthetic elections, so
    this isn't so important.
    """
    # NOTE(review): np RandomState.randint's upper bound is exclusive, so
    # this draws from [0, 2**32-2]; confirm whether 2**32-1 should itself
    # be reachable (and whether the docstring's count is accurate).
    e.audit_seed = synpar.RandomState.randint(0, 2**32-1)
def generate_audit_orders(e, synpar):
    # Delegate to audit_orders, which fixes the order in which ballots
    # will be sampled during the audit.
    audit_orders.compute_audit_orders(e)
def generate_audited_votes(e, synpar):
    """Generate audited votes av_cpb from the reported votes rv_cpb.

    Each audited vote equals the reported vote, except that with
    probability synpar.error_rate it is replaced by a draw from the
    contest's other selections.
    """
    e.av_cpb = {}
    for cid in e.rv_cpb:
        for pbcid in e.rv_cpb[cid]:
            for bid in e.rv_cpb[cid][pbcid]:
                rv = e.rv_cpb[cid][pbcid][bid]
                av = rv                          # default no error
                if synpar.RandomState.uniform() <= synpar.error_rate:
                    selids = list(e.selids_c[cid])
                    # BUG FIX: rv is a vote tuple such as (selid,) (see
                    # generate_rv_cpb), so the original test
                    # `rv in selids` compared a tuple against selid
                    # strings and could never be true -- the reported
                    # selection was never excluded from the error draw.
                    if len(rv) == 1 and rv[0] in selids and len(selids) > 1:
                        selids.remove(rv[0])
                    av = (synpar.RandomState.choice(selids),)
                utils.nested_set(e.av_cpb, [cid, pbcid, bid], av)
##############################################################################
##
def generate_syn_type_1(e, args):
    """Generate a complete 'type 1' synthetic election into e,
    then write it out as csv files."""
    synpar = copy.copy(args)
    default_parameters(synpar)
    for phase in (generate_election_spec,
                  generate_reported,
                  generate_audit):
        phase(e, synpar)
    debug = False
    if debug:
        for key in sorted(vars(e)):
            print(key)
            print(" ", vars(e)[key])
    write_csv.write_csv(e)
| {
"content_hash": "366347afdd61b9b11d25be832cbc5069",
"timestamp": "",
"source": "github",
"line_count": 450,
"max_line_length": 89,
"avg_line_length": 33.12222222222222,
"alnum_prop": 0.5852398523985239,
"repo_name": "ron-rivest/2017-bayes-audit",
"id": "5914e74e8d13fa7bcd6f92a1b6dd7ea009fd7d9b",
"size": "14962",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "2017-code/syn1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "4831"
},
{
"name": "Python",
"bytes": "486957"
}
],
"symlink_target": ""
} |
from .fields import BitField, Field
from nettest.exceptions import NettestError
import struct
class PacketMeta(type):
    """Metaclass for Packet: validates declared fields and caches them.

    For every name listed in the class attribute 'fields', the field
    object is looked up on the class (falling back to its bases),
    type-checked, and recorded in '_fields' as (name, field) pairs in
    declaration order.  Plain Field attributes are then replaced on the
    class by their default values.
    """

    def __new__(cls, name, bases, attrs):
        fields = attrs.get('fields')
        if fields is None:
            raise NettestError(_("packet class must have 'fields' field"))
        _fields = []
        for fieldname in attrs['fields']:
            field = attrs.get(fieldname)
            if field is None:
                # fall back to base classes for inherited field objects
                for baseclass in bases:
                    # BUG FIX: getattr() without a default raises
                    # AttributeError when a base lacks the name; supply
                    # None so the search continues and the for/else
                    # NettestError below is reached as intended.
                    field = getattr(baseclass, fieldname, None)
                    if field is not None:
                        break
                else:
                    raise NettestError(
                        _("field '%s' doesn't exist in class %s")
                        % (fieldname, name))
            if not cls.__check_field_type(cls, field):
                raise NettestError(
                    _("field '%s' in class %s should be in type "
                      "(Field, Packet, list)") % (fieldname, name))
            _fields.append((fieldname, field))
            if isinstance(field, Field):
                attrs[fieldname] = field.default_value
        if '_fields' in attrs:
            raise NettestError(
                _("the name '_fields' is reserved in class %s") % (name))
        attrs['_fields'] = _fields
        return super(PacketMeta, cls).__new__(cls, name, bases, attrs)

    @staticmethod
    def __check_field_type(cls, field):
        """Return True iff field is a Field, Packet, or (nested) list of them."""
        if not isinstance(field, (Field, Packet, list)):
            return False
        if isinstance(field, list):
            for subfield in field:
                if not cls.__check_field_type(cls, subfield):
                    return False
        return True
class BitDumper(object):
    """Accumulates bit-sized values and packs them into whole bytes.

    Values are pushed most-significant-first; dump() concatenates them
    and packs the result in network byte order.  The accumulated width
    must total exactly 1, 2, 4 or 8 bytes.
    """

    def __init__(self):
        self.data = []           # pushed values, in order
        self.data_len = []       # bit width of each pushed value
        self.data_len_sum = 0    # total bits pushed so far

    def clear(self):
        """Forget everything pushed so far."""
        self.data = []
        self.data_len = []
        self.data_len_sum = 0

    def push(self, data, length):
        """Append an integer value occupying `length` bits.

        Raises NettestError if the value does not fit in `length` bits.
        """
        data = int(data)
        # BUG FIX: a length-bit field holds 0 .. 2**length - 1; the
        # original test `data > 2**length` wrongly accepted
        # data == 2**length, which overflows into the next field.
        if data < 0 or data >= 2 ** length:
            raise NettestError(_("bit value out of range"))
        self.data.append(data)
        self.data_len.append(length)
        self.data_len_sum += length

    def dump(self):
        """Pack all pushed values into bytes (network byte order).

        Raises NettestError if the total width is not a whole number of
        bytes, or is not 1, 2, 4 or 8 bytes.
        """
        if self.data_len_sum % 8 != 0:
            raise NettestError(_("incorrect bit field length"))
        value = 0
        left_len = self.data_len_sum
        for field_data, field_len in zip(self.data, self.data_len):
            left_len -= field_len
            value += field_data << left_len
        length = self.data_len_sum // 8    # whole byte count
        formats = {1: '!B', 2: '!H', 4: '!I', 8: '!Q'}
        if length not in formats:
            raise NettestError(_("too long bit field"))
        return struct.pack(formats[length], value)
class BitLoader(object):
    """Collects bit-sized fields and later decodes them from packet bytes."""

    def __init__(self, packet):
        self.fields = []          # (name, field) pairs, in push order
        self.bit_len_sum = 0      # total bits collected
        self.packet = packet      # target object receiving decoded values

    def clear(self):
        """Forget all collected fields."""
        self.fields = []
        self.bit_len_sum = 0

    def push(self, fieldname, field):
        """Register one bit field to be loaded later."""
        self.fields.append((fieldname, field))
        self.bit_len_sum += field.length

    def load(self, data):
        """Decode the collected bit fields from data onto the packet.

        Returns the number of bytes consumed.
        Raises NettestError if the total width is not a whole byte count.
        """
        if self.bit_len_sum % 8 != 0:
            raise NettestError(_("incorrect bit field length"))
        byte_len = self.bit_len_sum // 8
        window = data[:byte_len]
        consumed_bits = 0
        for field_name, field in self.fields:
            decoded = field.from_netbytes(window, consumed_bits)
            consumed_bits += field.length
            setattr(self.packet, field_name, decoded)
        return byte_len
class Packet(object, metaclass=PacketMeta):
'''define field order
'''
fields=[]
def __init__(self):
    # Give each nested Packet field its own fresh instance so packet
    # objects never share mutable sub-packet state; plain Field entries
    # were already replaced by default values by PacketMeta.
    for field_name, field in self._fields:
        if isinstance(field, Packet):
            setattr(self, field_name, field.__class__())
def dump(self):
'''Serialize self to bytes
'''
data = b''
bit_dumper = BitDumper()
for field_name, field in self._fields:
field_value = getattr(self, field_name)
if field_value is None:
raise NettestError(_("%s is None and haven't default value")%(field_name))
if isinstance(field, BitField):
bit_dumper.push(field_value, field.length)
continue
else:
if bit_dumper.data_len_sum > 0:
data += bit_dumper.dump()
bit_dumper.clear()
if isinstance(field, Packet):
data += field_value.dump()
continue
data += field.to_netbytes(field_value)
if bit_dumper.data_len_sum > 0:
data += bit_dumper.dump()
return data
# def __dump_list_data(self, fields):
# data = b''
# for field in fields:
# if isinstance(field, Packet):
# data += field.dump()
# continue
# if isinstance(field, list):
# data += self.__dump_list_data()
# continue
# if isinstance(field, Field):
# data += field.to_netbytes(field_value)
# continue
def load(self, data):
'''Deserialize bytes to a self.
if success, return the total data length used
else return None
'''
loaded_len = 0
bit_loader = BitLoader(self)
for field_name, field in self._fields:
if isinstance(field, BitField):
bit_loader.push(field_name, field)
continue
else:
if bit_loader.bit_len_sum > 0:
loaded_len += bit_loader.load(data[loaded_len:])
bit_loader.clear()
if isinstance(field, Packet):
field_value = getattr(self, field_name)
length = field_value.load(data[loaded_len:])
if length is None:
return None
loaded_len += length
continue
field_data = field.from_netbytes(data[loaded_len:])
if field_data is None:
return None
loaded_len += field.length
setattr(self, field_name, field_data)
if bit_loader.bit_len_sum > 0:
loaded_len += bit_loader.load(data[loaded_len:])
return loaded_len
def to_printable(self):
string = ''
string += '-'*20+str(self.__class__.__name__)+'-'*20+'\n'
for field_name, field in self._fields:
field_value = getattr(self, field_name)
if field_value is None:
string += '%s\tNone\n'%(field_name)
elif isinstance(field, Packet):
string += '%s\t%s\n'%(field_name, field_value.to_printable())
else:
string += '%s\t%s\n'%(field_name, field.to_printable(field_value))
string += '-'*(40+len(self.__class__.__name__))+'\n'
return string
def __eq__(self, other):
for field_name in self.fields:
field_value1 = getattr(self, field_name)
field_value2 = getattr(other, field_name)
if field_value1 != field_value2:
return False
return True
@property
def length(self):
total_len = 0
bit_len = 0
for field_name, field in self._fields:
if isinstance(field, BitField):
bit_len += field.length
elif field.length > 0:
total_len += field.length
else:
field_value = getattr(self, field_name)
total_len += len(field_value)
total_len += int(bit_len/8)
return total_len
| {
"content_hash": "8bcc50d274e55d87d2d315c5aaa36b96",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 121,
"avg_line_length": 34.52173913043478,
"alnum_prop": 0.5090680100755668,
"repo_name": "public0821/nettest",
"id": "45656983befaf2efb662bff8a0f6da12eb280a68",
"size": "7940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nettest/packets/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "44853"
}
],
"symlink_target": ""
} |
from nchains import *
| {
"content_hash": "cec27b49a479734e1401834b93a16ae2",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 21,
"avg_line_length": 22,
"alnum_prop": 0.7727272727272727,
"repo_name": "plewis/phycas",
"id": "193088dc9bbcb9f08273b7004e70fdd6415f9c2d",
"size": "22",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/nchains/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7082"
},
{
"name": "C",
"bytes": "48372"
},
{
"name": "C++",
"bytes": "2149872"
},
{
"name": "HTML",
"bytes": "937"
},
{
"name": "OpenEdge ABL",
"bytes": "2018215"
},
{
"name": "Perl",
"bytes": "1704571"
},
{
"name": "Perl6",
"bytes": "4326"
},
{
"name": "Python",
"bytes": "1213646"
},
{
"name": "Shell",
"bytes": "29270"
},
{
"name": "Terra",
"bytes": "1375348"
}
],
"symlink_target": ""
} |
import settings
def tag(bucket, doc):
    """Build a decorator that registers functions under *bucket*.

    The returned decorator marks the wrapped function with the bucket
    attribute, applies any remaining keyword options as attributes, and
    appends one cross-reference sentence per tag type the function now
    belongs to.  *doc* becomes the decorator's own docstring.
    """
    def wrapped(inner, **options):
        options[bucket] = 1
        # Optional overrides for the callable's name and docstring.
        if 'name' in options:
            inner.__name__ = inner.name = options.pop('name')
        if 'doc' in options:
            inner.__doc__ = inner.doc = options.pop('doc')
        # Everything left over becomes an attribute on the function.
        for attr_name, attr_value in options.items():
            setattr(inner, attr_name, attr_value)
        # Append a cross-reference sentence for every tag type this
        # function carries, unless the docstring already mentions it.
        current_doc = str(inner.__doc__)
        notes = ''.join(
            'This is a :ref:`%s tag<%s-tags>`. ' % (kind, kind)
            for kind in settings.TAG_TYPES
            if hasattr(inner, kind)
            and 'This is a :ref:`%s' % kind not in current_doc
        )
        inner.__doc__ = inner.doc = '%s\n%s' % (inner.__doc__, notes)
        return inner
    wrapped.__doc__ = doc
    return wrapped
# Public decorator instances, one per tag type.  Each wraps ``tag`` with
# its bucket name and usage documentation.
block = tag('block', """
Block tag function decorator
Syntax::
    @block([**options])
    def my_tag_function(context, nodelist, [*vars], [**tag_options]):
        return nodelist.render(context)
""")
comparison = tag('comparison',"""
Comparison tag function decorator
Syntax::
    @comparison([**options]):
    def my_comparison([*vars], [**tag_options]):
        return True
""")
# NOTE: a stray no-op expression statement (``comparison.__doc__``) was
# removed here; it evaluated the attribute and discarded the result.
# NOTE: ``filter`` intentionally shadows the builtin -- it is part of
# this module's public API.
filter = tag('filter',"""
Filter tag function decorator
Syntax::
    @filter([**options]):
    def my_filter(value, arg):
        return value
""")
function = tag('function',"""
Function tag function decorator
Syntax::
    @function([**options]):
    def my_function([*args], [**kwargs]):
        return args, kwargs
""")
| {
"content_hash": "ce280d3adf44214ff3a1ab902d04bb84",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 72,
"avg_line_length": 25.17543859649123,
"alnum_prop": 0.5616724738675958,
"repo_name": "justquick/django-native-tags",
"id": "86b6ccfb565fedddaabf688b8a7de7d4151ba432",
"size": "1435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "native_tags/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "90620"
},
{
"name": "Shell",
"bytes": "3090"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import argparse
from . import benchmark
def main():
    """Entry point for the command line interface.

    Parses ``sys.argv`` and dispatches to the handler the chosen
    subcommand registered via ``set_defaults(func=...)``.
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    benchmark.set_parser(subparsers.add_parser('benchmark'))
    args = parser.parse_args()
    # On Python 3 subparsers are optional by default, so ``args`` has no
    # ``func`` attribute when no subcommand was given; exit with a usage
    # error instead of crashing with AttributeError.
    if not hasattr(args, 'func'):
        parser.error('a subcommand is required')
    args.func(args)
| {
"content_hash": "3c01eb032b590c752347c8369f6b7a8f",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 60,
"avg_line_length": 19.266666666666666,
"alnum_prop": 0.7058823529411765,
"repo_name": "lukeyeager/pencroft",
"id": "f3419c0c58fe2b72d25b553f3f4ca5e07a1ce04b",
"size": "289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pencroft/cli.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "15997"
}
],
"symlink_target": ""
} |
"""Support for Google Assistant Smart Home API."""
from asyncio import gather
from collections.abc import Mapping
from itertools import product
import logging
from homeassistant.util.decorator import Registry
from homeassistant.core import callback
from homeassistant.const import (
CLOUD_NEVER_EXPOSED_ENTITIES, CONF_NAME, STATE_UNAVAILABLE,
ATTR_SUPPORTED_FEATURES, ATTR_ENTITY_ID, ATTR_DEVICE_CLASS,
)
from homeassistant.components import (
camera,
climate,
cover,
fan,
group,
input_boolean,
light,
lock,
media_player,
scene,
script,
switch,
vacuum,
)
from . import trait
from .const import (
TYPE_LIGHT, TYPE_LOCK, TYPE_SCENE, TYPE_SWITCH, TYPE_VACUUM,
TYPE_THERMOSTAT, TYPE_FAN, TYPE_CAMERA, TYPE_BLINDS, TYPE_GARAGE,
TYPE_OUTLET,
CONF_ALIASES, CONF_ROOM_HINT,
ERR_FUNCTION_NOT_SUPPORTED, ERR_PROTOCOL_ERROR, ERR_DEVICE_OFFLINE,
ERR_UNKNOWN_ERROR,
EVENT_COMMAND_RECEIVED, EVENT_SYNC_RECEIVED, EVENT_QUERY_RECEIVED
)
from .helpers import SmartHomeError, RequestData
# Registry mapping Google Assistant intent names to handler coroutines
# (populated by the @HANDLERS.register(...) decorators below).
HANDLERS = Registry()
_LOGGER = logging.getLogger(__name__)
# Default Home Assistant domain -> Google Assistant device type mapping.
DOMAIN_TO_GOOGLE_TYPES = {
    camera.DOMAIN: TYPE_CAMERA,
    climate.DOMAIN: TYPE_THERMOSTAT,
    cover.DOMAIN: TYPE_BLINDS,
    fan.DOMAIN: TYPE_FAN,
    group.DOMAIN: TYPE_SWITCH,
    input_boolean.DOMAIN: TYPE_SWITCH,
    light.DOMAIN: TYPE_LIGHT,
    lock.DOMAIN: TYPE_LOCK,
    media_player.DOMAIN: TYPE_SWITCH,
    scene.DOMAIN: TYPE_SCENE,
    script.DOMAIN: TYPE_SCENE,
    switch.DOMAIN: TYPE_SWITCH,
    vacuum.DOMAIN: TYPE_VACUUM,
}
# (domain, device_class) overrides that take precedence over the default
# domain mapping above (see get_google_type).
DEVICE_CLASS_TO_GOOGLE_TYPES = {
    (cover.DOMAIN, cover.DEVICE_CLASS_GARAGE): TYPE_GARAGE,
    (switch.DOMAIN, switch.DEVICE_CLASS_SWITCH): TYPE_SWITCH,
    (switch.DOMAIN, switch.DEVICE_CLASS_OUTLET): TYPE_OUTLET,
}
def deep_update(target, source):
"""Update a nested dictionary with another nested dictionary."""
for key, value in source.items():
if isinstance(value, Mapping):
target[key] = deep_update(target.get(key, {}), value)
else:
target[key] = value
return target
def get_google_type(domain, device_class):
    """Google type based on domain and device class."""
    # Device-class specific mapping wins; otherwise fall back to the
    # per-domain default (which may itself be None for unknown domains).
    specific = DEVICE_CLASS_TO_GOOGLE_TYPES.get((domain, device_class))
    if specific is not None:
        return specific
    return DOMAIN_TO_GOOGLE_TYPES.get(domain)
class _GoogleEntity:
    """Adaptation of Entity expressed in Google's terms."""

    def __init__(self, hass, config, state):
        self.hass = hass
        self.config = config
        self.state = state
        self._traits = None  # lazily built trait list cache

    @property
    def entity_id(self):
        """Return entity ID."""
        return self.state.entity_id

    @callback
    def traits(self):
        """Return traits for entity."""
        if self._traits is not None:
            return self._traits
        state = self.state
        domain = state.domain
        features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
        device_class = state.attributes.get(ATTR_DEVICE_CLASS)
        self._traits = [Trait(self.hass, state, self.config)
                        for Trait in trait.TRAITS
                        if Trait.supported(domain, features, device_class)]
        return self._traits

    async def sync_serialize(self):
        """Serialize entity for a SYNC response.

        Returns None when the entity should not be exposed.
        https://developers.google.com/actions/smarthome/create-app#actiondevicessync
        """
        state = self.state
        # When a state is unavailable, the attributes that describe
        # capabilities will be stripped. For example, a light entity will miss
        # the min/max mireds. Therefore they will be excluded from a sync.
        if state.state == STATE_UNAVAILABLE:
            return None
        entity_config = self.config.entity_config.get(state.entity_id, {})
        name = (entity_config.get(CONF_NAME) or state.name).strip()
        domain = state.domain
        device_class = state.attributes.get(ATTR_DEVICE_CLASS)
        # If an empty string
        if not name:
            return None
        traits = self.traits()
        # Found no supported traits for this entity
        if not traits:
            return None
        device = {
            'id': state.entity_id,
            'name': {
                'name': name
            },
            'attributes': {},
            'traits': [trt.name for trt in traits],
            'willReportState': False,
            'type': get_google_type(domain, device_class),
        }
        # use aliases
        aliases = entity_config.get(CONF_ALIASES)
        if aliases:
            device['name']['nicknames'] = aliases
        for trt in traits:
            device['attributes'].update(trt.sync_attributes())
        # BUGFIX: an explicit room hint is an early return; previously the
        # ``return device`` sat outside the ``if``, which made the whole
        # area-registry fallback below unreachable dead code.
        room = entity_config.get(CONF_ROOM_HINT)
        if room:
            device['roomHint'] = room
            return device
        # Fall back to the area of the device this entity belongs to.
        dev_reg, ent_reg, area_reg = await gather(
            self.hass.helpers.device_registry.async_get_registry(),
            self.hass.helpers.entity_registry.async_get_registry(),
            self.hass.helpers.area_registry.async_get_registry(),
        )
        entity_entry = ent_reg.async_get(state.entity_id)
        if not (entity_entry and entity_entry.device_id):
            return device
        device_entry = dev_reg.devices.get(entity_entry.device_id)
        if not (device_entry and device_entry.area_id):
            return device
        area_entry = area_reg.areas.get(device_entry.area_id)
        if area_entry and area_entry.name:
            device['roomHint'] = area_entry.name
        return device

    @callback
    def query_serialize(self):
        """Serialize entity for a QUERY response.

        https://developers.google.com/actions/smarthome/create-app#actiondevicesquery
        """
        state = self.state
        if state.state == STATE_UNAVAILABLE:
            return {'online': False}
        attrs = {'online': True}
        for trt in self.traits():
            deep_update(attrs, trt.query_attributes())
        return attrs

    async def execute(self, command, data, params):
        """Execute a command.

        Raises SmartHomeError when no trait can handle *command*.
        https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute
        """
        executed = False
        for trt in self.traits():
            if trt.can_execute(command, params):
                await trt.execute(command, data, params)
                executed = True
                break
        if not executed:
            raise SmartHomeError(
                ERR_FUNCTION_NOT_SUPPORTED,
                'Unable to execute {} for {}'.format(command,
                                                     self.state.entity_id))

    @callback
    def async_update(self):
        """Update the entity with latest info from Home Assistant."""
        self.state = self.hass.states.get(self.entity_id)
        if self._traits is None:
            return
        for trt in self._traits:
            trt.state = self.state
async def async_handle_message(hass, config, user_id, message):
    """Handle an incoming API message and return the response."""
    request_data = RequestData(config, user_id, message.get('requestId'))
    response = await _process(hass, request_data, message)
    # Surface failed requests in the log; _process encodes failures as an
    # errorCode in the response payload.
    has_error = bool(response) and 'errorCode' in response['payload']
    if has_error:
        _LOGGER.error('Error handling message %s: %s',
                      message, response['payload'])
    return response
async def _process(hass, data, message):
    """Process a message."""
    def _error_response(code):
        # Every failure shares the same response envelope.
        return {
            'requestId': data.request_id,
            'payload': {'errorCode': code}
        }

    inputs = message.get('inputs')  # type: list
    # Exactly one input is expected per request.
    if len(inputs) != 1:
        return _error_response(ERR_PROTOCOL_ERROR)

    handler = HANDLERS.get(inputs[0].get('intent'))
    if handler is None:
        return _error_response(ERR_PROTOCOL_ERROR)

    try:
        result = await handler(hass, data, inputs[0].get('payload'))
    except SmartHomeError as err:
        return _error_response(err.code)
    except Exception:  # pylint: disable=broad-except
        _LOGGER.exception('Unexpected error')
        return _error_response(ERR_UNKNOWN_ERROR)

    # Handlers may legitimately produce no response (e.g. DISCONNECT).
    if result is None:
        return None
    return {'requestId': data.request_id, 'payload': result}
@HANDLERS.register('action.devices.SYNC')
async def async_devices_sync(hass, data, payload):
    """Handle action.devices.SYNC request.

    https://developers.google.com/actions/smarthome/create-app#actiondevicessync
    """
    hass.bus.async_fire(
        EVENT_SYNC_RECEIVED,
        {'request_id': data.request_id},
        context=data.context)
    devices = []
    for state in hass.states.async_all():
        # Skip entities that must never reach the cloud or that the
        # user's config chose not to expose.
        hidden = (state.entity_id in CLOUD_NEVER_EXPOSED_ENTITIES
                  or not data.config.should_expose(state))
        if hidden:
            continue
        entity = _GoogleEntity(hass, data.config, state)
        serialized = await entity.sync_serialize()
        if serialized is None:
            _LOGGER.debug("No mapping for %s domain", entity.state)
            continue
        devices.append(serialized)
    return {
        'agentUserId': data.context.user_id,
        'devices': devices,
    }
@HANDLERS.register('action.devices.QUERY')
async def async_devices_query(hass, data, payload):
    """Handle action.devices.QUERY request.

    https://developers.google.com/actions/smarthome/create-app#actiondevicesquery
    """
    results = {}
    for requested in payload.get('devices', []):
        devid = requested['id']
        state = hass.states.get(devid)
        hass.bus.async_fire(
            EVENT_QUERY_RECEIVED,
            {
                'request_id': data.request_id,
                ATTR_ENTITY_ID: devid,
            },
            context=data.context)
        if not state:
            # If we can't find a state, the device is offline
            results[devid] = {'online': False}
            continue
        results[devid] = _GoogleEntity(
            hass, data.config, state).query_serialize()
    return {'devices': results}
@HANDLERS.register('action.devices.EXECUTE')
async def handle_devices_execute(hass, data, payload):
    """Handle action.devices.EXECUTE request.

    Runs every (device, execution) pair in the request; errors are
    recorded per entity and make later executions for that entity be
    skipped, successes are reported with refreshed state at the end.
    https://developers.google.com/actions/smarthome/create-app#actiondevicesexecute
    """
    entities = {}  # entity_id -> _GoogleEntity, built lazily
    results = {}   # entity_id -> error result (sticky once set)
    for command in payload['commands']:
        # Every device in the command gets every execution applied.
        for device, execution in product(command['devices'],
                                         command['execution']):
            entity_id = device['id']
            hass.bus.async_fire(
                EVENT_COMMAND_RECEIVED,
                {
                    'request_id': data.request_id,
                    ATTR_ENTITY_ID: entity_id,
                    'execution': execution
                },
                context=data.context)
            # Happens if error occurred. Skip entity for further processing
            if entity_id in results:
                continue
            if entity_id not in entities:
                state = hass.states.get(entity_id)
                if state is None:
                    results[entity_id] = {
                        'ids': [entity_id],
                        'status': 'ERROR',
                        'errorCode': ERR_DEVICE_OFFLINE
                    }
                    continue
                entities[entity_id] = _GoogleEntity(hass, data.config, state)
            try:
                await entities[entity_id].execute(execution['command'],
                                                  data,
                                                  execution.get('params', {}))
            except SmartHomeError as err:
                results[entity_id] = {
                    'ids': [entity_id],
                    'status': 'ERROR',
                    'errorCode': err.code
                }
    # Errors first, then a SUCCESS entry (with up-to-date state) for each
    # entity that completed without an error.
    final_results = list(results.values())
    for entity in entities.values():
        if entity.entity_id in results:
            continue
        entity.async_update()
        final_results.append({
            'ids': [entity.entity_id],
            'status': 'SUCCESS',
            'states': entity.query_serialize(),
        })
    return {'commands': final_results}
@HANDLERS.register('action.devices.DISCONNECT')
async def async_devices_disconnect(hass, data, payload):
    """Handle action.devices.DISCONNECT request.

    Nothing to clean up on our side, and a DISCONNECT response carries
    no payload, so signal "no response" to the caller.

    https://developers.google.com/actions/smarthome/create#actiondevicesdisconnect
    """
    return None
def turned_off_response(message):
    """Return a device turned off response."""
    # Echo the request id (None when absent) with the fixed error code.
    payload = {'errorCode': 'deviceTurnedOff'}
    return {
        'requestId': message.get('requestId'),
        'payload': payload
    }
| {
"content_hash": "102e34f45676553a776bff60b2f4a742",
"timestamp": "",
"source": "github",
"line_count": 436,
"max_line_length": 87,
"avg_line_length": 29.672018348623855,
"alnum_prop": 0.585761768570766,
"repo_name": "molobrakos/home-assistant",
"id": "ab2907cf661fa374cc29270497cd660141191100",
"size": "12937",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/google_assistant/smart_home.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "407"
},
{
"name": "Python",
"bytes": "15057917"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
import os
import mock
from oslo_config import cfg
from oslo_policy import policy as oslo_policy
from oslo_utils import uuidutils
import six
from six import moves
import six.moves.urllib.parse as urlparse
import webob
from webob import exc
import webtest
from neutron.api import api_common
from neutron.api import extensions
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.v2 import attributes
from neutron.api.v2 import base as v2_base
from neutron.api.v2 import router
from neutron.common import exceptions as n_exc
from neutron import context
from neutron import manager
from neutron import policy
from neutron import quota
from neutron.tests import base
from neutron.tests import fake_notifier
from neutron.tests import tools
from neutron.tests.unit import testlib_api
# Directory holding the extension test fixtures.
EXTDIR = os.path.join(base.ROOTDIR, 'unit/extensions')
# Short alias used throughout these tests to build unique identifiers.
_uuid = uuidutils.generate_uuid
def _get_path(resource, id=None, action=None, fmt=None):
    """Build a request path: /<resource>[/<id>][/<action>][.<fmt>]."""
    segments = ['/%s' % resource]
    if id is not None:
        segments.append('/%s' % id)
    if action is not None:
        segments.append('/%s' % action)
    path = ''.join(segments)
    if fmt is not None:
        path += '.%s' % fmt
    return path
class ResourceIndexTestCase(base.BaseTestCase):
    def test_index_json(self):
        """The index view lists each resource with its collection name
        and a self link pointing at the collection URL."""
        app = webtest.TestApp(router.Index({'foo': 'bar'}))
        response = app.get('')
        self.assertIn('resources', response.json)
        resources = response.json['resources']
        self.assertEqual(len(resources), 1)
        entry = resources[0]
        self.assertIn('collection', entry)
        self.assertEqual(entry['collection'], 'bar')
        self.assertIn('name', entry)
        self.assertEqual(entry['name'], 'foo')
        self.assertIn('links', entry)
        self.assertEqual(len(entry['links']), 1)
        self_link = entry['links'][0]
        self.assertIn('href', self_link)
        self.assertEqual(self_link['href'], 'http://localhost/bar')
        self.assertIn('rel', self_link)
        self.assertEqual(self_link['rel'], 'self')
class APIv2TestBase(base.BaseTestCase):
    # Shared setup: wires a fully mocked core plugin (advertising native
    # pagination/sorting support) behind a webtest app wrapping the v2
    # API router, so tests can assert the kwargs the router passes down.
    def setUp(self):
        super(APIv2TestBase, self).setUp()
        plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
        # Ensure existing ExtensionManager is not used
        extensions.PluginAwareExtensionManager._instance = None
        # Create the default configurations
        self.config_parse()
        # Update the plugin
        self.setup_coreplugin(plugin)
        cfg.CONF.set_override('allow_pagination', True)
        cfg.CONF.set_override('allow_sorting', True)
        self._plugin_patcher = mock.patch(plugin, autospec=True)
        self.plugin = self._plugin_patcher.start()
        instance = self.plugin.return_value
        # Advertise native pagination/sorting on the mocked plugin
        # (name-mangled private attributes of NeutronPluginBaseV2).
        instance._NeutronPluginBaseV2__native_pagination_support = True
        instance._NeutronPluginBaseV2__native_sorting_support = True
        api = router.APIRouter()
        self.api = webtest.TestApp(api)
        quota.QUOTAS._driver = None
        cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
                              group='QUOTAS')
        # APIRouter initialization resets policy module, re-initializing it
        policy.init()
class _ArgMatcher(object):
    """An adapter to assist mock assertions, used to custom compare."""
    def __init__(self, cmp, obj):
        # cmp: two-argument comparison callable; obj: reference value.
        self.cmp = cmp
        self.obj = obj
    def __eq__(self, other):
        # Equality delegates to the injected comparison function, which
        # lets this object stand in for an expected argument in
        # assert_called_once_with(...) checks.
        return self.cmp(self.obj, other)
def _list_cmp(l1, l2):
    """Compare two sequences as unordered sets of their elements."""
    # An empty symmetric difference means the two sets are equal.
    return not set(l1).symmetric_difference(l2)
class APIv2TestCase(APIv2TestBase):
    def _do_field_list(self, resource, base_fields):
        """Return base_fields plus every policy-required and primary-key
        attribute of *resource*.

        Works on a copy, so the caller's list is no longer mutated as a
        side effect (the original extended *base_fields* in place).
        """
        attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[resource]
        policy_attrs = [name for (name, info) in attr_info.items()
                        if info.get('required_by_policy')]
        for name, info in attr_info.items():
            if info.get('primary_key'):
                policy_attrs.append(name)
        fields = list(base_fields)
        fields.extend(policy_attrs)
        return fields

    def _get_collection_kwargs(self, skipargs=(), **kwargs):
        """Build the expected kwargs of a plugin get_<collection> call.

        Every standard collection argument not listed in *skipargs*
        defaults to mock.ANY; explicit values come from **kwargs.  The
        default is a tuple rather than a mutable list.
        """
        args_list = ['filters', 'fields', 'sorts', 'limit', 'marker',
                     'page_reverse']
        args_dict = dict(
            (arg, mock.ANY) for arg in set(args_list) - set(skipargs))
        args_dict.update(kwargs)
        return args_dict
    # -- 'fields' query parameter ------------------------------------
    # Each test issues a GET with some 'fields' selection and verifies
    # the plugin receives the requested fields plus the policy-required
    # and primary-key attributes (added via _do_field_list).
    def test_fields(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'fields': 'foo'})
        fields = self._do_field_list('networks', ['foo'])
        kwargs = self._get_collection_kwargs(fields=fields)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_fields_multiple(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        fields = self._do_field_list('networks', ['foo', 'bar'])
        self.api.get(_get_path('networks'), {'fields': ['foo', 'bar']})
        kwargs = self._get_collection_kwargs(fields=fields)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_fields_multiple_with_empty(self):
        # Empty entries in the 'fields' list are dropped.
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        fields = self._do_field_list('networks', ['foo'])
        self.api.get(_get_path('networks'), {'fields': ['foo', '']})
        kwargs = self._get_collection_kwargs(fields=fields)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_fields_empty(self):
        # An empty 'fields' value means no field selection at all.
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'fields': ''})
        kwargs = self._get_collection_kwargs(fields=[])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_fields_multiple_empty(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'fields': ['', '']})
        kwargs = self._get_collection_kwargs(fields=[])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    # -- filter query parameters -------------------------------------
    # Non-reserved query parameters become plugin filters; empty values
    # are discarded, repeated keys accumulate into a value list.
    def test_filters(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': 'bar'})
        filters = {'name': ['bar']}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_filters_empty(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': ''})
        filters = {}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_filters_multiple_empty(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': ['', '']})
        filters = {}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_filters_multiple_with_empty(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': ['bar', '']})
        filters = {'name': ['bar']}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_filters_multiple_values(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': ['bar', 'bar2']})
        filters = {'name': ['bar', 'bar2']}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_filters_multiple(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': 'bar',
                                             'tenant_id': 'bar2'})
        filters = {'name': ['bar'], 'tenant_id': ['bar2']}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_filters_with_fields(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'name': 'bar', 'fields': 'foo'})
        filters = {'name': ['bar']}
        fields = self._do_field_list('networks', ['foo'])
        kwargs = self._get_collection_kwargs(filters=filters, fields=fields)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_filters_with_convert_to(self):
        # Attributes with convert_to get their filter values converted
        # (here 'true' -> True).
        instance = self.plugin.return_value
        instance.get_ports.return_value = []
        self.api.get(_get_path('ports'), {'admin_state_up': 'true'})
        filters = {'admin_state_up': [True]}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
    def test_filters_with_convert_list_to(self):
        # convert_list_to splits 'key=value' items into a nested dict.
        instance = self.plugin.return_value
        instance.get_ports.return_value = []
        self.api.get(_get_path('ports'),
                     {'fixed_ips': ['ip_address=foo', 'subnet_id=bar']})
        filters = {'fixed_ips': {'ip_address': ['foo'], 'subnet_id': ['bar']}}
        kwargs = self._get_collection_kwargs(filters=filters)
        instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
    # -- 'limit' / 'marker' / 'page_reverse' parameters ---------------
    # Pagination parameters: limits are capped by pagination_max_limit,
    # invalid limits are rejected with 400, invalid max-limit settings
    # disable the cap (limit=None).
    def test_limit(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'limit': '10'})
        kwargs = self._get_collection_kwargs(limit=10)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_limit_with_great_than_max_limit(self):
        # Requested limits above pagination_max_limit are clamped.
        cfg.CONF.set_default('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'limit': '1001'})
        kwargs = self._get_collection_kwargs(limit=1000)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_limit_with_zero(self):
        # limit=0 means "no explicit limit": the configured max applies.
        cfg.CONF.set_default('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'), {'limit': '0'})
        kwargs = self._get_collection_kwargs(limit=1000)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_limit_with_unspecific(self):
        cfg.CONF.set_default('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(limit=1000)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_limit_with_negative_value(self):
        cfg.CONF.set_default('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        res = self.api.get(_get_path('networks'), {'limit': -1},
                           expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
    def test_limit_with_non_integer(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        res = self.api.get(_get_path('networks'),
                           {'limit': 'abc'}, expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
    def test_limit_with_infinite_pagination_max_limit(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        cfg.CONF.set_override('pagination_max_limit', 'Infinite')
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(limit=None)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_limit_with_negative_pagination_max_limit(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        cfg.CONF.set_default('pagination_max_limit', '-1')
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(limit=None)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_limit_with_non_integer_pagination_max_limit(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        cfg.CONF.set_default('pagination_max_limit', 'abc')
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(limit=None)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_marker(self):
        cfg.CONF.set_override('pagination_max_limit', '1000')
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        marker = _uuid()
        self.api.get(_get_path('networks'),
                     {'marker': marker})
        kwargs = self._get_collection_kwargs(limit=1000, marker=marker)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_page_reverse(self):
        # NOTE(review): ``calls`` is accumulated but never asserted on;
        # the assert_called_once_with checks carry the verification.
        calls = []
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'page_reverse': 'True'})
        kwargs = self._get_collection_kwargs(page_reverse=True)
        calls.append(mock.call.get_networks(mock.ANY, **kwargs))
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
        instance.get_networks.reset_mock()
        self.api.get(_get_path('networks'),
                     {'page_reverse': 'False'})
        kwargs = self._get_collection_kwargs(page_reverse=False)
        calls.append(mock.call.get_networks(mock.ANY, **kwargs))
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_page_reverse_with_non_bool(self):
        # Non-boolean values fall back to the default (False).
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'),
                     {'page_reverse': 'abc'})
        kwargs = self._get_collection_kwargs(page_reverse=False)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_page_reverse_with_unspecific(self):
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        self.api.get(_get_path('networks'))
        kwargs = self._get_collection_kwargs(page_reverse=False)
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', True)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort_with_primary_key(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up', 'id'],
'sort_dir': ['desc', 'asc', 'desc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', False)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort_without_direction(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'), {'sort_key': ['name']},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_sort_with_invalid_attribute(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'sort_key': 'abc',
'sort_dir': 'asc'},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
def test_sort_with_invalid_dirs(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'sort_key': 'name',
'sort_dir': 'abc'},
expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
    def test_emulated_sort(self):
        # With native pagination/sorting disabled the API layer emulates
        # sorting itself, so the plugin must NOT receive the
        # sorts/limit/marker/page_reverse arguments.
        instance = self.plugin.return_value
        instance._NeutronPluginBaseV2__native_pagination_support = False
        instance._NeutronPluginBaseV2__native_sorting_support = False
        instance.get_networks.return_value = []
        # Build a fresh router so it picks up the disabled capabilities.
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
                                        'sort_dir': ['desc', 'asc']})
        kwargs = self._get_collection_kwargs(
            skipargs=['sorts', 'limit', 'marker', 'page_reverse'])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_emulated_sort_without_sort_field(self):
        # Emulated sorting must fetch the sort keys (plus policy-required
        # fields such as shared/tenant_id) from the plugin even when the
        # caller restricted the requested 'fields'.
        instance = self.plugin.return_value
        instance._NeutronPluginBaseV2__native_pagination_support = False
        instance._NeutronPluginBaseV2__native_sorting_support = False
        instance.get_networks.return_value = []
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
                                        'sort_dir': ['desc', 'asc'],
                                        'fields': ['subnets']})
        kwargs = self._get_collection_kwargs(
            skipargs=['sorts', 'limit', 'marker', 'page_reverse'],
            fields=_ArgMatcher(_list_cmp, ['name',
                                           'status',
                                           'id',
                                           'subnets',
                                           'shared',
                                           'tenant_id']))
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_emulated_pagination(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'limit': 10,
'marker': 'foo',
'page_reverse': False})
kwargs = self._get_collection_kwargs(skipargs=['limit',
'marker',
'page_reverse'])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
    def test_native_pagination_without_native_sorting(self):
        # Native pagination requires native sorting: router construction
        # must fail with Invalid when only sorting support is missing.
        instance = self.plugin.return_value
        instance._NeutronPluginBaseV2__native_sorting_support = False
        self.assertRaises(n_exc.Invalid, router.APIRouter)
    def test_native_pagination_without_allow_sorting(self):
        # Even with allow_sorting disabled in config, explicit sort
        # parameters in the request are still forwarded to the plugin.
        cfg.CONF.set_override('allow_sorting', False)
        instance = self.plugin.return_value
        instance.get_networks.return_value = []
        api = webtest.TestApp(router.APIRouter())
        api.get(_get_path('networks'),
                {'sort_key': ['name', 'admin_state_up'],
                 'sort_dir': ['desc', 'asc']})
        kwargs = self._get_collection_kwargs(sorts=[('name', False),
                                                    ('admin_state_up', True),
                                                    ('id', True)])
        instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
    """CRUD behaviour tests for the v2 API controller over JSON."""

    def _test_list(self, req_tenant_id, real_tenant_id):
        # List networks authenticated as ``req_tenant_id`` while the one
        # stored network belongs to ``real_tenant_id``: a match (or no
        # auth context) yields the full list, a mismatch an empty list.
        env = {}
        if req_tenant_id:
            env = {'neutron.context': context.Context('', req_tenant_id)}
        input_dict = {'id': uuidutils.generate_uuid(),
                      'name': 'net1',
                      'admin_state_up': True,
                      'status': "ACTIVE",
                      'tenant_id': real_tenant_id,
                      'shared': False,
                      'subnets': []}
        return_value = [input_dict]
        instance = self.plugin.return_value
        instance.get_networks.return_value = return_value
        res = self.api.get(_get_path('networks',
                                     fmt=self.fmt), extra_environ=env)
        res = self.deserialize(res)
        self.assertIn('networks', res)
        if not req_tenant_id or req_tenant_id == real_tenant_id:
            # expect full list returned
            self.assertEqual(len(res['networks']), 1)
            output_dict = res['networks'][0]
            input_dict['shared'] = False
            self.assertEqual(len(input_dict), len(output_dict))
            for k, v in six.iteritems(input_dict):
                self.assertEqual(v, output_dict[k])
        else:
            # expect no results
            self.assertEqual(len(res['networks']), 0)
    def test_list_noauth(self):
        # No request context: listing is unfiltered.
        self._test_list(None, _uuid())

    def test_list_keystone(self):
        # Request tenant matches the network owner: full list.
        tenant_id = _uuid()
        self._test_list(tenant_id, tenant_id)

    def test_list_keystone_bad(self):
        # Request tenant differs from the owner: empty list.
        tenant_id = _uuid()
        self._test_list(tenant_id + "bad", tenant_id)
    def test_list_pagination(self):
        # A limited, marker-based listing must include exactly one 'next'
        # and one 'previous' link whose query strings reuse the request
        # params with the marker advanced to the edge ids of the page.
        id1 = str(_uuid())
        id2 = str(_uuid())
        input_dict1 = {'id': id1,
                       'name': 'net1',
                       'admin_state_up': True,
                       'status': "ACTIVE",
                       'tenant_id': '',
                       'shared': False,
                       'subnets': []}
        input_dict2 = {'id': id2,
                       'name': 'net2',
                       'admin_state_up': True,
                       'status': "ACTIVE",
                       'tenant_id': '',
                       'shared': False,
                       'subnets': []}
        return_value = [input_dict1, input_dict2]
        instance = self.plugin.return_value
        instance.get_networks.return_value = return_value
        params = {'limit': ['2'],
                  'marker': [str(_uuid())],
                  'sort_key': ['name'],
                  'sort_dir': ['asc']}
        res = self.api.get(_get_path('networks'),
                           params=params).json
        self.assertEqual(len(res['networks']), 2)
        self.assertEqual(sorted([id1, id2]),
                         sorted([res['networks'][0]['id'],
                                 res['networks'][1]['id']]))
        self.assertIn('networks_links', res)
        next_links = []
        previous_links = []
        for r in res['networks_links']:
            if r['rel'] == 'next':
                next_links.append(r)
            if r['rel'] == 'previous':
                previous_links.append(r)
        self.assertEqual(len(next_links), 1)
        self.assertEqual(len(previous_links), 1)
        url = urlparse.urlparse(next_links[0]['href'])
        self.assertEqual(url.path, _get_path('networks'))
        # Next link: marker becomes the last element of this page.
        params['marker'] = [id2]
        self.assertEqual(urlparse.parse_qs(url.query), params)
        url = urlparse.urlparse(previous_links[0]['href'])
        self.assertEqual(url.path, _get_path('networks'))
        # Previous link: marker is the first element, walked in reverse.
        params['marker'] = [id1]
        params['page_reverse'] = ['True']
        self.assertEqual(urlparse.parse_qs(url.query), params)
def test_list_pagination_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': str(_uuid())}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 1)
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
previous_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'next')
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(len(previous_links), 1)
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
expect_params['marker'] = [id]
expect_params['page_reverse'] = ['True']
self.assertEqual(urlparse.parse_qs(url.query), expect_params)
    def test_list_pagination_with_empty_page(self):
        # An empty page still produces a 'previous' link (with the marker
        # dropped) but must not produce a 'next' link.
        return_value = []
        instance = self.plugin.return_value
        instance.get_networks.return_value = return_value
        params = {'limit': ['2'],
                  'marker': str(_uuid())}
        res = self.api.get(_get_path('networks'),
                           params=params).json
        self.assertEqual(res['networks'], [])
        previous_links = []
        if 'networks_links' in res:
            for r in res['networks_links']:
                self.assertNotEqual(r['rel'], 'next')
                if r['rel'] == 'previous':
                    previous_links.append(r)
        self.assertEqual(len(previous_links), 1)
        url = urlparse.urlparse(previous_links[0]['href'])
        self.assertEqual(url.path, _get_path('networks'))
        expect_params = params.copy()
        # With no results there is nothing to anchor the marker on.
        del expect_params['marker']
        expect_params['page_reverse'] = ['True']
        self.assertEqual(urlparse.parse_qs(url.query), expect_params)
def test_list_pagination_reverse_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'page_reverse': ['True']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 1)
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
next_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'previous')
if r['rel'] == 'next':
next_links.append(r)
self.assertEqual(len(next_links), 1)
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expected_params = params.copy()
del expected_params['page_reverse']
expected_params['marker'] = [id]
self.assertEqual(urlparse.parse_qs(url.query),
expected_params)
    def test_list_pagination_reverse_with_empty_page(self):
        # Reverse paging past the first element: empty result with only a
        # 'next' link, whose query drops both marker and page_reverse.
        return_value = []
        instance = self.plugin.return_value
        instance.get_networks.return_value = return_value
        params = {'limit': ['2'],
                  'marker': [str(_uuid())],
                  'page_reverse': ['True']}
        res = self.api.get(_get_path('networks'),
                           params=params).json
        self.assertEqual(res['networks'], [])
        next_links = []
        if 'networks_links' in res:
            for r in res['networks_links']:
                self.assertNotEqual(r['rel'], 'previous')
                if r['rel'] == 'next':
                    next_links.append(r)
        self.assertEqual(len(next_links), 1)
        url = urlparse.urlparse(next_links[0]['href'])
        self.assertEqual(url.path, _get_path('networks'))
        expect_params = params.copy()
        del expect_params['marker']
        del expect_params['page_reverse']
        self.assertEqual(urlparse.parse_qs(url.query), expect_params)
    def test_create(self):
        # Happy-path POST: the plugin's create_network result is echoed
        # back to the client with a 201 Created status.
        net_id = _uuid()
        data = {'network': {'name': 'net1', 'admin_state_up': True,
                            'tenant_id': _uuid()}}
        return_value = {'subnets': [], 'status': "ACTIVE",
                        'id': net_id}
        return_value.update(data['network'].copy())
        instance = self.plugin.return_value
        instance.create_network.return_value = return_value
        instance.get_networks_count.return_value = 0
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('network', res)
        net = res['network']
        self.assertEqual(net['id'], net_id)
        self.assertEqual(net['status'], "ACTIVE")
    def test_create_use_defaults(self):
        # Attributes omitted from the request body must be filled in with
        # their declared defaults before the plugin is called.
        net_id = _uuid()
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
        full_input = {'network': {'admin_state_up': True,
                                  'shared': False}}
        full_input['network'].update(initial_input['network'])
        return_value = {'id': net_id, 'status': "ACTIVE"}
        return_value.update(full_input['network'])
        instance = self.plugin.return_value
        instance.create_network.return_value = return_value
        instance.get_networks_count.return_value = 0
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(initial_input),
                            content_type='application/' + self.fmt)
        instance.create_network.assert_called_with(mock.ANY,
                                                   network=full_input)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('network', res)
        net = res['network']
        self.assertEqual(net['id'], net_id)
        self.assertEqual(net['admin_state_up'], True)
        self.assertEqual(net['status'], "ACTIVE")
    def test_create_no_keystone_env(self):
        # Without an auth context no tenant_id can be deduced: 400.
        data = {'name': 'net1'}
        self._test_create_failure_bad_request('networks', data)

    def test_create_with_keystone_env(self):
        tenant_id = _uuid()
        net_id = _uuid()
        env = {'neutron.context': context.Context('', tenant_id)}
        # tenant_id should be fetched from env
        initial_input = {'network': {'name': 'net1'}}
        full_input = {'network': {'admin_state_up': True,
                                  'shared': False, 'tenant_id': tenant_id}}
        full_input['network'].update(initial_input['network'])
        return_value = {'id': net_id, 'status': "ACTIVE"}
        return_value.update(full_input['network'])
        instance = self.plugin.return_value
        instance.create_network.return_value = return_value
        instance.get_networks_count.return_value = 0
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(initial_input),
                            content_type='application/' + self.fmt,
                            extra_environ=env)
        instance.create_network.assert_called_with(mock.ANY,
                                                   network=full_input)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
    def test_create_bad_keystone_tenant(self):
        # A body tenant_id that differs from the auth context is rejected.
        tenant_id = _uuid()
        data = {'network': {'name': 'net1', 'tenant_id': tenant_id}}
        env = {'neutron.context': context.Context('', tenant_id + "bad")}
        self._test_create_failure_bad_request('networks', data,
                                              extra_environ=env)

    def test_create_no_body(self):
        # Body without the resource key is invalid.
        data = {'whoa': None}
        self._test_create_failure_bad_request('networks', data)

    def test_create_body_string_not_json(self):
        data = 'a string'
        self._test_create_failure_bad_request('networks', data)

    def test_create_body_boolean_not_json(self):
        data = True
        self._test_create_failure_bad_request('networks', data)

    def test_create_no_resource(self):
        data = {}
        self._test_create_failure_bad_request('networks', data)

    def test_create_missing_attr(self):
        # Required port attributes missing from the body: 400.
        data = {'port': {'what': 'who', 'tenant_id': _uuid()}}
        self._test_create_failure_bad_request('ports', data)

    def test_create_readonly_attr(self):
        # 'status' is read-only and may not be supplied on create.
        data = {'network': {'name': 'net1', 'tenant_id': _uuid(),
                            'status': "ACTIVE"}}
        self._test_create_failure_bad_request('networks', data)

    def test_create_with_too_long_name(self):
        # 256-character name exceeds the attribute length validation.
        data = {'network': {'name': "12345678" * 32,
                            'admin_state_up': True,
                            'tenant_id': _uuid()}}
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt,
                            expect_errors=True)
        self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
    def test_create_bulk(self):
        # Bulk create: each network is created through the mocked plugin
        # and the overall response is 201 Created.
        data = {'networks': [{'name': 'net1',
                              'admin_state_up': True,
                              'tenant_id': _uuid()},
                             {'name': 'net2',
                              'admin_state_up': True,
                              'tenant_id': _uuid()}]}

        def side_effect(context, network):
            # Echo the request body back, adding the 'subnets' attribute
            # the view expects on a network.
            net = network.copy()
            net['network'].update({'subnets': []})
            return net['network']

        instance = self.plugin.return_value
        instance.create_network.side_effect = side_effect
        instance.get_networks_count.return_value = 0
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
def _test_create_failure_bad_request(self, resource, data, **kwargs):
res = self.api.post(_get_path(resource, fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True, **kwargs)
self.assertEqual(res.status_int, exc.HTTPBadRequest.code)
    def test_create_bulk_networks_none(self):
        # A bulk payload of None is invalid.
        self._test_create_failure_bad_request('networks', {'networks': None})

    def test_create_bulk_networks_empty_list(self):
        # An empty bulk list is invalid.
        self._test_create_failure_bad_request('networks', {'networks': []})

    def test_create_bulk_missing_attr(self):
        data = {'ports': [{'what': 'who', 'tenant_id': _uuid()}]}
        self._test_create_failure_bad_request('ports', data)

    def test_create_bulk_partial_body(self):
        # One incomplete item fails the whole bulk request.
        data = {'ports': [{'device_id': 'device_1',
                           'tenant_id': _uuid()},
                          {'tenant_id': _uuid()}]}
        self._test_create_failure_bad_request('ports', data)
    def test_create_attr_not_specified(self):
        # Attributes with no static default get the ATTR_NOT_SPECIFIED
        # sentinel when omitted; the plugin may then substitute real
        # values (here the mac_address).
        net_id = _uuid()
        tenant_id = _uuid()
        device_id = _uuid()
        initial_input = {'port': {'name': '', 'network_id': net_id,
                                  'tenant_id': tenant_id,
                                  'device_id': device_id,
                                  'admin_state_up': True}}
        full_input = {'port': {'admin_state_up': True,
                               'mac_address': attributes.ATTR_NOT_SPECIFIED,
                               'fixed_ips': attributes.ATTR_NOT_SPECIFIED,
                               'device_owner': ''}}
        full_input['port'].update(initial_input['port'])
        return_value = {'id': _uuid(), 'status': 'ACTIVE',
                        'admin_state_up': True,
                        'mac_address': 'ca:fe:de:ad:be:ef',
                        'device_id': device_id,
                        'device_owner': ''}
        return_value.update(initial_input['port'])
        instance = self.plugin.return_value
        instance.get_network.return_value = {
            'tenant_id': six.text_type(tenant_id)
        }
        instance.get_ports_count.return_value = 1
        instance.create_port.return_value = return_value
        res = self.api.post(_get_path('ports', fmt=self.fmt),
                            self.serialize(initial_input),
                            content_type='application/' + self.fmt)
        instance.create_port.assert_called_with(mock.ANY, port=full_input)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('port', res)
        port = res['port']
        self.assertEqual(port['network_id'], net_id)
        self.assertEqual(port['mac_address'], 'ca:fe:de:ad:be:ef')
    def test_create_return_extra_attr(self):
        # Attributes returned by the plugin that are not declared in the
        # resource map ('v2attrs:something') must be stripped by the view.
        net_id = _uuid()
        data = {'network': {'name': 'net1', 'admin_state_up': True,
                            'tenant_id': _uuid()}}
        return_value = {'subnets': [], 'status': "ACTIVE",
                        'id': net_id, 'v2attrs:something': "123"}
        return_value.update(data['network'].copy())
        instance = self.plugin.return_value
        instance.create_network.return_value = return_value
        instance.get_networks_count.return_value = 0
        res = self.api.post(_get_path('networks', fmt=self.fmt),
                            self.serialize(data),
                            content_type='application/' + self.fmt)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        res = self.deserialize(res)
        self.assertIn('network', res)
        net = res['network']
        self.assertEqual(net['id'], net_id)
        self.assertEqual(net['status'], "ACTIVE")
        self.assertNotIn('v2attrs:something', net)
def test_fields(self):
return_value = {'name': 'net1', 'admin_state_up': True,
'subnets': []}
instance = self.plugin.return_value
instance.get_network.return_value = return_value
self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt))
    def _test_delete(self, req_tenant_id, real_tenant_id, expected_code,
                     expect_errors=False):
        # DELETE a network owned by ``real_tenant_id`` while authenticated
        # as ``req_tenant_id`` and check the resulting status code.
        env = {}
        if req_tenant_id:
            env = {'neutron.context': context.Context('', req_tenant_id)}
        instance = self.plugin.return_value
        instance.get_network.return_value = {'tenant_id': real_tenant_id,
                                             'shared': False}
        instance.delete_network.return_value = None
        res = self.api.delete(_get_path('networks',
                                        id=uuidutils.generate_uuid(),
                                        fmt=self.fmt),
                              extra_environ=env,
                              expect_errors=expect_errors)
        self.assertEqual(res.status_int, expected_code)

    def test_delete_noauth(self):
        self._test_delete(None, _uuid(), exc.HTTPNoContent.code)

    def test_delete_keystone(self):
        tenant_id = _uuid()
        self._test_delete(tenant_id, tenant_id, exc.HTTPNoContent.code)

    def test_delete_keystone_bad_tenant(self):
        # Another tenant's network is reported as 404, not 403.
        tenant_id = _uuid()
        self._test_delete(tenant_id + "bad", tenant_id,
                          exc.HTTPNotFound.code, expect_errors=True)
    def _test_get(self, req_tenant_id, real_tenant_id, expected_code,
                  expect_errors=False):
        # GET a network as ``req_tenant_id``; a request tenant ending in
        # 'another' simulates access to a shared network.
        env = {}
        shared = False
        if req_tenant_id:
            env = {'neutron.context': context.Context('', req_tenant_id)}
            if req_tenant_id.endswith('another'):
                shared = True
                env['neutron.context'].roles = ['tenant_admin']
        data = {'tenant_id': real_tenant_id, 'shared': shared}
        instance = self.plugin.return_value
        instance.get_network.return_value = data
        res = self.api.get(_get_path('networks',
                                     id=uuidutils.generate_uuid(),
                                     fmt=self.fmt),
                           extra_environ=env,
                           expect_errors=expect_errors)
        self.assertEqual(res.status_int, expected_code)
        return res

    def test_get_noauth(self):
        self._test_get(None, _uuid(), 200)

    def test_get_keystone(self):
        tenant_id = _uuid()
        self._test_get(tenant_id, tenant_id, 200)

    def test_get_keystone_bad_tenant(self):
        tenant_id = _uuid()
        self._test_get(tenant_id + "bad", tenant_id,
                       exc.HTTPNotFound.code, expect_errors=True)

    def test_get_keystone_shared_network(self):
        # Shared networks are visible to other tenants.
        tenant_id = _uuid()
        self._test_get(tenant_id + "another", tenant_id, 200)

    def test_get_keystone_strip_admin_only_attribute(self):
        # An attribute restricted to admins by policy must be stripped
        # from the response for a non-admin requester.
        tenant_id = _uuid()
        # Inject rule in policy engine
        rules = oslo_policy.Rules.from_dict(
            {'get_network:name': "rule:admin_only"})
        policy.set_rules(rules, overwrite=False)
        res = self._test_get(tenant_id, tenant_id, 200)
        res = self.deserialize(res)
        self.assertNotIn('name', res['network'])
    def _test_update(self, req_tenant_id, real_tenant_id, expected_code,
                     expect_errors=False):
        # PUT an update as ``req_tenant_id`` against a network owned by
        # ``real_tenant_id`` and verify the resulting status code.
        env = {}
        if req_tenant_id:
            env = {'neutron.context': context.Context('', req_tenant_id)}
        # leave out 'name' field intentionally
        data = {'network': {'admin_state_up': True}}
        return_value = {'subnets': []}
        return_value.update(data['network'].copy())
        instance = self.plugin.return_value
        instance.get_network.return_value = {'tenant_id': real_tenant_id,
                                             'shared': False}
        instance.update_network.return_value = return_value
        res = self.api.put(_get_path('networks',
                                     id=uuidutils.generate_uuid(),
                                     fmt=self.fmt),
                           self.serialize(data),
                           extra_environ=env,
                           expect_errors=expect_errors)
        # Ensure id attribute is included in fields returned by GET call
        # in update procedure.
        self.assertEqual(1, instance.get_network.call_count)
        self.assertIn('id', instance.get_network.call_args[1]['fields'])
        self.assertEqual(res.status_int, expected_code)

    def test_update_noauth(self):
        self._test_update(None, _uuid(), 200)

    def test_update_keystone(self):
        tenant_id = _uuid()
        self._test_update(tenant_id, tenant_id, 200)

    def test_update_keystone_bad_tenant(self):
        tenant_id = _uuid()
        self._test_update(tenant_id + "bad", tenant_id,
                          exc.HTTPNotFound.code, expect_errors=True)
    def test_update_readonly_field(self):
        # 'status' is read-only; updating it yields 400.
        data = {'network': {'status': "NANANA"}}
        res = self.api.put(_get_path('networks', id=_uuid()),
                           self.serialize(data),
                           content_type='application/' + self.fmt,
                           expect_errors=True)
        self.assertEqual(res.status_int, 400)

    def test_invalid_attribute_field(self):
        # Unknown attributes in the body are rejected with 400.
        data = {'network': {'invalid_key1': "foo1", 'invalid_key2': "foo2"}}
        res = self.api.put(_get_path('networks', id=_uuid()),
                           self.serialize(data),
                           content_type='application/' + self.fmt,
                           expect_errors=True)
        self.assertEqual(res.status_int, 400)
class SubresourceTest(base.BaseTestCase):
    """Routing tests for the 'dummies' sub-resource nested under networks."""

    def setUp(self):
        # Wire a mocked TestSubresourcePlugin into a fresh APIRouter and
        # register the 'dummy' sub-resource plus its attribute map.
        super(SubresourceTest, self).setUp()
        plugin = 'neutron.tests.unit.api.v2.test_base.TestSubresourcePlugin'
        extensions.PluginAwareExtensionManager._instance = None
        self.useFixture(tools.AttributeMapMemento())
        self.config_parse()
        self.setup_coreplugin(plugin)
        self._plugin_patcher = mock.patch(plugin, autospec=True)
        self.plugin = self._plugin_patcher.start()
        router.SUB_RESOURCES['dummy'] = {
            'collection_name': 'dummies',
            'parent': {'collection_name': 'networks',
                       'member_name': 'network'}
        }
        attributes.RESOURCE_ATTRIBUTE_MAP['dummies'] = {
            'foo': {'allow_post': True, 'allow_put': True,
                    'validate': {'type:string': None},
                    'default': '', 'is_visible': True},
            'tenant_id': {'allow_post': True, 'allow_put': False,
                          'validate': {'type:string': None},
                          'required_by_policy': True,
                          'is_visible': True}
        }
        api = router.APIRouter()
        self.api = webtest.TestApp(api)

    def tearDown(self):
        # Unregister the sub-resource so later tests get a clean router.
        router.SUB_RESOURCES = {}
        super(SubresourceTest, self).tearDown()

    def test_index_sub_resource(self):
        # GET on the collection dispatches to get_network_dummies with
        # the parent network_id extracted from the URL.
        instance = self.plugin.return_value
        self.api.get('/networks/id1/dummies')
        instance.get_network_dummies.assert_called_once_with(mock.ANY,
                                                             filters=mock.ANY,
                                                             fields=mock.ANY,
                                                             network_id='id1')

    def test_show_sub_resource(self):
        instance = self.plugin.return_value
        dummy_id = _uuid()
        self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id))
        instance.get_network_dummy.assert_called_once_with(mock.ANY,
                                                           dummy_id,
                                                           network_id='id1',
                                                           fields=mock.ANY)

    def test_create_sub_resource(self):
        instance = self.plugin.return_value
        body = {'dummy': {'foo': 'bar', 'tenant_id': _uuid()}}
        self.api.post_json('/networks/id1/dummies', body)
        instance.create_network_dummy.assert_called_once_with(mock.ANY,
                                                              network_id='id1',
                                                              dummy=body)

    def test_update_sub_resource(self):
        instance = self.plugin.return_value
        dummy_id = _uuid()
        body = {'dummy': {'foo': 'bar'}}
        self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
                          body)
        instance.update_network_dummy.assert_called_once_with(mock.ANY,
                                                              dummy_id,
                                                              network_id='id1',
                                                              dummy=body)

    def test_update_subresource_to_none(self):
        # An empty 'dummy' body is still forwarded verbatim on update.
        instance = self.plugin.return_value
        dummy_id = _uuid()
        body = {'dummy': {}}
        self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
                          body)
        instance.update_network_dummy.assert_called_once_with(mock.ANY,
                                                              dummy_id,
                                                              network_id='id1',
                                                              dummy=body)

    def test_delete_sub_resource(self):
        instance = self.plugin.return_value
        dummy_id = _uuid()
        self.api.delete('/networks/id1' + _get_path('dummies', id=dummy_id))
        instance.delete_network_dummy.assert_called_once_with(mock.ANY,
                                                              dummy_id,
                                                              network_id='id1')
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class V2Views(base.BaseTestCase):
    """Checks that Controller._view keeps exactly the declared attributes."""

    def _view(self, keys, collection, resource):
        """Build a record with ``keys`` plus a bogus 'fake' attribute and
        verify the view drops 'fake' while keeping every declared key."""
        record = {key: 'value' for key in keys}
        record['fake'] = 'value'
        attr_info = attributes.RESOURCE_ATTRIBUTE_MAP[collection]
        controller = v2_base.Controller(None, collection, resource, attr_info)
        viewed = controller._view(context.get_admin_context(), record)
        self.assertNotIn('fake', viewed)
        for key in keys:
            self.assertIn(key, viewed)

    def test_network(self):
        self._view(('id', 'name', 'subnets', 'admin_state_up', 'status',
                    'tenant_id'), 'networks', 'network')

    def test_port(self):
        self._view(('id', 'network_id', 'mac_address', 'fixed_ips',
                    'device_id', 'admin_state_up', 'tenant_id', 'status'),
                   'ports', 'port')

    def test_subnet(self):
        self._view(('id', 'network_id', 'tenant_id', 'gateway_ip',
                    'ip_version', 'cidr', 'enable_dhcp'),
                   'subnets', 'subnet')
class NotificationTest(APIv2TestBase):
    """Checks that CRUD requests emit <resource>.<op>.start/end events."""

    def setUp(self):
        super(NotificationTest, self).setUp()
        fake_notifier.reset()

    def _resource_op_notifier(self, opname, resource, expected_errors=False):
        # Perform the requested operation via the API and assert exactly
        # one '<resource>.<op>.start' and one '.end' INFO notification
        # were recorded by the fake notifier, in that order.
        initial_input = {resource: {'name': 'myname'}}
        instance = self.plugin.return_value
        instance.get_networks.return_value = initial_input
        instance.get_networks_count.return_value = 0
        expected_code = exc.HTTPCreated.code
        if opname == 'create':
            initial_input[resource]['tenant_id'] = _uuid()
            res = self.api.post_json(
                _get_path('networks'),
                initial_input, expect_errors=expected_errors)
        if opname == 'update':
            res = self.api.put_json(
                _get_path('networks', id=_uuid()),
                initial_input, expect_errors=expected_errors)
            expected_code = exc.HTTPOk.code
        if opname == 'delete':
            initial_input[resource]['tenant_id'] = _uuid()
            res = self.api.delete(
                _get_path('networks', id=_uuid()),
                expect_errors=expected_errors)
            expected_code = exc.HTTPNoContent.code
        expected_events = ('.'.join([resource, opname, "start"]),
                           '.'.join([resource, opname, "end"]))
        self.assertEqual(len(fake_notifier.NOTIFICATIONS),
                         len(expected_events))
        for msg, event in zip(fake_notifier.NOTIFICATIONS, expected_events):
            self.assertEqual('INFO', msg['priority'])
            self.assertEqual(event, msg['event_type'])
        self.assertEqual(res.status_int, expected_code)

    # NOTE(review): 'notifer' is a long-standing typo for 'notifier';
    # kept because test method names are reported identifiers.
    def test_network_create_notifer(self):
        self._resource_op_notifier('create', 'network')

    def test_network_delete_notifer(self):
        self._resource_op_notifier('delete', 'network')

    def test_network_update_notifer(self):
        self._resource_op_notifier('update', 'network')
class DHCPNotificationTest(APIv2TestBase):
    """Checks that CRUD requests notify the DHCP agent once per item."""

    def _test_dhcp_notifier(self, opname, resource, initial_input=None):
        # Run the operation with DhcpAgentNotifyAPI.notify patched and
        # assert one '<resource>.<op>.end' notification per created/
        # updated/deleted item.
        instance = self.plugin.return_value
        instance.get_networks.return_value = initial_input
        instance.get_networks_count.return_value = 0
        expected_code = exc.HTTPCreated.code
        with mock.patch.object(dhcp_rpc_agent_api.DhcpAgentNotifyAPI,
                               'notify') as dhcp_notifier:
            if opname == 'create':
                res = self.api.post_json(
                    _get_path('networks'),
                    initial_input)
            if opname == 'update':
                res = self.api.put_json(
                    _get_path('networks', id=_uuid()),
                    initial_input)
                expected_code = exc.HTTPOk.code
            if opname == 'delete':
                res = self.api.delete(_get_path('networks', id=_uuid()))
                expected_code = exc.HTTPNoContent.code
            expected_item = mock.call(mock.ANY, mock.ANY,
                                      resource + "." + opname + ".end")
            # Bulk inputs are keyed by the plural collection name.
            if initial_input and resource not in initial_input:
                resource += 's'
            num = len(initial_input[resource]) if initial_input and isinstance(
                initial_input[resource], list) else 1
            expected = [expected_item for _ in moves.range(num)]
            self.assertEqual(expected, dhcp_notifier.call_args_list)
            self.assertEqual(num, dhcp_notifier.call_count)
        self.assertEqual(expected_code, res.status_int)

    # NOTE: locals renamed from `input` to avoid shadowing the builtin.
    def test_network_create_dhcp_notifer(self):
        net_input = {'network': {'name': 'net',
                                 'tenant_id': _uuid()}}
        self._test_dhcp_notifier('create', 'network', net_input)

    def test_network_delete_dhcp_notifer(self):
        self._test_dhcp_notifier('delete', 'network')

    def test_network_update_dhcp_notifer(self):
        net_input = {'network': {'name': 'net'}}
        self._test_dhcp_notifier('update', 'network', net_input)

    def test_networks_create_bulk_dhcp_notifer(self):
        net_input = {'networks': [{'name': 'net1',
                                   'tenant_id': _uuid()},
                                  {'name': 'net2',
                                   'tenant_id': _uuid()}]}
        self._test_dhcp_notifier('create', 'network', net_input)
class QuotaTest(APIv2TestBase):
    """Quota enforcement behaviour on network creation."""

    def test_create_network_quota(self):
        # With quota_network=1 and one network already counted, the
        # create request must fail with a quota-exceeded error.
        # (An unused `full_input` computation was removed here.)
        cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
        instance = self.plugin.return_value
        instance.get_networks_count.return_value = 1
        res = self.api.post_json(
            _get_path('networks'), initial_input, expect_errors=True)
        instance.get_networks_count.assert_called_with(mock.ANY,
                                                       filters=mock.ANY)
        self.assertIn("Quota exceeded for resources",
                      res.json['NeutronError']['message'])

    def test_create_network_quota_no_counts(self):
        # When the plugin cannot count natively (NotImplementedError),
        # usage falls back to listing; the quota must still be enforced.
        # (An unused `full_input` computation was removed here.)
        cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
        instance = self.plugin.return_value
        instance.get_networks_count.side_effect = (
            NotImplementedError())
        instance.get_networks.return_value = ["foo"]
        res = self.api.post_json(
            _get_path('networks'), initial_input, expect_errors=True)
        instance.get_networks_count.assert_called_with(mock.ANY,
                                                       filters=mock.ANY)
        self.assertIn("Quota exceeded for resources",
                      res.json['NeutronError']['message'])

    def test_create_network_quota_without_limit(self):
        # quota_network=-1 means unlimited: creation succeeds regardless
        # of the current count.
        cfg.CONF.set_override('quota_network', -1, group='QUOTAS')
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
        instance = self.plugin.return_value
        instance.get_networks_count.return_value = 3
        res = self.api.post_json(
            _get_path('networks'), initial_input)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
class ExtensionTestCase(base.BaseTestCase):
    """End-to-end test of an API extension adding v2attrs:* attributes."""

    def setUp(self):
        super(ExtensionTestCase, self).setUp()
        plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
        # Ensure existing ExtensionManager is not used
        extensions.PluginAwareExtensionManager._instance = None
        self.useFixture(tools.AttributeMapMemento())
        # Create the default configurations
        self.config_parse()
        # Update the plugin and extensions path
        self.setup_coreplugin(plugin)
        cfg.CONF.set_override('api_extensions_path', EXTDIR)
        self._plugin_patcher = mock.patch(plugin, autospec=True)
        self.plugin = self._plugin_patcher.start()
        # Instantiate mock plugin and enable the V2attributes extension
        manager.NeutronManager.get_plugin().supported_extension_aliases = (
            ["v2attrs"])
        api = router.APIRouter()
        self.api = webtest.TestApp(api)
        # Reset the quota driver so the ConfDriver override takes effect.
        quota.QUOTAS._driver = None
        cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
                              group='QUOTAS')

    def tearDown(self):
        super(ExtensionTestCase, self).tearDown()
        self.api = None
        self.plugin = None

    def test_extended_create(self):
        # Extension-declared attributes must round-trip: accepted on
        # input ('v2attrs:something_else') and returned on output
        # ('v2attrs:something'); input-only attrs are not echoed back.
        net_id = _uuid()
        initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid(),
                                     'v2attrs:something_else': "abc"}}
        data = {'network': {'admin_state_up': True, 'shared': False}}
        data['network'].update(initial_input['network'])
        return_value = {'subnets': [], 'status': "ACTIVE",
                        'id': net_id,
                        'v2attrs:something': "123"}
        return_value.update(data['network'].copy())
        instance = self.plugin.return_value
        instance.create_network.return_value = return_value
        instance.get_networks_count.return_value = 0
        res = self.api.post_json(_get_path('networks'), initial_input)
        instance.create_network.assert_called_with(mock.ANY,
                                                   network=data)
        self.assertEqual(res.status_int, exc.HTTPCreated.code)
        self.assertIn('network', res.json)
        net = res.json['network']
        self.assertEqual(net['id'], net_id)
        self.assertEqual(net['status'], "ACTIVE")
        self.assertEqual(net['v2attrs:something'], "123")
        self.assertNotIn('v2attrs:something_else', net)
class TestSubresourcePlugin(object):
    """Stub plugin exposing the CRUD entry points for the 'dummies'
    sub-resource of networks; every call returns an empty result."""

    def get_network_dummies(self, context, network_id,
                            filters=None, fields=None):
        """List dummies of a network (always empty)."""
        return []

    def get_network_dummy(self, context, id, network_id,
                          fields=None):
        """Show a single dummy (always empty)."""
        return {}

    def create_network_dummy(self, context, network_id, dummy):
        """Create a dummy (no-op)."""
        return {}

    def update_network_dummy(self, context, id, network_id, dummy):
        """Update a dummy (no-op)."""
        return {}

    def delete_network_dummy(self, context, id, network_id):
        """Delete a dummy (no-op)."""
        return None
class ListArgsTestCase(base.BaseTestCase):
    """Tests for api_common.list_args query-string extraction."""

    def test_list_args(self):
        # Repeated 'fields' params are all collected.
        request = webob.Request.blank('/?fields=4&foo=3&fields=2&bar=1')
        collected = api_common.list_args(request, 'fields')
        self.assertEqual(sorted(collected), ['2', '4'])

    def test_list_args_with_empty(self):
        # An absent parameter yields an empty list.
        request = webob.Request.blank('/?foo=4&bar=3&baz=2&qux=1')
        self.assertEqual([], api_common.list_args(request, 'fields'))
class FiltersTestCase(base.BaseTestCase):
    """Tests for api_common.get_filters query-string parsing."""

    def test_all_skip_args(self):
        # Parameters in the skip list are excluded from the filters.
        path = '/?fields=4&fields=3&fields=2&fields=1'
        request = webob.Request.blank(path)
        self.assertEqual({}, api_common.get_filters(request, None,
                                                    ["fields"]))

    def test_blank_values(self):
        # Empty values produce no filter entries.
        path = '/?foo=&bar=&baz=&qux='
        request = webob.Request.blank(path)
        self.assertEqual({}, api_common.get_filters(request, {}))

    def test_no_attr_info(self):
        # Without attribute metadata every value stays a list of strings.
        path = '/?foo=4&bar=3&baz=2&qux=1'
        request = webob.Request.blank(path)
        expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
        actual_val = api_common.get_filters(request, {})
        self.assertEqual(actual_val, expect_val)

    def test_attr_info_without_conversion(self):
        # Metadata without converters leaves the values untouched.
        path = '/?foo=4&bar=3&baz=2&qux=1'
        request = webob.Request.blank(path)
        attr_info = {'foo': {'key': 'val'}}
        expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
        actual_val = api_common.get_filters(request, attr_info)
        self.assertEqual(actual_val, expect_val)

    def test_attr_info_with_convert_list_to(self):
        # convert_list_to merges repeated key=value pairs into a dict.
        path = '/?foo=key=4&bar=3&foo=key=2&qux=1'
        request = webob.Request.blank(path)
        attr_info = {
            'foo': {
                'convert_list_to': attributes.convert_kvp_list_to_dict,
            }
        }
        expect_val = {'foo': {'key': ['2', '4']}, 'bar': ['3'], 'qux': ['1']}
        actual_val = api_common.get_filters(request, attr_info)
        self.assertOrderedEqual(expect_val, actual_val)

    def test_attr_info_with_convert_to(self):
        # convert_to is applied to each of the filter's values.
        path = '/?foo=4&bar=3&baz=2&qux=1'
        request = webob.Request.blank(path)
        attr_info = {'foo': {'convert_to': attributes.convert_to_int}}
        expect_val = {'foo': [4], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
        actual_val = api_common.get_filters(request, attr_info)
        self.assertEqual(actual_val, expect_val)
class CreateResourceTestCase(base.BaseTestCase):
    """Sanity check for the v2 resource factory."""

    def test_resource_creation(self):
        # The factory must return a WSGI application wrapper.
        wsgi_resource = v2_base.create_resource('fakes', 'fake', None, {})
        self.assertIsInstance(wsgi_resource, webob.dec.wsgify)
| {
"content_hash": "cf7bf185908447d11e69138d37b2541a",
"timestamp": "",
"source": "github",
"line_count": 1519,
"max_line_length": 79,
"avg_line_length": 41.46938775510204,
"alnum_prop": 0.5532924815849631,
"repo_name": "eonpatapon/neutron",
"id": "eae4dd21b53f06730f72401f525cf5ead91c0e64",
"size": "63633",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/api/v2/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7388312"
},
{
"name": "Shell",
"bytes": "12912"
}
],
"symlink_target": ""
} |
"""
homeassistant.components.sensor.tellstick
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Shows sensor values from tellstick sensors.
Possible config keys:
id of the sensor: Name the sensor with ID
135=Outside
only_named: Only show the named sensors
only_named=1
temperature_scale: The scale of the temperature value
temperature_scale=°C
datatype_mask: mask to determine which sensor values to show based on
https://tellcore-py.readthedocs.org
/en/v1.0.4/constants.html#module-tellcore.constants
datatype_mask=1 # only show temperature
datatype_mask=12 # only show rain rate and rain total
datatype_mask=127 # show all sensor values
"""
import logging
from collections import namedtuple
import tellcore.telldus as telldus
import tellcore.constants as tellcore_constants
from homeassistant.const import TEMP_CELCIUS
from homeassistant.helpers.entity import Entity
import homeassistant.util as util
DatatypeDescription = namedtuple("DatatypeDescription", ['name', 'unit'])
REQUIREMENTS = ['tellcore-py==1.0.4']
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
    """ Sets up Tellstick sensors.

    Builds one TellstickSensor per (sensor, datatype) combination that is
    enabled by the ``datatype_mask`` config option (default 127 = all).
    Sensors without a configured name are skipped when ``only_named`` is
    set, otherwise their numeric id is used as the name.
    """
    sensor_value_descriptions = {
        tellcore_constants.TELLSTICK_TEMPERATURE:
        DatatypeDescription(
            'temperature', config.get('temperature_scale', TEMP_CELCIUS)),

        tellcore_constants.TELLSTICK_HUMIDITY:
        DatatypeDescription('humidity', '%'),

        tellcore_constants.TELLSTICK_RAINRATE:
        DatatypeDescription('rain rate', ''),

        tellcore_constants.TELLSTICK_RAINTOTAL:
        DatatypeDescription('rain total', ''),

        tellcore_constants.TELLSTICK_WINDDIRECTION:
        DatatypeDescription('wind direction', ''),

        tellcore_constants.TELLSTICK_WINDAVERAGE:
        DatatypeDescription('wind average', ''),

        tellcore_constants.TELLSTICK_WINDGUST:
        DatatypeDescription('wind gust', '')
    }

    try:
        core = telldus.TelldusCore()
    except OSError:
        logging.getLogger(__name__).exception(
            'Could not initialize Tellstick.')
        return

    sensors = []
    datatype_mask = util.convert(config.get('datatype_mask'), int, 127)

    for ts_sensor in core.sensors():
        try:
            sensor_name = config[ts_sensor.id]
        except KeyError:
            # Unnamed sensor: honour only_named, else fall back to the id.
            if util.convert(config.get('only_named'), bool, False):
                continue
            sensor_name = str(ts_sensor.id)

        # Iterate items() directly instead of keys() + a second lookup.
        for datatype, sensor_info in sensor_value_descriptions.items():
            if datatype & datatype_mask and ts_sensor.has_value(datatype):
                sensors.append(
                    TellstickSensor(
                        sensor_name, ts_sensor, datatype, sensor_info))

    add_devices(sensors)
class TellstickSensor(Entity):
    """ Represents a Tellstick sensor. """

    def __init__(self, name, sensor, datatype, sensor_info):
        self.sensor = sensor
        self.datatype = datatype
        # Empty unit strings are normalized to None.
        self._unit_of_measurement = sensor_info.unit or None
        self._name = "{} {}".format(name, sensor_info.name)

    @property
    def name(self):
        """ Returns the name of the device. """
        return self._name

    @property
    def state(self):
        """ Returns the state of the device. """
        return self.sensor.value(self.datatype).value

    @property
    def unit_of_measurement(self):
        """ Unit for the sensor value, or None when unit-less. """
        return self._unit_of_measurement
| {
"content_hash": "58e1e4b21fcda0b748a234c1529e4a1f",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 74,
"avg_line_length": 29.158333333333335,
"alnum_prop": 0.6576164618462418,
"repo_name": "michaelarnauts/home-assistant",
"id": "e93c6e4c97fe80332038910206d8ac333388d344",
"size": "3500",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "homeassistant/components/sensor/tellstick.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "812281"
},
{
"name": "Python",
"bytes": "807777"
},
{
"name": "Shell",
"bytes": "5098"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the boolean ``for_today`` flag to TaskItem (defaults to False)."""

    # Must apply after the app's initial schema migration.
    dependencies = [
        ('tasks', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='taskitem',
            name='for_today',
            field=models.BooleanField(default=False),
        ),
    ]
| {
"content_hash": "621e26191bebfb83581037393bb5c6b1",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 53,
"avg_line_length": 20.625,
"alnum_prop": 0.5666666666666667,
"repo_name": "andreiavram/organizer",
"id": "55f1f40fe83378ca382cdd2a4f82f9949dd152a9",
"size": "379",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tasks/migrations/0002_taskitem_for_today.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1062"
},
{
"name": "HTML",
"bytes": "6009"
},
{
"name": "JavaScript",
"bytes": "6726"
},
{
"name": "Python",
"bytes": "23037"
}
],
"symlink_target": ""
} |
import os
import sys
import math
from chandelier import Chandelier, BROADCAST
import function
import generator
import filter
import random
import common
from time import sleep, time
from color import Color
# Update interval in seconds.  NOTE(review): not referenced below --
# presumably kept for consistency with sibling test scripts; confirm.
DELAY = .02
# Serial device path used to reach the chandelier controller.
device = "/dev/ttyAMA0"

# Connect, blank all fixtures, and seed randomness on both ends.
ch = Chandelier()
ch.open(device)
ch.off(BROADCAST)
ch.send_entropy()
random.seed()

# Solid green modulated by a sawtooth brightness filter
# (Sawtooth(2): presumably the sweep rate -- confirm in generator module).
green = function.ConstantColor(Color(0,255,0))
green.chain(filter.Brightness(generator.Sawtooth(2)))

# Run the pattern at normal speed, then double it.
print("100% speed")
ch.set_speed(BROADCAST, 1000)
ch.send_pattern(BROADCAST, green)
ch.next_pattern(BROADCAST, 0)
sleep(11)
print("200% speed")
ch.set_speed(BROADCAST, 2000)
| {
"content_hash": "501e28b671e42d4ed54a752bdb215316",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 53,
"avg_line_length": 18.96875,
"alnum_prop": 0.7693574958813838,
"repo_name": "mayhem/led-chandelier",
"id": "9df83fd1ef7c60f6bc32e79778a0f8821ca7eb63",
"size": "626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "software/tests/speed.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "33566"
},
{
"name": "C++",
"bytes": "49292"
},
{
"name": "CSS",
"bytes": "38"
},
{
"name": "Makefile",
"bytes": "3393"
},
{
"name": "Python",
"bytes": "54064"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from typing import Any, Mapping, Optional, Sequence, Union
from pydantic import BaseModel, Extra, Field, root_validator, validator
from datadog_checks.base.utils.functions import identity
from datadog_checks.base.utils.models import validation
from . import defaults, validators
# Reader/writer settings for the `auth_token` instance option.
class AuthToken(BaseModel):
    class Config:
        allow_mutation = False  # instances are frozen after creation

    reader: Optional[Mapping[str, Any]]
    writer: Optional[Mapping[str, Any]]
# Entry of the `extra_metrics` option; extra keys are allowed and kept.
class ExtraMetric(BaseModel):
    class Config:
        extra = Extra.allow  # preserve unknown user-supplied keys
        allow_mutation = False

    name: Optional[str]
    type: Optional[str]
# Include/exclude pattern lists for the `metric_patterns` option
# (presumably regex patterns -- confirm against the base check docs).
class MetricPatterns(BaseModel):
    class Config:
        allow_mutation = False

    exclude: Optional[Sequence[str]]
    include: Optional[Sequence[str]]
# Entry of the `metrics` option; extra keys are allowed and kept.
class Metric(BaseModel):
    class Config:
        extra = Extra.allow  # preserve unknown user-supplied keys
        allow_mutation = False

    name: Optional[str]
    type: Optional[str]
# HTTP/HTTPS proxy settings plus a no-proxy host list.
class Proxy(BaseModel):
    class Config:
        allow_mutation = False

    http: Optional[str]
    https: Optional[str]
    no_proxy: Optional[Sequence[str]]
# Value of a `share_labels` mapping entry: which labels to share and
# which to match on.
class ShareLabel(BaseModel):
    class Config:
        allow_mutation = False

    labels: Optional[Sequence[str]]
    match: Optional[Sequence[str]]
# Immutable model of one check instance's configuration.
# NOTE(review): this file looks machine-generated from the check's
# spec -- confirm before hand-editing field declarations.
class InstanceConfig(BaseModel):
    class Config:
        allow_mutation = False

    allow_redirects: Optional[bool]
    auth_token: Optional[AuthToken]
    auth_type: Optional[str]
    aws_host: Optional[str]
    aws_region: Optional[str]
    aws_service: Optional[str]
    cache_metric_wildcards: Optional[bool]
    cache_shared_labels: Optional[bool]
    collect_counters_with_distributions: Optional[bool]
    collect_histogram_buckets: Optional[bool]
    connect_timeout: Optional[float]
    disable_generic_tags: Optional[bool]
    empty_default_hostname: Optional[bool]
    enable_health_service_check: Optional[bool]
    exclude_labels: Optional[Sequence[str]]
    exclude_metrics: Optional[Sequence[str]]
    exclude_metrics_by_labels: Optional[Mapping[str, Union[bool, Sequence[str]]]]
    extra_headers: Optional[Mapping[str, Any]]
    extra_metrics: Optional[Sequence[Union[str, Mapping[str, Union[str, ExtraMetric]]]]]
    headers: Optional[Mapping[str, Any]]
    histogram_buckets_as_distributions: Optional[bool]
    hostname_format: Optional[str]
    hostname_label: Optional[str]
    ignore_tags: Optional[Sequence[str]]
    include_labels: Optional[Sequence[str]]
    kerberos_auth: Optional[str]
    kerberos_cache: Optional[str]
    kerberos_delegate: Optional[bool]
    kerberos_force_initiate: Optional[bool]
    kerberos_hostname: Optional[str]
    kerberos_keytab: Optional[str]
    kerberos_principal: Optional[str]
    log_requests: Optional[bool]
    metric_patterns: Optional[MetricPatterns]
    metrics: Optional[Sequence[Union[str, Mapping[str, Union[str, Metric]]]]]
    min_collection_interval: Optional[float]
    namespace: Optional[str] = Field(None, regex='\\w*')
    non_cumulative_histogram_buckets: Optional[bool]
    ntlm_domain: Optional[str]
    # The only required option: where to scrape OpenMetrics data from.
    openmetrics_endpoint: str
    password: Optional[str]
    persist_connections: Optional[bool]
    proxy: Optional[Proxy]
    raw_line_filters: Optional[Sequence[str]]
    raw_metric_prefix: Optional[str]
    read_timeout: Optional[float]
    rename_labels: Optional[Mapping[str, Any]]
    request_size: Optional[float]
    service: Optional[str]
    share_labels: Optional[Mapping[str, Union[bool, ShareLabel]]]
    skip_proxy: Optional[bool]
    tag_by_endpoint: Optional[bool]
    tags: Optional[Sequence[str]]
    telemetry: Optional[bool]
    timeout: Optional[float]
    tls_ca_cert: Optional[str]
    tls_cert: Optional[str]
    tls_ignore_warning: Optional[bool]
    tls_private_key: Optional[str]
    tls_protocols_allowed: Optional[Sequence[str]]
    tls_use_host_header: Optional[bool]
    tls_verify: Optional[bool]
    use_latest_spec: Optional[bool]
    use_legacy_auth_encoding: Optional[bool]
    use_process_start_time: Optional[bool]
    username: Optional[str]

    # Runs before field parsing: applies the optional
    # `validators.initialize_instance` hook, then shared initialization.
    @root_validator(pre=True)
    def _initial_validation(cls, values):
        return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values))

    # Fills a default from `defaults.instance_<field>` for any optional
    # field that was left as None.
    @validator('*', pre=True, always=True)
    def _ensure_defaults(cls, v, field):
        if v is not None or field.required:
            return v
        return getattr(defaults, f'instance_{field.name}')(field, v)

    # Applies the per-field `validators.instance_<field>` hook, if any,
    # to truthy values.
    @validator('*')
    def _run_validations(cls, v, field):
        if not v:
            return v
        return getattr(validators, f'instance_{field.name}', identity)(v, field=field)

    # Runs after parsing: final cross-field fix-ups via the optional
    # `validators.finalize_instance` hook.
    @root_validator(pre=False)
    def _final_validation(cls, values):
        return validation.core.finalize_config(getattr(validators, 'finalize_instance', identity)(values))
| {
"content_hash": "0e2d2fea7a8608eb19229995923d4687",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 110,
"avg_line_length": 31.006451612903227,
"alnum_prop": 0.6978776529338327,
"repo_name": "DataDog/integrations-core",
"id": "e6b199569bc3e1a0cc7dc5889a7977f83fa45177",
"size": "5163",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "arangodb/datadog_checks/arangodb/config_models/instance.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
import sys
import six
def safe_decode(text, incoming=None, errors='strict'):
    """Decodes incoming text/bytes string using `incoming` if they're not
    already unicode.

    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be decoded" % type(text))

    # Already unicode: nothing to do.
    if isinstance(text, six.text_type):
        return text

    # Fall back to the interpreter defaults when no encoding was given.
    incoming = incoming or (sys.stdin.encoding or
                            sys.getdefaultencoding())

    try:
        return text.decode(incoming, errors)
    except UnicodeDecodeError:
        # Note(flaper87) The guessed encoding could not decode the
        # bytes.  This happens mostly when the global LANG var is not
        # set correctly and there's no default encoding, in which case
        # python will likely use ASCII or ANSI encoders that cannot
        # handle non-ASCII characters.  Retry with UTF-8, since it is
        # an ASCII extension.
        return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
                encoding='utf-8', errors='strict'):
    """Encodes incoming text/bytes string using `encoding`.

    If incoming is not specified, text is expected to be encoded with
    current python's default encoding. (`sys.getdefaultencoding`)

    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be encoded" % type(text))

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    # Normalize case so 'UTF-8' and 'utf-8' compare equal below.
    if hasattr(incoming, 'lower'):
        incoming = incoming.lower()
    if hasattr(encoding, 'lower'):
        encoding = encoding.lower()

    if isinstance(text, six.text_type):
        return text.encode(encoding, errors)
    if text and encoding != incoming:
        # Bytes in a different encoding: decode first, then re-encode.
        decoded = safe_decode(text, incoming, errors)
        return decoded.encode(encoding, errors)
    return text
| {
"content_hash": "c3523d739d49d1bd437bc2dad09abec6",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 73,
"avg_line_length": 36.1,
"alnum_prop": 0.6478531855955678,
"repo_name": "varunarya10/oslo.utils",
"id": "14bd7175903884410d7cf1b188cefc252f106fa0",
"size": "3517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oslo_utils/encodeutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "273680"
}
],
"symlink_target": ""
} |
import os
import sys

# -- General configuration --------------------------------------------------
extensions = []
source_suffix = '.rst'
master_doc = 'README'

# -- Project metadata -------------------------------------------------------
project = u'i19'
copyright = u'2013, Johannes Steger <jss@coders.de>'
version = '0.1'
release = version

# -- Build behaviour --------------------------------------------------------
exclude_patterns = ['build']
pygments_style = 'sphinx'

# -- HTML output ------------------------------------------------------------
html_static_path = ['_static']
htmlhelp_basename = 'i19doc'
html_sidebars = {'**': ['localtoc.html']}
| {
"content_hash": "8f8f382a91b2e8071037ead131155149",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 52,
"avg_line_length": 27.416666666666668,
"alnum_prop": 0.6747720364741642,
"repo_name": "johaness/i19",
"id": "3cb574c28dafcba040417b0fc4d0947b6d220899",
"size": "329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "9063"
},
{
"name": "Python",
"bytes": "13111"
}
],
"symlink_target": ""
} |
"""Shared OS X support functions."""
import os
import re
import sys
# Public helpers consumed by sysconfig and distutils.
__all__ = [
    'compiler_fixup',
    'customize_config_vars',
    'customize_compiler',
    'get_platform_osx',
]

# configuration variables that may contain universal build flags,
# like "-arch" or "-isdkroot", that may need customization for
# the user environment
_UNIVERSAL_CONFIG_VARS = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS', 'BASECFLAGS',
                          'BLDSHARED', 'LDSHARED', 'CC', 'CXX',
                          'PY_CFLAGS', 'PY_LDFLAGS', 'PY_CPPFLAGS',
                          'PY_CORE_CFLAGS')

# configuration variables that may contain compiler calls
_COMPILER_CONFIG_VARS = ('BLDSHARED', 'LDSHARED', 'CC', 'CXX')

# prefix added to original configuration variable names
# (so the pristine value is kept alongside the customized one)
_INITPRE = '_OSX_SUPPORT_INITIAL_'
def _find_executable(executable, path=None):
"""Tries to find 'executable' in the directories listed in 'path'.
A string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']. Returns the complete filename or None if not found.
"""
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
base, ext = os.path.splitext(executable)
if (sys.platform == 'win32') and (ext != '.exe'):
executable = executable + '.exe'
if not os.path.isfile(executable):
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
else:
return executable
def _read_output(commandstring):
"""Output from successful command execution or None"""
# Similar to os.popen(commandstring, "r").read(),
# but without actually using os.popen because that
# function is not usable during python bootstrap.
# tempfile is also not available then.
import contextlib
try:
import tempfile
fp = tempfile.NamedTemporaryFile()
except ImportError:
fp = open("/tmp/_osx_support.%s"%(
os.getpid(),), "w+b")
with contextlib.closing(fp) as fp:
cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name)
return fp.read().decode('utf-8').strip() if not os.system(cmd) else None
def _find_build_tool(toolname):
    """Find a build tool on current path or using xcrun"""
    # Prefer a tool on $PATH; otherwise ask xcrun (Xcode) to locate it.
    found = _find_executable(toolname)
    if not found:
        found = _read_output("/usr/bin/xcrun -find %s" % (toolname,))
    return found or ''
_SYSTEM_VERSION = None
def _get_system_version():
"""Return the OS X system version as a string"""
# Reading this plist is a documented way to get the system
# version (see the documentation for the Gestalt Manager)
# We avoid using platform.mac_ver to avoid possible bootstrap issues during
# the build of Python itself (distutils is used to build standard library
# extensions).
global _SYSTEM_VERSION
if _SYSTEM_VERSION is None:
_SYSTEM_VERSION = ''
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except OSError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
r'<string>(.*?)</string>', f.read())
finally:
f.close()
if m is not None:
_SYSTEM_VERSION = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
return _SYSTEM_VERSION
def _remove_original_values(_config_vars):
    """Remove original unmodified values for testing"""
    # This is needed for higher-level cross-platform tests of get_platform.
    saved_keys = [k for k in _config_vars if k.startswith(_INITPRE)]
    for k in saved_keys:
        del _config_vars[k]
def _save_modified_value(_config_vars, cv, newvalue):
    """Save modified and original unmodified value of configuration var"""
    original = _config_vars.get(cv, '')
    backup_key = _INITPRE + cv
    # Stash only the very first pristine value; later modifications must
    # not overwrite the saved original.
    if original != newvalue and backup_key not in _config_vars:
        _config_vars[backup_key] = original
    _config_vars[cv] = newvalue
def _supports_universal_builds():
    """Returns True if universal builds are supported on this system"""
    # As an approximation, assume that 10.4 and later ship an Xcode
    # environment that understands -isysroot and -arch, in support of
    # letting 10.4 universal builds run on 10.3.x systems.
    osx_version = _get_system_version()
    if not osx_version:
        return False
    try:
        parts = tuple(int(i) for i in osx_version.split('.'))
    except ValueError:
        # Unparseable version string: be conservative.
        return False
    return parts >= (10, 4)
def _find_appropriate_compiler(_config_vars):
    """Find appropriate C compiler for extension module builds

    Mutates ``_config_vars`` in place (via _save_modified_value) and
    returns it.  Raises SystemError when no working compiler is found.
    """
    # Issue #13590:
    #    The OSX location for the compiler varies between OSX
    #    (or rather Xcode) releases. With older releases (up-to 10.5)
    #    the compiler is in /usr/bin, with newer releases the compiler
    #    can only be found inside Xcode.app if the "Command Line Tools"
    #    are not installed.
    #
    #    Furthermore, the compiler that can be used varies between
    #    Xcode releases. Up to Xcode 4 it was possible to use 'gcc-4.2'
    #    as the compiler, after that 'clang' should be used because
    #    gcc-4.2 is either not present, or a copy of 'llvm-gcc' that
    #    miscompiles Python.

    # skip checks if the compiler was overridden with a CC env variable
    if 'CC' in os.environ:
        return _config_vars

    # The CC config var might contain additional arguments.
    # Ignore them while searching.
    cc = oldcc = _config_vars['CC'].split()[0]
    if not _find_executable(cc):
        # Compiler is not found on the shell search PATH.
        # Now search for clang, first on PATH (if the Command LIne
        # Tools have been installed in / or if the user has provided
        # another location via CC).  If not found, try using xcrun
        # to find an uninstalled clang (within a selected Xcode).

        # NOTE: Cannot use subprocess here because of bootstrap
        # issues when building Python itself (and os.popen is
        # implemented on top of subprocess and is therefore not
        # usable as well)
        cc = _find_build_tool('clang')

    elif os.path.basename(cc).startswith('gcc'):
        # Compiler is GCC, check if it is LLVM-GCC
        # (the single quotes in cc are shell-escaped before interpolation)
        data = _read_output("'%s' --version"
                            % (cc.replace("'", "'\"'\"'"),))
        if data and 'llvm-gcc' in data:
            # Found LLVM-GCC, fall back to clang
            cc = _find_build_tool('clang')

    if not cc:
        raise SystemError(
            "Cannot locate working compiler")

    if cc != oldcc:
        # Found a replacement compiler.
        # Modify config vars using new compiler, if not already explicitly
        # overridden by an env variable, preserving additional arguments.
        for cv in _COMPILER_CONFIG_VARS:
            if cv in _config_vars and cv not in os.environ:
                cv_split = _config_vars[cv].split()
                # CXX gets the C++ driver of the same toolchain ('cc++').
                cv_split[0] = cc if cv != 'CXX' else cc + '++'
                _save_modified_value(_config_vars, cv, ' '.join(cv_split))

    return _config_vars
def _remove_universal_flags(_config_vars):
    """Remove all universal build arguments from config vars"""
    for cv in _UNIVERSAL_CONFIG_VARS:
        # Do not alter a config var explicitly overridden by env var
        if cv not in _config_vars or cv in os.environ:
            continue
        flags = _config_vars[cv]
        flags = re.sub(r'-arch\s+\w+\s', ' ', flags, flags=re.ASCII)
        flags = re.sub('-isysroot [^ \t]*', ' ', flags)
        _save_modified_value(_config_vars, cv, flags)
    return _config_vars
def _remove_unsupported_archs(_config_vars):
    """Remove any unsupported archs from config vars"""
    # Different Xcode releases support different sets for '-arch'
    # flags. In particular, Xcode 4.x no longer supports the
    # PPC architectures.
    #
    # This code automatically removes '-arch ppc' and '-arch ppc64'
    # when these are not supported. That makes it possible to
    # build extensions on OSX 10.7 and later with the prebuilt
    # 32-bit installer on the python.org website.

    # skip checks if the compiler was overridden with a CC env variable
    if 'CC' in os.environ:
        return _config_vars

    # Raw strings are required here: '\s' is an invalid escape sequence
    # in a plain string literal (DeprecationWarning since Python 3.6).
    if re.search(r'-arch\s+ppc', _config_vars['CFLAGS']) is not None:
        # NOTE: Cannot use subprocess here because of bootstrap
        # issues when building Python itself
        status = os.system(
            """echo 'int main{};' | """
            """'%s' -c -arch ppc -x c -o /dev/null /dev/null 2>/dev/null"""
            % (_config_vars['CC'].replace("'", "'\"'\"'"),))
        if status:
            # The compile failed for some reason.  Because of differences
            # across Xcode and compiler versions, there is no reliable way
            # to be sure why it failed.  Assume here it was due to lack of
            # PPC support and remove the related '-arch' flags from each
            # config variable not explicitly overridden by an environment
            # variable.  If the error was for some other reason, we hope the
            # failure will show up again when trying to compile an extension
            # module.
            for cv in _UNIVERSAL_CONFIG_VARS:
                if cv in _config_vars and cv not in os.environ:
                    flags = _config_vars[cv]
                    flags = re.sub(r'-arch\s+ppc\w*\s', ' ', flags)
                    _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def _override_all_archs(_config_vars):
    """Allow override of all archs with ARCHFLAGS env var"""
    # NOTE: This name was introduced by Apple in OSX 10.5 and
    # is used by several scripting languages distributed with
    # that OS release.
    if 'ARCHFLAGS' in os.environ:
        arch = os.environ['ARCHFLAGS']
        for cv in _UNIVERSAL_CONFIG_VARS:
            if cv in _config_vars and '-arch' in _config_vars[cv]:
                flags = _config_vars[cv]
                # Raw string: '\s' in a plain literal is an invalid
                # escape sequence (DeprecationWarning since Python 3.6).
                flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                flags = flags + ' ' + arch
                _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def _check_for_unavailable_sdk(_config_vars):
    """Remove references to any SDKs not available"""
    # On OSX 10.5+ it is better to build without an SDK than to fail
    # because the configured one is missing.  This matters notably for
    # the standalone Command Line Tools, which ship no SDKs and install
    # headers and dev libs under /usr and /System/Library instead.
    cflags = _config_vars.get('CFLAGS', '')
    m = re.search(r'-isysroot\s+(\S+)', cflags)
    if m is None:
        return _config_vars

    sdk = m.group(1)
    if os.path.exists(sdk):
        return _config_vars

    for cv in _UNIVERSAL_CONFIG_VARS:
        # Do not alter a config var explicitly overridden by env var
        if cv in _config_vars and cv not in os.environ:
            flags = _config_vars[cv]
            flags = re.sub(r'-isysroot\s+\S+(?:\s|$)', ' ', flags)
            _save_modified_value(_config_vars, cv, flags)

    return _config_vars
def compiler_fixup(compiler_so, cc_args):
    """
    This function will strip '-isysroot PATH' and '-arch ARCH' from the
    compile flags if the user has specified one them in extra_compile_flags.

    This is needed because '-arch ARCH' adds another architecture to the
    build, without a way to remove an architecture. Furthermore GCC will
    barf if multiple '-isysroot' arguments are present.

    Returns a (possibly modified) copy of ``compiler_so`` as a list.
    """
    stripArch = stripSysroot = False

    # Work on a copy so the caller's list is never mutated.
    compiler_so = list(compiler_so)

    if not _supports_universal_builds():
        # OSX before 10.4.0, these don't support -arch and -isysroot at
        # all.
        stripArch = stripSysroot = True
    else:
        stripArch = '-arch' in cc_args
        stripSysroot = '-isysroot' in cc_args

    if stripArch or 'ARCHFLAGS' in os.environ:
        # Remove every '-arch ARCH' pair from the compiler invocation.
        while True:
            try:
                index = compiler_so.index('-arch')
                # Strip this argument and the next one:
                del compiler_so[index:index+2]
            except ValueError:
                break

    if 'ARCHFLAGS' in os.environ and not stripArch:
        # User specified different -arch flags in the environ,
        # see also distutils.sysconfig
        compiler_so = compiler_so + os.environ['ARCHFLAGS'].split()

    if stripSysroot:
        # Remove every '-isysroot PATH' pair as well.
        while True:
            try:
                index = compiler_so.index('-isysroot')
                # Strip this argument and the next one:
                del compiler_so[index:index+2]
            except ValueError:
                break

    # Check if the SDK that is used during compilation actually exists,
    # the universal build requires the usage of a universal SDK and not all
    # users have that installed by default.
    sysroot = None
    if '-isysroot' in cc_args:
        idx = cc_args.index('-isysroot')
        sysroot = cc_args[idx+1]
    elif '-isysroot' in compiler_so:
        idx = compiler_so.index('-isysroot')
        sysroot = compiler_so[idx+1]

    if sysroot and not os.path.isdir(sysroot):
        # Warn but continue: the compile may still succeed without it.
        from distutils import log
        log.warn("Compiling with an SDK that doesn't seem to exist: %s",
                 sysroot)
        log.warn("Please check your Xcode installation")

    return compiler_so
def customize_config_vars(_config_vars):
    """Customize Python build configuration variables.

    Called internally from sysconfig with a mutable mapping of
    name/value pairs parsed from the makefile used to build this
    interpreter.  Returns the mapping updated for the environment the
    interpreter is actually running in: a Python from a binary
    installer may face different OS levels, build tools and CPU
    architectures than it was built with.

    This runs the first time distutils.sysconfig.get_config_vars() is
    called, possibly on machines with no compilers at all (pure Python
    installs).  Compiler-path customization and unavailable-arch
    detection are therefore deferred until the first extension module
    build (distutils.sysconfig.customize_compiler).

    Currently called from distutils.sysconfig
    """
    if not _supports_universal_builds():
        # On Mac OS X before 10.4, a universal build of Python cannot
        # build universal extensions: strip -arch and -isysroot from
        # CFLAGS/LDFLAGS if present.
        _remove_universal_flags(_config_vars)

    # Allow user to override all archs with ARCHFLAGS env var
    _override_all_archs(_config_vars)

    # Remove references to sdks that are not found
    _check_for_unavailable_sdk(_config_vars)

    return _config_vars
def customize_compiler(_config_vars):
    """Customize compiler path and configuration variables.

    This customization is performed when the first extension module
    build is requested (in distutils.sysconfig.customize_compiler).
    """
    # Pick a working compiler, drop PPC arch flags the toolchain no
    # longer supports, then honour any user-level ARCHFLAGS override.
    _find_appropriate_compiler(_config_vars)
    _remove_unsupported_archs(_config_vars)
    _override_all_archs(_config_vars)
    return _config_vars
def get_platform_osx(_config_vars, osname, release, machine):
    """Filter values for get_platform().

    Called from get_platform() in sysconfig and distutils.util.
    Returns an adjusted (osname, release, machine) triple for OS X:
    the release reported is MACOSX_DEPLOYMENT_TARGET (the version the
    interpreter will compile and link for), and the machine name
    reflects the architecture set the interpreter was built for
    ('fat', 'intel', 'universal', ...).
    """
    # For our purposes, we'll assume that the system version from
    # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
    # to. This makes the compatibility story a bit more sane because the
    # machine is going to compile and link as if it were
    # MACOSX_DEPLOYMENT_TARGET.
    macver = _config_vars.get('MACOSX_DEPLOYMENT_TARGET', '')
    macrelease = _get_system_version() or macver
    macver = macver or macrelease

    if macver:
        release = macver
        osname = "macosx"

        # Use the original CFLAGS value, if available, so that we
        # return the same machine type for the platform string.
        # Otherwise, distutils may consider this a cross-compiling
        # case and disallow installs.
        cflags = _config_vars.get(_INITPRE + 'CFLAGS',
                                  _config_vars.get('CFLAGS', ''))
        if macrelease:
            try:
                macrelease = tuple(int(i) for i in macrelease.split('.')[0:2])
            except ValueError:
                macrelease = (10, 0)
        else:
            # assume no universal support
            macrelease = (10, 0)

        if (macrelease >= (10, 4)) and '-arch' in cflags.strip():
            # The universal build will build fat binaries, but not on
            # systems before 10.4
            machine = 'fat'

            # Bug fix: the pattern must be a raw string; '\s'/'\S' in a
            # plain literal are invalid escape sequences (deprecated,
            # an error in future Python versions).
            archs = re.findall(r'-arch\s+(\S+)', cflags)
            archs = tuple(sorted(set(archs)))

            if len(archs) == 1:
                machine = archs[0]
            elif archs == ('i386', 'ppc'):
                machine = 'fat'
            elif archs == ('i386', 'x86_64'):
                machine = 'intel'
            elif archs == ('i386', 'ppc', 'x86_64'):
                machine = 'fat3'
            elif archs == ('ppc64', 'x86_64'):
                machine = 'fat64'
            elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                machine = 'universal'
            else:
                raise ValueError(
                    "Don't know machine value for archs=%r" % (archs,))

        elif machine == 'i386':
            # On OSX the machine type returned by uname is always the
            # 32-bit variant, even if the executable architecture is
            # the 64-bit variant
            if sys.maxsize >= 2**32:
                machine = 'x86_64'

        elif machine in ('PowerPC', 'Power_Macintosh'):
            # Pick a sane name for the PPC architecture.
            # See 'i386' case
            if sys.maxsize >= 2**32:
                machine = 'ppc64'
            else:
                machine = 'ppc'

    return (osname, release, machine)
| {
"content_hash": "9ad96788cbc20dbbc37741113a1dfde2",
"timestamp": "",
"source": "github",
"line_count": 502,
"max_line_length": 80,
"avg_line_length": 38.07768924302789,
"alnum_prop": 0.6127648443630657,
"repo_name": "batermj/algorithm-challenger",
"id": "03d36c9f600a66ae5722d4149ff6a56cfe72a19b",
"size": "19115",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/_osx_support.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "655185"
},
{
"name": "Batchfile",
"bytes": "127416"
},
{
"name": "C",
"bytes": "33127630"
},
{
"name": "C++",
"bytes": "1364796"
},
{
"name": "CSS",
"bytes": "3163"
},
{
"name": "Common Lisp",
"bytes": "48962"
},
{
"name": "DIGITAL Command Language",
"bytes": "26402"
},
{
"name": "DTrace",
"bytes": "2196"
},
{
"name": "Go",
"bytes": "26248"
},
{
"name": "HTML",
"bytes": "385719"
},
{
"name": "Haskell",
"bytes": "33612"
},
{
"name": "Java",
"bytes": "1084"
},
{
"name": "JavaScript",
"bytes": "20754"
},
{
"name": "M4",
"bytes": "403992"
},
{
"name": "Makefile",
"bytes": "238185"
},
{
"name": "Objective-C",
"bytes": "4934684"
},
{
"name": "PHP",
"bytes": "3513"
},
{
"name": "PLSQL",
"bytes": "45772"
},
{
"name": "Perl",
"bytes": "649"
},
{
"name": "PostScript",
"bytes": "27606"
},
{
"name": "PowerShell",
"bytes": "21737"
},
{
"name": "Python",
"bytes": "55270625"
},
{
"name": "R",
"bytes": "29951"
},
{
"name": "Rich Text Format",
"bytes": "14551"
},
{
"name": "Roff",
"bytes": "292490"
},
{
"name": "Ruby",
"bytes": "519"
},
{
"name": "Scala",
"bytes": "846446"
},
{
"name": "Shell",
"bytes": "491113"
},
{
"name": "Swift",
"bytes": "881"
},
{
"name": "TeX",
"bytes": "337654"
},
{
"name": "VBScript",
"bytes": "140"
},
{
"name": "XSLT",
"bytes": "153"
}
],
"symlink_target": ""
} |
import numpy as np
import tensorflow as tf
from agents import BasicAgent, capacities
from agents.capacities import get_expected_rewards, build_batches
class PolicyAgent(BasicAgent):
    """Monte-Carlo policy-gradient (REINFORCE) agent.

    Collects whole episodes, turns per-step rewards into discounted
    returns and performs one gradient step on the policy network per
    batch of trajectories.
    """

    def set_agent_props(self):
        """Read hyper-parameters from self.config and set up dtypes."""
        # Network input is the raw observation plus one extra scalar
        # slot (a terminal flag, always 0. while acting — see act()).
        self.policy_params = {
            'nb_inputs': self.observation_space.shape[0] + 1,
            'nb_units': self.config['nb_units'],
            'nb_outputs': self.action_space.n,
            'initial_mean': self.config['initial_mean'],
            'initial_stddev': self.config['initial_stddev'],
        }
        self.lr = self.config['lr']
        self.discount = self.config['discount']
        self.batch_size = 8

        # Field names used when turning the structured replay arrays
        # into training batches (see build_batches).
        self.dtKeys = ['states', 'actions', 'rewards']
        self.memoryDt = np.dtype([
            ('states', 'float32', (self.policy_params['nb_inputs'],)),
            ('actions', 'int32', (1,)),
            ('rewards', 'float32', (1,)),
        ])

    def get_best_config(self, env_name=""):
        """Return a hand-tuned configuration for this agent."""
        return {
            'lr': 3e-3,
            'discount': 0.99,
            'nb_units': 41,
            'initial_mean': 0.,
            'initial_stddev': 0.3,
        }

    @staticmethod
    def get_random_config(fixed_params=None):
        """Sample a random hyper-parameter configuration.

        Args:
            fixed_params: optional mapping whose entries override the
                sampled values.

        Bug fix: the original used a mutable default argument
        (``fixed_params={}``), which is shared across calls; ``None``
        is used as the sentinel instead (backward compatible).
        """
        get_lr = lambda: 1e-4 + (1e-1 - 1e-4) * np.random.random(1)[0]
        get_discount = lambda: 0.5 + (1 - 0.5) * np.random.random(1)[0]
        get_nb_units = lambda: np.random.randint(10, 100)
        get_initial_mean = lambda: 0
        get_initial_stddev = lambda: 5e-1 * np.random.random(1)[0]

        random_config = {
            'lr': get_lr(),
            'discount': get_discount(),
            'nb_units': get_nb_units(),
            'initial_mean': get_initial_mean(),
            'initial_stddev': get_initial_stddev(),
        }
        random_config.update(fixed_params or {})

        return random_config

    def build_graph(self, graph):
        """Build the TF1 graph: policy network + REINFORCE loss."""
        np.random.seed(self.random_seed)
        with graph.as_default():
            tf.set_random_seed(self.random_seed)

            # Dims: bs x num_steps x state_size
            self.inputs = tf.placeholder(tf.float32, shape=[None, None, self.policy_params['nb_inputs']], name='inputs')
            input_shape = tf.shape(self.inputs)
            dynamic_batch_size, dynamic_num_steps = input_shape[0], input_shape[1]

            policy_scope = tf.VariableScope(reuse=False, name='Policy')
            with tf.variable_scope(policy_scope):
                # Flatten (bs, steps) so the policy net sees a 2-D batch.
                policy_inputs = tf.reshape(self.inputs, [-1, self.policy_params['nb_inputs']])
                probs, actions = capacities.policy(self.policy_params, policy_inputs)
                self.probs = tf.reshape(probs, [dynamic_batch_size, dynamic_num_steps, self.policy_params['nb_outputs']])
                self.actions = tf.reshape(actions, [dynamic_batch_size, dynamic_num_steps, 1])
            # Scalar action for single-state inference in act().
            self.action_t = self.actions[0, 0, 0]

            with tf.variable_scope('Training'):
                self.rewards = tf.placeholder(tf.float32, shape=[None, None, 1], name="reward")
                self.mask_plh = tf.placeholder(tf.float32, shape=[None, None, 1], name="mask_plh")

                # Mean return serves as a crude constant baseline.
                baseline = tf.reduce_mean(self.rewards)

                # Build (row, col, action) index triples to gather the
                # probability of the action taken at each timestep.
                batch_size, num_steps = tf.shape(self.actions)[0], tf.shape(self.actions)[1]
                line_indices = tf.matmul(  # Line indices
                    tf.reshape(tf.range(0, batch_size), [-1, 1])
                    , tf.ones([1, num_steps], dtype=tf.int32)
                )
                column_indices = tf.matmul(  # Column indices
                    tf.ones([batch_size, 1], dtype=tf.int32)
                    , tf.reshape(tf.range(0, num_steps), [1, -1])
                )
                depth_indices = tf.cast(tf.squeeze(self.actions, 2), tf.int32)
                stacked_actions = tf.stack(
                    [line_indices, column_indices, depth_indices], 2
                )
                log_probs = tf.expand_dims(tf.log(tf.gather_nd(self.probs, stacked_actions)), 2)

                # We want to average on sequence; the mask zeroes out
                # padded timesteps.
                self.loss = tf.reduce_mean( - tf.reduce_sum((log_probs * (self.rewards - baseline)) * self.mask_plh, 1))

                adam = tf.train.AdamOptimizer(self.lr)
                self.global_step = tf.Variable(0, trainable=False, name="global_step", collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
                self.train_op = adam.minimize(self.loss, global_step=self.global_step)

            # Summaries are fed from python scalars via placeholders.
            self.score_plh = tf.placeholder(tf.float32, shape=[])
            self.score_sum_t = tf.summary.scalar('av_score', self.score_plh)
            self.loss_plh = tf.placeholder(tf.float32, shape=[])
            self.loss_sum_t = tf.summary.scalar('loss', self.loss_plh)
            self.all_summary_t = tf.summary.merge_all()

            self.episode_id, self.inc_ep_id_op = capacities.counter("episode_id")

            # Playing part
            self.pscore_plh = tf.placeholder(tf.float32, shape=[])
            self.pscore_sum_t = tf.summary.scalar('play_score', self.pscore_plh)

        return graph

    def act(self, obs):
        """Sample one action for a single observation.

        Returns:
            (action, state) where state is the observation extended
            with the terminal flag (0. while acting).
        """
        state = np.concatenate((obs, [float(False)]))
        act = self.sess.run(self.action_t, feed_dict={
            self.inputs: [[state]]
        })
        return (act, state)

    def train(self, render=False, save_every=49):
        """Main training loop: one policy update per trajectory batch."""
        for i in range(0, self.max_iter, self.batch_size):
            # Collect a batch of trajectories
            sequence_history, episode_id = self.collect_samples(self.env, render, self.batch_size)

            # On-policy learning only
            batches = build_batches(self.dtKeys, sequence_history, len(sequence_history))
            self.train_controller(batches[0])

            # Save whenever the save_every boundary was crossed inside
            # this batch of iterations.
            if save_every > 0 and i % save_every > (i + self.batch_size) % save_every:
                self.save()

    def collect_samples(self, env, render, nb_sequence=1):
        """Roll out nb_sequence full episodes with the current policy.

        Returns:
            (sequence_history, episode_id): a list of structured
            per-episode arrays, and the id after incrementing the
            episode counter once per episode.
        """
        sequence_history = []
        av_score = []
        for i in range(nb_sequence):
            obs = env.reset()
            score = 0
            history = np.array([], dtype=self.memoryDt)
            done = False
            while True:
                if render:
                    env.render()

                act, state = self.act(obs)
                next_obs, reward, done, info = env.step(act)

                memory = np.array([(state, act, reward)], dtype=self.memoryDt)
                history = np.append(history, memory)
                score += reward
                obs = next_obs
                if done:
                    break

            sequence_history.append(history)
            av_score.append(score)
            self.sess.run(self.inc_ep_id_op)

        # Bug fix: summarize the average score over the collected
        # episodes; the original fed np.mean(score), i.e. only the
        # last episode's score (av_score was accumulated but unused).
        summary, episode_id = self.sess.run([self.score_sum_t, self.episode_id], feed_dict={
            self.score_plh: np.mean(av_score),
        })
        self.sw.add_summary(summary, episode_id)

        return sequence_history, episode_id

    def train_controller(self, batch):
        """Perform one REINFORCE update on a batch of episodes."""
        # NOTE: rewrites batch['rewards'] in place with the discounted
        # returns; callers observe the mutation.
        for i, episode_rewards in enumerate(batch['rewards']):
            batch['rewards'][i] = get_expected_rewards(episode_rewards, self.discount)

        _, loss = self.sess.run([self.train_op, self.loss], feed_dict={
            self.inputs: batch['states'],
            self.actions: batch['actions'],
            self.rewards: batch['rewards'],
            self.mask_plh: batch['mask'],
        })

        summary, episode_id = self.sess.run([self.loss_sum_t, self.episode_id], feed_dict={
            self.loss_plh: np.mean(loss),
        })
        self.sw.add_summary(summary, episode_id)

        return
class ActorQCriticAgent(PolicyAgent):
    """Actor-critic agent with an action-value (Q) critic.

    The policy gradient is weighted by the critic's Q(s, a) instead of
    the raw returns; the critic is fitted to one-step SARSA-style
    targets computed with a periodically synchronised frozen copy of
    the critic variables.
    """

    def set_agent_props(self):
        """Extend PolicyAgent props with critic hyper-parameters."""
        super(ActorQCriticAgent, self).set_agent_props()

        # Critic outputs one value per action (a Q head).
        self.critic_params = {
            'nb_inputs': self.observation_space.shape[0] + 1,
            'nb_units': self.config['nb_units'],
            'nb_outputs': self.action_space.n,
            'initial_mean': self.config['initial_mean'],
            'initial_stddev': self.config['initial_stddev'],
        }
        self.critic_lr = self.config['critic_lr']
        self.nb_critic_iter = self.config['nb_critic_iter']

        self.dtKeys = ['states', 'actions', 'rewards', 'next_states', 'next_actions']
        self.memoryDt = np.dtype([
            ('states', 'float32', (self.policy_params['nb_inputs'],)),
            ('actions', 'int32', (1,)),
            ('rewards', 'float32', (1,)),
            ('next_states', 'float32', (self.policy_params['nb_inputs'],)),
            ('next_actions', 'int32', (1,)),
        ])

    def get_best_config(self, env_name=""):
        """Return a hand-tuned configuration for this agent.

        NOTE(review): set_agent_props also reads
        self.config['nb_critic_iter'], which this dict does not
        provide — confirm the missing key is supplied elsewhere.
        """
        return {
            'lr': 3e-3,
            'discount': 0.99,
            'nb_units': 41,
            'initial_mean': 0.,
            'initial_stddev': 0.3,
            'critic_lr': 1e-3,
        }

    @staticmethod
    def get_random_config(fixed_params=None):
        """Sample a random hyper-parameter configuration.

        Bug fix: replaced the shared mutable default ``fixed_params={}``
        with a ``None`` sentinel (backward compatible).
        """
        get_lr = lambda: 1e-4 + (1e-1 - 1e-4) * np.random.random(1)[0]
        get_discount = lambda: 0.5 + (1 - 0.5) * np.random.random(1)[0]
        get_nb_units = lambda: np.random.randint(10, 100)
        get_initial_mean = lambda: 0
        get_initial_stddev = lambda: 5e-1 * np.random.random(1)[0]
        get_nb_critic_iter = lambda: np.random.randint(4, 50)

        random_config = {
            'lr': get_lr(),
            'discount': get_discount(),
            'nb_units': get_nb_units(),
            'initial_mean': get_initial_mean(),
            'initial_stddev': get_initial_stddev(),
            'critic_lr': get_lr(),
            'nb_critic_iter': get_nb_critic_iter(),
        }
        random_config.update(fixed_params or {})

        return random_config

    def build_graph(self, graph):
        """Build the TF1 graph: policy, Q critic, frozen target critic."""
        np.random.seed(self.random_seed)
        with graph.as_default():
            tf.set_random_seed(self.random_seed)

            # Dims: bs x num_steps x state_size
            self.inputs = tf.placeholder(tf.float32, shape=[None, None, self.policy_params['nb_inputs']], name='inputs')
            input_shape = tf.shape(self.inputs)
            dynamic_batch_size, dynamic_num_steps = input_shape[0], input_shape[1]
            inputs_mat = tf.reshape(self.inputs, [-1, self.policy_params['nb_inputs']])

            policy_scope = tf.VariableScope(reuse=False, name='Policy')
            with tf.variable_scope(policy_scope):
                probs, actions = capacities.policy(self.policy_params, inputs_mat)
                self.probs = tf.reshape(probs, [dynamic_batch_size, dynamic_num_steps, self.policy_params['nb_outputs']])
                self.actions = tf.reshape(actions, [dynamic_batch_size, dynamic_num_steps, 1])
            self.action_t = self.actions[0, 0, 0]

            critic_scope = tf.VariableScope(reuse=False, name='QValues')
            with tf.variable_scope(critic_scope):
                critic_values_mat = capacities.value_f(self.critic_params, inputs_mat)
                self.critic_values = tf.reshape(critic_values_mat, [dynamic_batch_size, dynamic_num_steps, self.critic_params['nb_outputs']])

            # Frozen copy of the critic used for bootstrap targets.
            fixed_critic_scope = tf.VariableScope(reuse=False, name='FixedQValues')
            with tf.variable_scope(fixed_critic_scope):
                self.update_fixed_vars_op = capacities.fix_scope(critic_scope)

            with tf.variable_scope('Training'):
                self.expected_rewards = tf.placeholder(tf.float32, shape=[None, None, 1], name="reward")
                self.mask_plh = tf.placeholder(tf.float32, shape=[None, None, 1], name="mask_plh")

                # Mean return serves as a constant baseline.
                baseline = tf.reduce_mean(self.expected_rewards)

                # (row, col, action) triples to gather per-step values.
                batch_size, num_steps = tf.shape(self.actions)[0], tf.shape(self.actions)[1]
                line_indices = tf.matmul(  # Line indices
                    tf.reshape(tf.range(0, batch_size), [-1, 1])
                    , tf.ones([1, num_steps], dtype=tf.int32)
                )
                column_indices = tf.matmul(  # Column indices
                    tf.ones([batch_size, 1], dtype=tf.int32)
                    , tf.reshape(tf.range(0, num_steps), [1, -1])
                )
                depth_indices = tf.cast(tf.squeeze(self.actions, 2), tf.int32)
                stacked_actions = tf.stack(
                    [line_indices, column_indices, depth_indices], 2
                )

                qs = tf.expand_dims(tf.gather_nd(self.critic_values, stacked_actions), 2)
                log_probs = tf.expand_dims(tf.log(tf.gather_nd(self.probs, stacked_actions)), 2)
                # Policy gradient weighted by the (gradient-stopped) Q.
                self.policy_loss = tf.reduce_mean( - tf.reduce_sum((log_probs * (tf.stop_gradient(qs) - baseline)) * self.mask_plh, 1))
                adam = tf.train.AdamOptimizer(self.lr)
                self.train_policy_op = adam.minimize(self.policy_loss)

                self.rewards = tf.placeholder(tf.float32, shape=[None, None, 1], name="reward")
                self.next_states = tf.placeholder(tf.float32, shape=[None, None, self.critic_params['nb_inputs']], name="next_states")
                self.next_actions = tf.placeholder(tf.int32, shape=[None, None, 1], name="next_actions")
                with tf.variable_scope(fixed_critic_scope, reuse=True):
                    next_states_mat = tf.reshape(self.next_states, [-1, self.critic_params['nb_inputs']])
                    next_critic_values_mat = capacities.value_f(self.critic_params, next_states_mat)
                    next_critic_values = tf.reshape(next_critic_values_mat, [dynamic_batch_size, dynamic_num_steps, self.critic_params['nb_outputs']])

                batch_size, num_steps = tf.shape(self.next_actions)[0], tf.shape(self.next_actions)[1]
                line_indices = tf.matmul(  # Line indices
                    tf.reshape(tf.range(0, batch_size), [-1, 1])
                    , tf.ones([1, num_steps], dtype=tf.int32)
                )
                column_indices = tf.matmul(  # Column indices
                    tf.ones([batch_size, 1], dtype=tf.int32)
                    , tf.reshape(tf.range(0, num_steps), [1, -1])
                )
                depth_indices = tf.cast(tf.squeeze(self.next_actions, 2), tf.int32)
                next_stacked_actions = tf.stack(
                    [line_indices, column_indices, depth_indices], 2
                )
                next_qs = tf.expand_dims(tf.gather_nd(next_critic_values, next_stacked_actions), 2)

                # Two candidate targets: bootstrapped (non-terminal) vs
                # raw reward (terminal); selected per step via the
                # terminal flag stored in the last next_states component.
                target_qs1 = tf.stop_gradient(self.rewards + self.discount * next_qs)
                target_qs2 = self.rewards
                stacked_targets = tf.stack([tf.squeeze(target_qs1, 2), tf.squeeze(target_qs2, 2)], 2)
                batch_size, num_steps = tf.shape(self.next_states)[0], tf.shape(self.next_states)[1]
                line_indices = tf.matmul(  # Line indices
                    tf.reshape(tf.range(0, batch_size), [-1, 1])
                    , tf.ones([1, num_steps], dtype=tf.int32)
                )
                column_indices = tf.matmul(  # Column indices
                    tf.ones([batch_size, 1], dtype=tf.int32)
                    , tf.reshape(tf.range(0, num_steps), [1, -1])
                )
                depth_indices = tf.cast(self.next_states[:, :, -1], tf.int32)
                select_targets = tf.stack(
                    [line_indices, column_indices, depth_indices], 2
                )
                target_qs = tf.expand_dims(tf.gather_nd(stacked_targets, select_targets), 2)

                self.critic_loss = 1/2 * tf.reduce_sum(tf.square(target_qs - qs) * self.mask_plh)
                adam = tf.train.AdamOptimizer(self.critic_lr)
                self.global_step = tf.Variable(0, trainable=False, name="global_step", collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
                self.train_critic_op = adam.minimize(self.critic_loss, global_step=self.global_step)

            # Summaries are fed from python scalars via placeholders.
            self.policy_loss_plh = tf.placeholder(tf.float32, shape=[])
            self.policy_loss_sum_t = tf.summary.scalar('policy_loss', self.policy_loss_plh)
            self.critic_loss_plh = tf.placeholder(tf.float32, shape=[])
            self.critic_loss_sum_t = tf.summary.scalar('critic_loss', self.critic_loss_plh)
            self.all_summary_t = tf.summary.merge_all()

            self.score_plh = tf.placeholder(tf.float32, shape=[])
            self.score_sum_t = tf.summary.scalar('av_score', self.score_plh)
            self.episode_id, self.inc_ep_id_op = capacities.counter("episode_id")

            # Playing part
            self.pscore_plh = tf.placeholder(tf.float32, shape=[])
            self.pscore_sum_t = tf.summary.scalar('play_score', self.pscore_plh)

        return graph

    def collect_samples(self, env, render, nb_sequence=1):
        """Roll out SARSA-style transitions (s, a, r, s', a')."""
        sequence_history = []
        av_score = []
        for i in range(nb_sequence):
            obs = env.reset()
            act, state = self.act(obs)
            score = 0
            history = np.array([], dtype=self.memoryDt)
            done = False
            while True:
                if render:
                    env.render()

                next_obs, reward, done, info = env.step(act)
                next_act, _ = self.act(next_obs)

                # The appended terminal flag is 0. for the current state
                # and 1. for a terminal next state.
                memory = np.array([(
                    np.concatenate((obs, [float(False)])),
                    act,
                    reward,
                    np.concatenate((next_obs, [float(done)])),
                    next_act,
                )], dtype=self.memoryDt)
                history = np.append(history, memory)
                score += reward
                obs = next_obs
                act = next_act
                if done:
                    break

            sequence_history.append(history)
            av_score.append(score)
            self.sess.run(self.inc_ep_id_op)

        # Bug fix: summarize the average score over all collected
        # episodes; the original fed np.mean(score), i.e. only the
        # last episode's score (av_score was accumulated but unused).
        summary, episode_id = self.sess.run([self.score_sum_t, self.episode_id], feed_dict={
            self.score_plh: np.mean(av_score),
        })
        self.sw.add_summary(summary, episode_id)

        return sequence_history, episode_id

    def train_controller(self, batch):
        """Fit the critic for nb_critic_iter steps, then update the policy."""
        expected_rewards = []
        for i, episode_rewards in enumerate(batch['rewards']):
            expected_rewards.append(get_expected_rewards(episode_rewards, self.discount))

        # Fit Critic
        av_critic_loss = []
        for i in range(self.nb_critic_iter):
            _, critic_loss = self.sess.run([self.train_critic_op, self.critic_loss], feed_dict={
                self.inputs: batch['states'],
                self.actions: batch['actions'],
                self.expected_rewards: expected_rewards,
                self.rewards: batch['rewards'],
                self.mask_plh: batch['mask'],
                self.next_states: batch['next_states'],
                self.next_actions: batch['next_actions'],
            })
            av_critic_loss.append(critic_loss)
        # Sync the frozen target critic with the freshly-fitted one.
        self.sess.run(self.update_fixed_vars_op)

        _, policy_loss = self.sess.run([self.train_policy_op, self.policy_loss], feed_dict={
            self.inputs: batch['states'],
            self.actions: batch['actions'],
            self.expected_rewards: expected_rewards,
            self.rewards: batch['rewards'],
            self.mask_plh: batch['mask'],
            self.next_states: batch['next_states'],
            self.next_actions: batch['next_actions'],
        })

        summary, _, episode_id = self.sess.run([self.all_summary_t, self.inc_ep_id_op, self.episode_id], feed_dict={
            self.policy_loss_plh: policy_loss,
            self.critic_loss_plh: np.mean(av_critic_loss),
        })
        self.sw.add_summary(summary, episode_id)

        return
class ActorCriticAgent(ActorQCriticAgent):
    """Actor-critic agent with a state-value (V) critic.

    Like ActorQCriticAgent but the critic outputs a single state value;
    the policy gradient is weighted by the advantage estimate
    (discounted return minus the gradient-stopped V).
    """

    def set_agent_props(self):
        """Extend PolicyAgent props with a scalar-output critic."""
        super(ActorCriticAgent, self).set_agent_props()

        # V critic: single scalar output per state.
        self.critic_params = {
            'nb_inputs': self.observation_space.shape[0] + 1,
            'nb_units': self.config['nb_units'],
            'nb_outputs': 1,
            'initial_mean': self.config['initial_mean'],
            'initial_stddev': self.config['initial_stddev'],
        }
        self.critic_lr = self.config['critic_lr']
        self.nb_critic_iter = self.config['nb_critic_iter']

        self.dtKeys = ['states', 'actions', 'rewards', 'next_states']
        self.memoryDt = np.dtype([
            ('states', 'float32', (self.policy_params['nb_inputs'],)),
            ('actions', 'int32', (1,)),
            ('rewards', 'float32', (1,)),
            ('next_states', 'float32', (self.policy_params['nb_inputs'],)),
        ])

    def get_best_config(self, env_name=""):
        """Return a hand-tuned configuration for this agent.

        NOTE(review): set_agent_props also reads
        self.config['nb_critic_iter'], which this dict does not
        provide — confirm the missing key is supplied elsewhere.
        """
        return {
            'lr': 3e-3,
            'discount': 0.99,
            'nb_units': 41,
            'initial_mean': 0.,
            'initial_stddev': 0.3,
            'critic_lr': 1e-2,
        }

    @staticmethod
    def get_random_config(fixed_params=None):
        """Sample a random hyper-parameter configuration.

        Bug fix: replaced the shared mutable default ``fixed_params={}``
        with a ``None`` sentinel (backward compatible).
        """
        get_lr = lambda: 1e-4 + (1e-1 - 1e-4) * np.random.random(1)[0]
        get_discount = lambda: 0.5 + (1 - 0.5) * np.random.random(1)[0]
        get_nb_units = lambda: np.random.randint(10, 100)
        get_initial_mean = lambda: 0
        get_initial_stddev = lambda: 5e-1 * np.random.random(1)[0]
        get_nb_critic_iter = lambda: np.random.randint(4, 50)

        random_config = {
            'lr': get_lr(),
            'discount': get_discount(),
            'nb_units': get_nb_units(),
            'initial_mean': get_initial_mean(),
            'initial_stddev': get_initial_stddev(),
            'critic_lr': get_lr(),
            'nb_critic_iter': get_nb_critic_iter(),
        }
        random_config.update(fixed_params or {})

        return random_config

    def build_graph(self, graph):
        """Build the TF1 graph: policy, V critic, frozen target critic."""
        np.random.seed(self.random_seed)
        with graph.as_default():
            tf.set_random_seed(self.random_seed)

            # Dims: bs x num_steps x state_size
            self.inputs = tf.placeholder(tf.float32, shape=[None, None, self.policy_params['nb_inputs']], name='inputs')
            input_shape = tf.shape(self.inputs)
            dynamic_batch_size, dynamic_num_steps = input_shape[0], input_shape[1]
            inputs_mat = tf.reshape(self.inputs, [-1, self.policy_params['nb_inputs']])

            policy_scope = tf.VariableScope(reuse=False, name='Policy')
            with tf.variable_scope(policy_scope):
                probs, actions = capacities.policy(self.policy_params, inputs_mat)
                self.probs = tf.reshape(probs, [dynamic_batch_size, dynamic_num_steps, self.policy_params['nb_outputs']])
                self.actions = tf.reshape(actions, [dynamic_batch_size, dynamic_num_steps, 1])
            self.action_t = self.actions[0, 0, 0]

            critic_scope = tf.VariableScope(reuse=False, name='QValues')
            with tf.variable_scope(critic_scope):
                critic_values_mat = capacities.value_f(self.critic_params, inputs_mat)
                self.critic_values = tf.reshape(critic_values_mat, [dynamic_batch_size, dynamic_num_steps, self.critic_params['nb_outputs']])

            # Frozen copy of the critic used for bootstrap targets.
            fixed_critic_scope = tf.VariableScope(reuse=False, name='FixedQValues')
            with tf.variable_scope(fixed_critic_scope):
                self.update_fixed_vars_op = capacities.fix_scope(critic_scope)

            with tf.variable_scope('Training'):
                self.expected_rewards = tf.placeholder(tf.float32, shape=[None, None, 1], name="reward")
                self.mask_plh = tf.placeholder(tf.float32, shape=[None, None, 1], name="mask_plh")

                # (row, col, action) triples to gather per-step values.
                batch_size, num_steps = tf.shape(self.actions)[0], tf.shape(self.actions)[1]
                line_indices = tf.matmul(  # Line indices
                    tf.reshape(tf.range(0, batch_size), [-1, 1])
                    , tf.ones([1, num_steps], dtype=tf.int32)
                )
                column_indices = tf.matmul(  # Column indices
                    tf.ones([batch_size, 1], dtype=tf.int32)
                    , tf.reshape(tf.range(0, num_steps), [1, -1])
                )
                depth_indices = tf.cast(tf.squeeze(self.actions, 2), tf.int32)
                stacked_actions = tf.stack(
                    [line_indices, column_indices, depth_indices], 2
                )
                log_probs = tf.expand_dims(tf.log(tf.gather_nd(self.probs, stacked_actions)), 2)
                # Advantage-weighted policy gradient: return minus the
                # gradient-stopped state value.
                self.policy_loss = tf.reduce_mean( - tf.reduce_sum((log_probs * (self.expected_rewards - tf.stop_gradient(self.critic_values))) * self.mask_plh, 1))
                adam = tf.train.AdamOptimizer(self.lr)
                self.train_policy_op = adam.minimize(self.policy_loss)

                self.rewards = tf.placeholder(tf.float32, shape=[None, None, 1], name="reward")
                self.next_states = tf.placeholder(tf.float32, shape=[None, None, self.critic_params['nb_inputs']], name="next_states")
                with tf.variable_scope(fixed_critic_scope, reuse=True):
                    next_states_mat = tf.reshape(self.next_states, [-1, self.critic_params['nb_inputs']])
                    next_critic_values_mat = capacities.value_f(self.critic_params, next_states_mat)
                    next_critic_values = tf.reshape(next_critic_values_mat, [dynamic_batch_size, dynamic_num_steps, self.critic_params['nb_outputs']])

                # Two candidate targets: bootstrapped (non-terminal) vs
                # raw reward (terminal); selected per step via the
                # terminal flag stored in the last next_states component.
                target_critics1 = tf.stop_gradient(self.rewards + self.discount * next_critic_values)
                target_critics2 = self.rewards
                stacked_targets = tf.stack([tf.squeeze(target_critics1, 2), tf.squeeze(target_critics2, 2)], 2)
                batch_size, num_steps = tf.shape(self.next_states)[0], tf.shape(self.next_states)[1]
                line_indices = tf.matmul(  # Line indices
                    tf.reshape(tf.range(0, batch_size), [-1, 1])
                    , tf.ones([1, num_steps], dtype=tf.int32)
                )
                column_indices = tf.matmul(  # Column indices
                    tf.ones([batch_size, 1], dtype=tf.int32)
                    , tf.reshape(tf.range(0, num_steps), [1, -1])
                )
                depth_indices = tf.cast(self.next_states[:, :, -1], tf.int32)
                select_targets = tf.stack(
                    [line_indices, column_indices, depth_indices], 2
                )
                target_critics = tf.expand_dims(tf.gather_nd(stacked_targets, select_targets), 2)

                self.critic_loss = 1/2 * tf.reduce_sum(tf.square(target_critics - self.critic_values) * self.mask_plh)
                adam = tf.train.AdamOptimizer(self.critic_lr)
                self.global_step = tf.Variable(0, trainable=False, name="global_step", collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
                self.train_critic_op = adam.minimize(self.critic_loss, global_step=self.global_step)

            # Summaries are fed from python scalars via placeholders.
            self.policy_loss_plh = tf.placeholder(tf.float32, shape=[])
            self.policy_loss_sum_t = tf.summary.scalar('policy_loss', self.policy_loss_plh)
            self.critic_loss_plh = tf.placeholder(tf.float32, shape=[])
            self.critic_loss_sum_t = tf.summary.scalar('critic_loss', self.critic_loss_plh)
            self.all_summary_t = tf.summary.merge_all()

            self.score_plh = tf.placeholder(tf.float32, shape=[])
            self.score_sum_t = tf.summary.scalar('av_score', self.score_plh)
            self.episode_id, self.inc_ep_id_op = capacities.counter("episode_id")

            # Playing part
            self.pscore_plh = tf.placeholder(tf.float32, shape=[])
            self.pscore_sum_t = tf.summary.scalar('play_score', self.pscore_plh)

        return graph

    def collect_samples(self, env, render, nb_sequence=1):
        """Roll out transitions (s, a, r, s') with the current policy."""
        sequence_history = []
        av_score = []
        for i in range(nb_sequence):
            obs = env.reset()
            score = 0
            history = np.array([], dtype=self.memoryDt)
            done = False
            while True:
                if render:
                    env.render()

                act, state = self.act(obs)
                next_obs, reward, done, info = env.step(act)

                # The appended terminal flag is 1. for a terminal
                # next state, 0. otherwise.
                memory = np.array([(
                    state,
                    act,
                    reward,
                    np.concatenate((next_obs, [float(done)])),
                )], dtype=self.memoryDt)
                history = np.append(history, memory)
                score += reward
                obs = next_obs
                if done:
                    break

            sequence_history.append(history)
            av_score.append(score)
            self.sess.run(self.inc_ep_id_op)

        # Bug fix: summarize the average score over all collected
        # episodes; the original fed np.mean(score), i.e. only the
        # last episode's score (av_score was accumulated but unused).
        summary, episode_id = self.sess.run([self.score_sum_t, self.episode_id], feed_dict={
            self.score_plh: np.mean(av_score),
        })
        self.sw.add_summary(summary, episode_id)

        return sequence_history, episode_id

    def train_controller(self, batch):
        """Fit the critic for nb_critic_iter steps, then update the policy."""
        expected_rewards = []
        for i, episode_rewards in enumerate(batch['rewards']):
            expected_rewards.append(get_expected_rewards(episode_rewards, self.discount))

        # Fit Critic
        av_critic_loss = []
        for i in range(self.nb_critic_iter):
            _, critic_loss = self.sess.run([self.train_critic_op, self.critic_loss], feed_dict={
                self.inputs: batch['states'],
                self.actions: batch['actions'],
                self.expected_rewards: expected_rewards,
                self.rewards: batch['rewards'],
                self.mask_plh: batch['mask'],
                self.next_states: batch['next_states'],
            })
            av_critic_loss.append(critic_loss)
        # Sync the frozen target critic with the freshly-fitted one.
        self.sess.run(self.update_fixed_vars_op)

        _, policy_loss = self.sess.run([self.train_policy_op, self.policy_loss], feed_dict={
            self.inputs: batch['states'],
            self.actions: batch['actions'],
            self.expected_rewards: expected_rewards,
            self.rewards: batch['rewards'],
            self.mask_plh: batch['mask'],
            self.next_states: batch['next_states'],
        })

        summary, _, episode_id = self.sess.run([self.all_summary_t, self.inc_ep_id_op, self.episode_id], feed_dict={
            self.policy_loss_plh: policy_loss,
            self.critic_loss_plh: np.mean(av_critic_loss),
        })
        self.sw.add_summary(summary, episode_id)

        return
class A2CAgent(ActorCriticAgent):
"""
Agent implementing Advantage Actor critic using REINFORCE
"""
def set_agent_props(self):
super(A2CAgent, self).set_agent_props()
self.v_params = {
'nb_inputs': self.observation_space.shape[0] + 1
, 'nb_units': self.config['nb_units']
, 'nb_outputs': 1
, 'initial_mean': self.config['initial_mean']
, 'initial_stddev': self.config['initial_stddev']
}
self.v_lr = self.lr
def build_graph(self, graph):
with graph.as_default():
tf.set_random_seed(self.random_seed)
self.inputs = tf.placeholder(tf.float32, shape=[None, self.observation_space.shape[0] + 1], name='inputs')
policy_scope = tf.VariableScope(reuse=False, name='Policy')
with tf.variable_scope(policy_scope):
self.probs, self.actions = capacities.policy(self.policy_params, self.inputs)
self.action_t = tf.squeeze(self.actions, 1)[0]
# self.action_t = tf.Print(self.action_t, data=[self.probs, self.action_t], message="self.probs, self.action_t:")
q_scope = tf.VariableScope(reuse=False, name='QValues')
with tf.variable_scope(q_scope):
self.q_values = capacities.value_f(self.q_params, self.inputs)
v_scope = tf.VariableScope(reuse=False, name='VValues')
with tf.variable_scope(v_scope):
vs = capacities.value_f(self.v_params, self.inputs)
with tf.control_dependencies([self.probs, self.q_values, vs]):
with tf.variable_scope('Training'):
stacked_actions = tf.stack([tf.range(0, tf.shape(self.actions)[0]), tf.squeeze(self.actions, 1)], 1)
qs = tf.gather_nd(self.q_values, stacked_actions)
self.rewards = tf.placeholder(tf.float32, shape=[None], name="rewards")
self.next_states = tf.placeholder(tf.float32, shape=[None, self.observation_space.shape[0] + 1], name="next_states")
self.next_actions = tf.placeholder(tf.int32, shape=[None], name="next_actions")
with tf.variable_scope(v_scope, reuse=True):
next_vs = tf.squeeze(capacities.value_f(self.v_params, self.next_states), 1)
with tf.variable_scope('TargetVs'):
target_vs1 = tf.stop_gradient(self.rewards + self.discount * next_vs)
target_vs2 = self.rewards
stacked_targets = tf.stack([target_vs1, target_vs2], 1)
select_targets = tf.stack([tf.range(0, tf.shape(self.next_states)[0]), tf.cast(self.next_states[:, -1], tf.int32)], 1)
target_vs = tf.gather_nd(stacked_targets, select_targets)
with tf.variable_scope(q_scope, reuse=True):
next_q_values = capacities.value_f(self.q_params, self.next_states)
with tf.variable_scope('TargetQs'):
next_stacked_actions = tf.stack([tf.range(0, tf.shape(self.next_actions)[0]), self.next_actions], 1)
next_qs = tf.gather_nd(next_q_values, next_stacked_actions)
target_qs1 = tf.stop_gradient(self.rewards + self.discount * next_qs)
target_qs2 = self.rewards
stacked_targets = tf.stack([target_qs1, target_qs2], 1)
select_targets = tf.stack([tf.range(0, tf.shape(self.next_states)[0]), tf.cast(self.next_states[:, -1], tf.int32)], 1)
target_qs = tf.gather_nd(stacked_targets, select_targets)
log_probs = tf.log(tf.gather_nd(self.probs, stacked_actions))
with tf.control_dependencies([log_probs, target_qs, target_vs]):
self.v_loss = 1/2 * tf.reduce_sum(tf.square(target_vs - vs))
v_adam = tf.train.AdamOptimizer(self.v_lr)
self.v_global_step = tf.Variable(0, trainable=False, name="v_global_step")
self.v_train_op = v_adam.minimize(self.v_loss, global_step=self.v_global_step)
self.q_loss = 1/2 * tf.reduce_sum(tf.square(target_qs - qs))
q_adam = tf.train.AdamOptimizer(self.q_lr)
self.q_global_step = tf.Variable(0, trainable=False, name="q_global_step")
self.q_train_op = q_adam.minimize(self.q_loss, global_step=self.q_global_step)
advantages = qs - vs
self.policy_loss = - tf.reduce_sum(log_probs * tf.stop_gradient(advantages))
policy_adam = tf.train.AdamOptimizer(self.policy_lr)
self.policy_global_step = tf.Variable(0, trainable=False, name="policy_global_step", collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
self.policy_train_op = policy_adam.minimize(self.policy_loss, global_step=self.policy_global_step)
self.score_plh = tf.placeholder(tf.float32, shape=[])
self.score_sum_t = tf.summary.scalar('score', self.score_plh)
self.policy_loss_plh = tf.placeholder(tf.float32, shape=[])
self.policy_loss_sum_t = tf.summary.scalar('policy_loss', self.policy_loss_plh)
self.q_loss_plh = tf.placeholder(tf.float32, shape=[])
self.q_loss_sum_t = tf.summary.scalar('q_loss', self.q_loss_plh)
self.v_loss_plh = tf.placeholder(tf.float32, shape=[])
self.v_loss_sum_t = tf.summary.scalar('v_loss', self.v_loss_plh)
self.all_summary_t = tf.summary.merge_all()
self.episode_id, self.inc_ep_id_op = capacities.counter("episode_id")
# Playing part
self.pscore_plh = tf.placeholder(tf.float32, shape=[])
self.pscore_sum_t = tf.summary.scalar('play_score', self.pscore_plh)
return graph
def learn_from_episode(self, env, render):
    """Roll out one episode, training the policy, Q and V networks at every step."""
    state = env.reset()
    action, _ = self.act(state)
    policy_losses, q_losses, v_losses = [], [], []
    total_reward = 0
    done = False
    while not done:
        if render:
            env.render()
        next_state, reward, done, _ = env.step(action)
        next_action, _ = self.act(next_state)
        # Current state carries done-flag 0; the next state carries the real flag.
        fetches = [self.policy_train_op, self.policy_loss,
                   self.q_train_op, self.q_loss,
                   self.v_train_op, self.v_loss]
        feed = {
            self.inputs: [np.concatenate((state, [0]))],
            self.actions: [[action]],
            self.rewards: [reward],
            self.next_states: [np.concatenate((next_state, [1 if done else 0]))],
            self.next_actions: [next_action],
        }
        _, policy_loss, _, q_loss, _, v_loss = self.sess.run(fetches, feed_dict=feed)
        policy_losses.append(policy_loss)
        q_losses.append(q_loss)
        v_losses.append(v_loss)
        total_reward += reward
        state, action = next_state, next_action
    # Log per-episode averages under an auto-incremented episode id.
    summary, _, episode_id = self.sess.run(
        [self.all_summary_t, self.inc_ep_id_op, self.episode_id],
        feed_dict={
            self.score_plh: total_reward,
            self.policy_loss_plh: np.mean(policy_losses),
            self.q_loss_plh: np.mean(q_losses),
            self.v_loss_plh: np.mean(v_losses),
        })
    self.sw.add_summary(summary, episode_id)
class TDACAgent(PolicyAgent):
    """One-step TD actor-critic agent.

    The critic is a state-value network V(s) trained toward the one-step TD
    target ``r + discount * V(s')``; the actor (REINFORCE-style policy) is
    trained with the TD error as its advantage signal.
    """
    def set_agent_props(self):
        # Extend PolicyAgent's configuration with the critic's hyper-parameters.
        super(TDACAgent, self).set_agent_props()
        # nb_inputs is observation size + 1: states are augmented with a
        # trailing terminal-state flag (see the feeds in learn_from_episode).
        self.v_params = {
            'nb_inputs': self.observation_space.shape[0] + 1
            , 'nb_units': self.config['nb_units']
            , 'nb_outputs': 1
            , 'initial_mean': self.config['initial_mean']
            , 'initial_stddev': self.config['initial_stddev']
        }
        # Actor and critic share the single learning rate from the base config.
        self.policy_lr = self.lr
        self.v_lr = self.lr
    def build_graph(self, graph):
        """Build the TF graph: policy head, V critic, losses, train ops, summaries."""
        with graph.as_default():
            tf.set_random_seed(self.random_seed)
            # Observation plus the terminal-state flag appended as last feature.
            self.inputs = tf.placeholder(tf.float32, shape=[None, self.observation_space.shape[0] + 1], name='inputs')
            policy_scope = tf.VariableScope(reuse=False, name='Policy')
            with tf.variable_scope(policy_scope):
                self.probs, self.actions = capacities.policy(self.policy_params, self.inputs)
            self.action_t = tf.squeeze(self.actions, 1)[0]
            # self.action_t = tf.Print(self.action_t, data=[self.probs, self.action_t], message="self.probs, self.action_t:")
            v_scope = tf.VariableScope(reuse=False, name='VValues')
            with tf.variable_scope(v_scope):
                vs = capacities.value_f(self.v_params, self.inputs)
            with tf.control_dependencies([self.probs, vs]):
                with tf.variable_scope('Training'):
                    # Pair each row index with its chosen action for gather_nd.
                    stacked_actions = tf.stack([tf.range(0, tf.shape(self.actions)[0]), tf.squeeze(self.actions, 1)], 1)
                    self.rewards = tf.placeholder(tf.float32, shape=[None], name="rewards")
                    self.next_states = tf.placeholder(tf.float32, shape=[None, self.observation_space.shape[0] + 1], name="next_states")
                    self.next_actions = tf.placeholder(tf.int32, shape=[None], name="next_actions")
                    # Reuse the critic's variables to evaluate V(s').
                    with tf.variable_scope(v_scope, reuse=True):
                        next_vs = tf.squeeze(capacities.value_f(self.v_params, self.next_states), 1)
                    with tf.variable_scope('TargetVs'):
                        # Bootstrapped target for non-terminal transitions...
                        target_vs1 = tf.stop_gradient(self.rewards + self.discount * next_vs)
                        # ...and the bare reward for terminal ones.
                        target_vs2 = self.rewards
                        stacked_targets = tf.stack([target_vs1, target_vs2], 1)
                        # The last feature of next_states is the done flag (0/1);
                        # it selects which of the two targets applies per row.
                        select_targets = tf.stack([tf.range(0, tf.shape(self.next_states)[0]), tf.cast(self.next_states[:, -1], tf.int32)], 1)
                        target_vs = tf.gather_nd(stacked_targets, select_targets)
                    log_probs = tf.log(tf.gather_nd(self.probs, stacked_actions))
                    with tf.control_dependencies([log_probs, target_vs]):
                        # NOTE(review): ``1/2`` is integer division (== 0) on
                        # Python 2 unless ``from __future__ import division`` is
                        # in effect at the top of this module — confirm.
                        self.v_loss = 1/2 * tf.reduce_sum(tf.square(target_vs - vs))
                        v_adam = tf.train.AdamOptimizer(self.v_lr)
                        self.v_global_step = tf.Variable(0, trainable=False, name="v_global_step")
                        self.v_train_op = v_adam.minimize(self.v_loss, global_step=self.v_global_step)
                        # The TD error doubles as the actor's advantage estimate;
                        # stop_gradient keeps the critic out of the policy update.
                        td = target_vs - vs
                        self.policy_loss = - tf.reduce_sum(log_probs * tf.stop_gradient(td))
                        policy_adam = tf.train.AdamOptimizer(self.policy_lr)
                        self.policy_global_step = tf.Variable(0, trainable=False, name="policy_global_step", collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
                        self.policy_train_op = policy_adam.minimize(self.policy_loss, global_step=self.policy_global_step)
            # Scalar summaries are fed from Python-side averages after each episode.
            self.score_plh = tf.placeholder(tf.float32, shape=[])
            self.score_sum_t = tf.summary.scalar('score', self.score_plh)
            self.policy_loss_plh = tf.placeholder(tf.float32, shape=[])
            self.policy_loss_sum_t = tf.summary.scalar('policy_loss', self.policy_loss_plh)
            self.v_loss_plh = tf.placeholder(tf.float32, shape=[])
            self.v_loss_sum_t = tf.summary.scalar('v_loss', self.v_loss_plh)
            self.all_summary_t = tf.summary.merge_all()
            self.episode_id, self.inc_ep_id_op = capacities.counter("episode_id")
            # Playing part
            self.pscore_plh = tf.placeholder(tf.float32, shape=[])
            self.pscore_sum_t = tf.summary.scalar('play_score', self.pscore_plh)
        return graph
    def learn_from_episode(self, env, render):
        """Run one SARSA-style episode, training actor and critic at every step."""
        obs = env.reset()
        act, _ = self.act(obs)
        av_policy_loss = []
        av_v_loss = []
        score = 0
        done = False
        while True:
            if render:
                env.render()
            next_obs, reward, done, info = env.step(act)
            next_act, _ = self.act(next_obs)
            # print([ np.concatenate((obs, [0])) ], act, reward, [ np.concatenate((next_obs, [1 if done else 0])) ], next_act)
            # Current state gets done-flag 0; the next state carries the real flag.
            _, policy_loss, _, v_loss = self.sess.run([self.policy_train_op, self.policy_loss , self.v_train_op, self.v_loss], feed_dict={
                self.inputs: [ np.concatenate((obs, [0])) ],
                self.actions: [ [act] ],
                self.rewards: [ reward ],
                self.next_states: [ np.concatenate((next_obs, [1 if done else 0])) ],
                self.next_actions: [ next_act ]
            })
            av_policy_loss.append(policy_loss)
            av_v_loss.append(v_loss)
            score += reward
            obs = next_obs
            act = next_act
            if done:
                break
        # Log per-episode averages under an auto-incremented episode id.
        summary, _, episode_id = self.sess.run([self.all_summary_t, self.inc_ep_id_op, self.episode_id], feed_dict={
            self.score_plh: score,
            self.policy_loss_plh: np.mean(av_policy_loss),
            self.v_loss_plh: np.mean(av_v_loss),
        })
        self.sw.add_summary(summary, episode_id)
        return
| {
"content_hash": "0c91ce84a89b3e76656cdca74e602394",
"timestamp": "",
"source": "github",
"line_count": 955,
"max_line_length": 179,
"avg_line_length": 46.4,
"alnum_prop": 0.5507311789131613,
"repo_name": "morgangiraud/openai-rl",
"id": "5168269a2de2f41e108e2396950d5d2ceb164674",
"size": "44312",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "agents/deep_policy_agent.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "237116"
},
{
"name": "Shell",
"bytes": "981"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import pytest
import pandas as pd
from operator import (add, sub, mul, floordiv, mod, pow, truediv, eq, ne, lt,
gt, le, ge, getitem)
from functools import partial
from datetime import datetime
import datashape
from datashape.predicates import iscollection, isscalar
from blaze import CSV, Table
from blaze.expr import (TableSymbol, projection, Field, selection, Broadcast,
join, cos, by, exp, distinct, Apply,
broadcast, eval_str, merge, common_subexpression, sum,
Label, ReLabel, Head, Sort, any, summary,
Summary, count, symbol, Field, discover,
max, min, label, Symbol, transform
)
from blaze.compatibility import PY3, builtins
from blaze.utils import raises, tmpfile
from datashape import dshape, var, int32, int64, Record, DataShape
from toolz import identity, first
import numpy as np
def test_dshape():
    """A TableSymbol's dshape is its schema wrapped in a var dimension."""
    table = TableSymbol('t', '{name: string, amount: int}')
    expected = dshape('var * {name: string, amount: int}')
    assert table.dshape == expected
def test_length():
    """len() works on fixed-length tables and propagates through expressions."""
    fixed = TableSymbol('t', '10 * {name: string, amount: int}')
    unbounded = TableSymbol('s', '{name:string, amount:int}')
    assert fixed.dshape == dshape('10 * {name: string, amount: int}')
    for expr, expected in [(fixed, 10),
                           (fixed.name, 10),
                           (fixed[['name']], 10),
                           (fixed.sort('name'), 10),
                           (fixed.head(5), 5),
                           (fixed.head(50), 10)]:
        assert len(expr) == expected
    # A var-length table has no defined length.
    with pytest.raises(ValueError):
        len(unbounded)
def test_tablesymbol_eq():
    """Symbols that differ only in name still compare unequal."""
    lhs = TableSymbol('t', '{name: string}')
    rhs = TableSymbol('v', '{name: string}')
    assert not (lhs == rhs)
def test_table_name():
t = TableSymbol('t', '10 * {people: string, amount: int}')
r = TableSymbol('r', 'int64')
with pytest.raises(AttributeError):
t.name
with pytest.raises(AttributeError):
r.name
def test_shape():
t = TableSymbol('t', '{name: string, amount: int}')
assert t.shape
assert isinstance(t.shape, tuple)
assert len(t.shape) == 1
def test_eq():
    """isidentical requires the same field order, not merely the same fields."""
    schema_ab = '{a: string, b: int}'
    schema_ba = '{b: string, a: int}'
    assert TableSymbol('t', schema_ab).isidentical(TableSymbol('t', schema_ab))
    assert not TableSymbol('t', schema_ba).isidentical(TableSymbol('t', schema_ab))
def test_arithmetic():
    """Smoke test: arithmetic operators compose with columns and literals.

    Nothing is asserted — successful construction (no raise) is the test.
    """
    t = TableSymbol('t', '{x: int, y: int, z: int}')
    x, y, z = t['x'], t['y'], t['z']
    exprs = [x + 1, x + y, 1 + y,
             x - y, 1 - x, x - 1,
             x ** y, x ** 2, 2 ** x,
             x * y, x ** 2, 2 ** x,
             x / y, x / 2, 2 / x,
             x % y, x % 2, 2 % x]
def test_column():
t = TableSymbol('t', '{name: string, amount: int}')
assert t.fields== ['name', 'amount']
assert eval(str(t.name)) == t.name
assert str(t.name) == "t.name"
with pytest.raises(AttributeError):
t.name.balance
with pytest.raises((NotImplementedError, ValueError)):
getitem(t, set('balance'))
def test_symbol_projection_failures():
t = TableSymbol('t', '10 * {name: string, amount: int}')
with pytest.raises(ValueError):
t._project(['name', 'id'])
with pytest.raises(AttributeError):
t.foo
with pytest.raises(TypeError):
t._project(t.dshape)
def test_Projection():
t = TableSymbol('t', '{name: string, amount: int, id: int32}')
p = projection(t, ['amount', 'name'])
assert p.schema == dshape('{amount: int32, name: string}')
print(t['amount'].dshape)
print(dshape('var * int32'))
assert t['amount'].dshape == dshape('var * int32')
assert t['amount']._name == 'amount'
assert eval(str(p)).isidentical(p)
assert p._project(['amount','name']) == p[['amount','name']]
with pytest.raises(ValueError):
p._project('balance')
def test_Projection_retains_shape():
t = TableSymbol('t', '5 * {name: string, amount: int, id: int32}')
assert t[['name', 'amount']].dshape == \
dshape('5 * {name: string, amount: int}')
def test_indexing():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
assert t[['amount', 'id']] == projection(t, ['amount', 'id'])
assert t['amount'].isidentical(Field(t, 'amount'))
def test_relational():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
r = (t['name'] == 'Alice')
assert 'bool' in str(r.dshape)
assert r._name
def test_selection():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
s = selection(t, t['name'] == 'Alice')
f = selection(t, t['id'] > t['amount'])
p = t[t['amount'] > 100]
with pytest.raises(ValueError):
selection(t, p)
assert s.dshape == t.dshape
def test_selection_typecheck():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
assert raises(TypeError, lambda: t[t['amount'] + t['id']])
assert raises(TypeError, lambda: t[t['name']])
def test_selection_by_indexing():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
result = t[t['name'] == 'Alice']
assert t.schema == result.schema
assert 'Alice' in str(result)
def test_selection_by_getattr():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
result = t[t.name == 'Alice']
assert t.schema == result.schema
assert 'Alice' in str(result)
def test_selection_path_check():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
t2 = t[t.name == 'Alice']
t3 = t2[t2.amount > 0]
def test_path_issue():
t = TableSymbol('t', "{topic: string, word: string, result: ?float64}")
t2 = transform(t, sizes=t.result.map(lambda x: (x - MIN)*10/(MAX - MIN),
schema='float64', name='size'))
assert builtins.any(t2.sizes.isidentical(node) for node in t2.children)
def test_getattr_doesnt_override_properties():
t = TableSymbol('t', '{_subs: string, schema: string}')
assert callable(t._subs)
assert isinstance(t.schema, DataShape)
def test_dir_contains_columns():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
result = dir(t)
columns_set = set(t.fields)
assert set(result) & columns_set == columns_set
def test_selection_consistent_children():
t = TableSymbol('t', '{name: string, amount: int, id: int}')
expr = t['name'][t['amount'] < 0]
assert list(expr.fields) == ['name']
def test_str():
import re
t = TableSymbol('t', '{name: string, amount: int, id: int}')
expr = t[t['amount'] < 0]['id'] * 2
assert '<class' not in str(expr)
assert not re.search('0x[0-9a-f]+', str(expr))
assert eval(str(expr)) == expr
assert '*' in repr(expr)
def test_join():
    """join merges schemas and validates on-columns and the ``how`` kwarg."""
    left = TableSymbol('t', '{name: string, amount: int}')
    right = TableSymbol('t', '{name: string, id: int}')
    other = TableSymbol('r', '{name: string, amount: int}')
    badtype = TableSymbol('q', '{name: int}')
    joined = join(left, right, 'name', 'name')
    assert joined.schema == dshape('{name: string, amount: int, id: int}')
    assert join(left, right, 'name') == join(left, right, 'name')
    assert join(left, right, 'name').on_left == 'name'
    assert join(left, right, 'name').on_right == 'name'
    assert join(left, other, ('name', 'amount')).on_left == ['name', 'amount']
    # Joining on columns of mismatched type is a TypeError.
    with pytest.raises(TypeError):
        join(left, badtype, 'name')
    # Unknown join kinds are rejected.
    with pytest.raises(ValueError):
        join(left, right, how='upside_down')
def test_join_different_on_right_left_columns():
t = TableSymbol('t', '{x: int, y: int}')
s = TableSymbol('t', '{a: int, b: int}')
j = join(t, s, 'x', 'a')
assert j.on_left == 'x'
assert j.on_right == 'a'
def test_joined_column_first_in_schema():
t = TableSymbol('t', '{x: int, y: int, z: int}')
s = TableSymbol('s', '{w: int, y: int}')
assert join(t, s).schema == dshape('{y: int, x: int, z: int, w: int}')
def test_outer_join():
t = TableSymbol('t', '{name: string, amount: int}')
s = TableSymbol('t', '{name: string, id: int}')
jleft = join(t, s, 'name', 'name', how='left')
jright = join(t, s, 'name', 'name', how='right')
jinner = join(t, s, 'name', 'name', how='inner')
jouter = join(t, s, 'name', 'name', how='outer')
js = [jleft, jright, jinner, jouter]
assert len(set(js)) == 4 # not equal
assert jinner.schema == dshape('{name: string, amount: int, id: int}')
assert jleft.schema == dshape('{name: string, amount: int, id: ?int}')
assert jright.schema == dshape('{name: string, amount: ?int, id: int}')
assert jouter.schema == dshape('{name: string, amount: ?int, id: ?int}')
# Default behavior
assert join(t, s, 'name', 'name', how='inner') == \
join(t, s, 'name', 'name')
def test_join_default_shared_columns():
t = TableSymbol('t', '{name: string, amount: int}')
s = TableSymbol('t', '{name: string, id: int}')
assert join(t, s) == join(t, s, 'name', 'name')
def test_multi_column_join():
a = TableSymbol('a', '{x: int, y: int, z: int}')
b = TableSymbol('b', '{w: int, x: int, y: int}')
j = join(a, b, ['x', 'y'])
assert set(j.fields) == set('wxyz')
assert j.on_left == j.on_right == ['x', 'y']
assert hash(j)
assert j.fields == ['x', 'y', 'z', 'w']
def test_traverse():
    """_traverse yields every node of the expression tree, leaf included."""
    t = TableSymbol('t', '{name: string, amount: int}')
    assert t in list(t._traverse())
    total = t.amount.sum()
    nodes = list(total._traverse())
    assert builtins.any(t.amount.isidentical(node) for node in nodes)
def test_unary_ops():
t = TableSymbol('t', '{name: string, amount: int}')
expr = cos(exp(t['amount']))
assert 'cos' in str(expr)
assert '~' in str(~(t.amount > 0))
def test_reduction():
t = TableSymbol('t', '{name: string, amount: int32}')
r = sum(t['amount'])
assert r.dshape in (dshape('int64'),
dshape('{amount: int64}'),
dshape('{amount_sum: int64}'))
assert 'amount' not in str(t.count().dshape)
assert t.count().dshape[0] in (int32, int64)
assert 'int' in str(t.count().dshape)
assert 'int' in str(t.nunique().dshape)
assert 'string' in str(t['name'].max().dshape)
assert 'string' in str(t['name'].min().dshape)
assert 'string' not in str(t.count().dshape)
t = TableSymbol('t', '{name: string, amount: real, id: int}')
assert 'int' in str(t['id'].sum().dshape)
assert 'int' not in str(t['amount'].sum().dshape)
def test_reduction_name():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
assert (t.amount + t.id).sum()._name
def test_max_min_class():
t = TableSymbol('t', '{name: string, amount: int32}')
assert str(max(t).dshape) == '{name: string, amount: int32}'
assert str(min(t).dshape) == '{name: string, amount: int32}'
@pytest.fixture
def symsum():
    """A (table, sum-of-amount) pair shared by the scalar-arithmetic tests."""
    table = TableSymbol('t', '{name: string, amount: int32}')
    return table, table.amount.sum()
@pytest.fixture
def ds():
    """Datashape of a transactions table used by the discovery tests."""
    fields = ("transaction_key : int64, "
              "user_from_key : int64, "
              "user_to_key : int64, "
              "date : int64, "
              "value : float64 ")
    return dshape("var * { " + fields + "}")
def test_discover_dshape_symbol(ds):
t_ds = TableSymbol('t', dshape=ds)
assert t_ds.fields is not None
t_sch = TableSymbol('t', dshape=ds.subshape[0])
assert t_sch.fields is not None
assert t_ds.isidentical(t_sch)
class TestScalarArithmetic(object):
    """Scalar reductions support the full set of arithmetic/comparison operators."""
    # Operator symbol -> callable; expressions are built both through the
    # callable and by eval-ing the symbolic form, and must be identical.
    ops = {'+': add, '-': sub, '*': mul, '/': truediv, '//': floordiv, '%': mod,
           '**': pow, '==': eq, '!=': ne, '<': lt, '>': gt, '<=': le, '>=': ge}
    def test_scalar_arith(self, symsum):
        # ``runner`` closes over ``r`` and ``op``, which are (re)bound below —
        # Python's late-binding closures make this work.
        def runner(f):
            result = f(r, 1)
            assert eval('r %s 1' % op).isidentical(result)
            a = f(r, r)
            b = eval('r %s r' % op)
            assert a is b or a.isidentical(b)
            result = f(1, r)
            assert eval('1 %s r' % op).isidentical(result)
        t, r = symsum
        r = t.amount.sum()
        for op, f in self.ops.items():
            runner(f)
    def test_scalar_usub(self, symsum):
        # Unary minus round-trips through str/eval.
        t, r = symsum
        result = -r
        assert eval(str(result)).isidentical(result)
    @pytest.mark.xfail
    def test_scalar_uadd(self, symsum):
        # Unary plus is not (yet) supported on scalar expressions.
        t, r = symsum
        +r
def test_summary():
t = TableSymbol('t', '{id: int32, name: string, amount: int32}')
s = summary(total=t.amount.sum(), num=t.id.count())
assert s.dshape == dshape('{num: int32, total: int64}')
assert hash(s)
assert eval(str(s)).isidentical(s)
assert 'summary(' in str(s)
assert 'total=' in str(s)
assert 'num=' in str(s)
assert str(t.amount.sum()) in str(s)
assert not summary(total=t.amount.sum())._child.isidentical(
t.amount.sum())
assert iscollection(summary(total=t.amount.sum() + 1)._child.dshape)
def test_reduction_arithmetic():
t = TableSymbol('t', '{id: int32, name: string, amount: int32}')
expr = t.amount.sum() + 1
assert eval(str(expr)).isidentical(expr)
def test_Distinct():
t = TableSymbol('t', '{name: string, amount: int32}')
r = distinct(t['name'])
print(r.dshape)
assert r.dshape == dshape('var * string')
assert r._name == 'name'
r = t.distinct()
assert r.dshape == t.dshape
def test_by():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
r = by(t['name'], total=sum(t['amount']))
print(r.schema)
assert isinstance(r.schema[0], Record)
assert str(r.schema[0]['name']) == 'string'
def test_by_summary():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
a = by(t['name'], sum=sum(t['amount']))
b = by(t['name'], summary(sum=sum(t['amount'])))
assert a.isidentical(b)
def test_by_summary_printing():
t = symbol('t', 'var * {name: string, amount: int32, id: int32}')
assert str(by(t.name, total=sum(t.amount))) == \
'by(t.name, total=sum(t.amount))'
def test_by_columns():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
assert len(by(t['id'], total=t['amount'].sum()).fields) == 2
assert len(by(t['id'], count=t['id'].count()).fields) == 2
print(by(t, count=t.count()).fields)
assert len(by(t, count=t.count()).fields) == 4
def test_sort():
    """sort() round-trips through str/eval, keeps the schema, records its key."""
    # NB: eval(str(...)) relies on the local name ``t`` matching the symbol name.
    t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
    by_amount = t.sort('amount', ascending=True)
    print(str(by_amount))
    assert eval(str(by_amount)).isidentical(by_amount)
    assert by_amount.schema == t.schema
    assert t['amount'].sort().key == 'amount'
def test_head():
    """head() round-trips through str/eval and preserves the schema."""
    # NB: eval(str(...)) relies on the local name ``t`` matching the symbol name.
    t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
    top = t.head(10)
    assert eval(str(top)).isidentical(top)
    assert top.schema == t.schema
def test_label():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
quantity = (t['amount'] + 100).label('quantity')
assert eval(str(quantity)).isidentical(quantity)
assert quantity.fields == ['quantity']
with pytest.raises(ValueError):
quantity['balance']
def test_map_label():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
c = t.amount.map(identity, schema='int32')
assert c.label('bar')._name == 'bar'
assert c.label('bar')._child.isidentical(c._child)
def test_columns():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
assert list(t.fields) == ['name', 'amount', 'id']
assert list(t['name'].fields) == ['name']
(t['amount'] + 1).fields
def test_relabel():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
rl = t.relabel({'name': 'NAME', 'id': 'ID'})
rlc = t['amount'].relabel({'amount': 'BALANCE'})
assert eval(str(rl)).isidentical(rl)
print(rl.fields)
assert rl.fields == ['NAME', 'amount', 'ID']
assert not isscalar(rl.dshape.measure)
assert isscalar(rlc.dshape.measure)
def test_relabel_join():
names = TableSymbol('names', '{first: string, last: string}')
siblings = join(names.relabel({'last': 'left'}),
names.relabel({'last': 'right'}), 'first')
assert siblings.fields == ['first', 'left', 'right']
def test_map():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
inc = lambda x: x + 1
assert isscalar(t['amount'].map(inc, schema='int').dshape.measure)
s = t['amount'].map(inc, schema='{amount: int}')
assert not isscalar(s.dshape.measure)
assert s.dshape == dshape('var * {amount: int}')
expr = (t[['name', 'amount']]
.map(identity, schema='{name: string, amount: int}'))
assert expr._name is None
@pytest.mark.xfail(reason="Not sure that we should even support this")
def test_map_without_any_info():
t = TableSymbol('t', '{name: string, amount: int32, id: int32}')
assert iscolumn(t['amount'].map(inc, 'int'))
assert not iscolumn(t[['name', 'amount']].map(identity))
def test_apply():
t = Symbol('t', 'var * {name: string, amount: int32, id: int32}')
s = t['amount'].apply(sum, dshape='real')
r = t['amount'].apply(sum, dshape='3 * real')
assert s.dshape == dshape('real')
assert r.schema == dshape('real')
def test_TableSymbol_printing_is_legible():
accounts = TableSymbol('accounts', '{name: string, balance: int, id: int}')
expr = (exp(accounts.balance * 10)) + accounts['id']
assert "exp(accounts.balance * 10)" in str(expr)
assert "+ accounts.id" in str(expr)
def test_merge():
t = TableSymbol('t', 'int64')
p = TableSymbol('p', '{amount:int}')
accounts = TableSymbol('accounts',
'{name: string, balance: int32, id: int32}')
new_amount = (accounts.balance * 1.5).label('new')
c = merge(accounts[['name', 'balance']], new_amount)
assert c.fields == ['name', 'balance', 'new']
assert c.schema == dshape('{name: string, balance: int32, new: float64}')
with pytest.raises(ValueError):
merge(t, t)
with pytest.raises(ValueError):
merge(t, p)
def test_merge_repeats():
accounts = TableSymbol('accounts',
'{name: string, balance: int32, id: int32}')
with pytest.raises(ValueError):
merge(accounts, (accounts.balance + 1).label('balance'))
def test_merge_project():
accounts = TableSymbol('accounts',
'{name: string, balance: int32, id: int32}')
new_amount = (accounts['balance'] * 1.5).label('new')
c = merge(accounts[['name', 'balance']], new_amount)
assert c['new'].isidentical(new_amount)
assert c['name'].isidentical(accounts['name'])
assert c[['name', 'new']].isidentical(merge(accounts.name, new_amount))
def inc(x):
    """Return ``x + 1`` (shared helper for the map/subterm tests).

    Was ``inc = lambda x: x + 1`` — PEP 8 (E731) prefers ``def`` for a
    lambda bound to a name; behavior and interface are unchanged.
    """
    return x + 1
def test_subterms():
a = TableSymbol('a', '{x: int, y: int, z: int}')
assert list(a._subterms()) == [a]
assert set(a['x']._subterms()) == set([a, a['x']])
assert set(a['x'].map(inc, 'int')._subterms()) == \
set([a, a['x'], a['x'].map(inc, 'int')])
assert a in set((a['x'] + 1)._subterms())
def test_common_subexpression():
a = TableSymbol('a', '{x: int, y: int, z: int}')
assert common_subexpression(a).isidentical(a)
assert common_subexpression(a, a['x']).isidentical(a)
assert common_subexpression(a['y'] + 1, a['x']).isidentical(a)
assert common_subexpression(a['x'].map(inc, 'int'), a['x']).isidentical(a['x'])
def test_schema_of_complex_interaction():
a = TableSymbol('a', '{x: int, y: int, z: int}')
expr = (a['x'] + a['y']) / a['z']
assert expr.schema == dshape('float64')
expr = expr.label('foo')
assert expr.schema == dshape('float64')
def iscolumn(x):
    """True when *x* is a single-column expression (its measure is scalar)."""
    return isscalar(x.dshape.measure)
def test_iscolumn():
a = TableSymbol('a', '{x: int, y: int, z: int}')
assert not iscolumn(a)
assert iscolumn(a['x'])
assert not iscolumn(a[['x', 'y']])
assert not iscolumn(a[['x']])
assert iscolumn((a['x'] + a['y']))
assert iscolumn(a['x'].distinct())
assert not iscolumn(a[['x']].distinct())
assert not iscolumn(by(a['x'], total=a['y'].sum()))
assert iscolumn(a['x'][a['x'] > 1])
assert not iscolumn(a[['x', 'y']][a['x'] > 1])
assert iscolumn(a['x'].sort())
assert not iscolumn(a[['x', 'y']].sort())
assert iscolumn(a['x'].head())
assert not iscolumn(a[['x', 'y']].head())
assert iscolumn(TableSymbol('b', 'int'))
assert not iscolumn(TableSymbol('b', '{x: int}'))
def test_discover():
schema = '{x: int, y: int, z: int}'
a = TableSymbol('a', schema)
assert discover(a) == var * schema
def test_improper_selection():
t = TableSymbol('t', '{x: int, y: int, z: int}')
assert raises(Exception, lambda: t[t['x'] > 0][t.sort()[t['y' > 0]]])
def test_serializable():
t = TableSymbol('t', '{id: int, name: string, amount: int}')
import pickle
t2 = pickle.loads(pickle.dumps(t))
assert t.isidentical(t2)
s = TableSymbol('t', '{id: int, city: string}')
expr = join(t[t.amount < 0], s).sort('id').city.head()
expr2 = pickle.loads(pickle.dumps(expr))
assert expr.isidentical(expr2)
def test_table_coercion():
from datetime import date
t = TableSymbol('t', '{name: string, amount: int, timestamp: ?date}')
assert (t.amount + '10').rhs == 10
assert (t.timestamp < '2014-12-01').rhs == date(2014, 12, 1)
def test_isnan():
from blaze import isnan
t = TableSymbol('t', '{name: string, amount: real, timestamp: ?date}')
for expr in [t.amount.isnan(), ~t.amount.isnan()]:
assert eval(str(expr)).isidentical(expr)
assert iscollection(t.amount.isnan().dshape)
assert 'bool' in str(t.amount.isnan().dshape)
def test_distinct_name():
t = TableSymbol('t', '{id: int32, name: string}')
assert t.name.isidentical(t['name'])
assert t.distinct().name.isidentical(t.distinct()['name'])
assert t.id.distinct()._name == 'id'
assert t.name._name == 'name'
def test_leaves():
t = TableSymbol('t', '{id: int32, name: string}')
v = TableSymbol('v', '{id: int32, city: string}')
x = symbol('x', 'int32')
assert t._leaves() == [t]
assert t.id._leaves() == [t]
assert by(t.name, count=t.id.nunique())._leaves() == [t]
assert join(t, v)._leaves() == [t, v]
assert join(v, t)._leaves() == [v, t]
assert (x + 1)._leaves() == [x]
@pytest.fixture
def t():
return TableSymbol('t', '{id: int, amount: float64, name: string}')
def funcname(x, y='<lambda>'):
    """Qualified name of *y* as it appears in a Map repr.

    Python 3 qualnames include the enclosing method (``<locals>``); Python 2
    only shows the module-level name. Uses ``sys.version_info`` directly so
    the helper is self-contained rather than relying on a compat flag.
    """
    import sys
    if sys.version_info[0] >= 3:
        return 'TestRepr.%s.<locals>.%s' % (x, y)
    return 'test_table.%s' % y
class TestRepr(object):
    """Map exprs repr their callables readably (lambdas, partials, builtins).

    The expected strings must match byte-for-byte, including the qualified
    function names produced by ``funcname``.
    """
    def test_partial_lambda(self, t):
        # A partial over a lambda shows both the partial args and the lambda name.
        expr = t.amount.map(partial(lambda x, y: x + y, 1))
        s = str(expr)
        assert s == ("Map(_child=t.amount, "
                     "func=partial(%s, 1), "
                     "_asschema=None, _name0=None)" %
                     funcname('test_partial_lambda'))
    def test_lambda(self, t):
        expr = t.amount.map(lambda x: x)
        s = str(expr)
        assert s == ("Map(_child=t.amount, "
                     "func=%s, _asschema=None, _name0=None)" %
                     funcname('test_lambda'))
    def test_partial(self, t):
        # A partial over a named local function.
        def myfunc(x, y):
            return x + y
        expr = t.amount.map(partial(myfunc, 1))
        s = str(expr)
        assert s == ("Map(_child=t.amount, "
                     "func=partial(%s, 1), "
                     "_asschema=None, _name0=None)" % funcname('test_partial',
                                                              'myfunc'))
    def test_builtin(self, t):
        # Builtins render with their dotted name, no qualname machinery.
        expr = t.amount.map(datetime.fromtimestamp)
        s = str(expr)
        assert s == ("Map(_child=t.amount, "
                     "func=datetime.fromtimestamp, _asschema=None,"
                     " _name0=None)")
    def test_udf(self, t):
        def myfunc(x):
            return x + 1
        expr = t.amount.map(myfunc)
        s = str(expr)
        assert s == ("Map(_child=t.amount, "
                     "func=%s, _asschema=None,"
                     " _name0=None)" % funcname('test_udf', 'myfunc'))
    def test_nested_partial(self, t):
        # Nested partials render inside-out, mirroring their construction.
        def myfunc(x, y, z):
            return x + y + z
        f = partial(partial(myfunc, 2), 1)
        expr = t.amount.map(f)
        s = str(expr)
        assert s == ("Map(_child=t.amount, func=partial(partial(%s, 2), 1),"
                     " _asschema=None, _name0=None)" %
                     funcname('test_nested_partial', 'myfunc'))
def test_count_values():
t = TableSymbol('t', '{name: string, amount: int, city: string}')
assert t.name.count_values(sort=False).isidentical(
by(t.name, count=t.name.count()))
assert t.name.count_values(sort=True).isidentical(
by(t.name, count=t.name.count()).sort('count', ascending=False))
def test_dir():
    """dir() exposes only the accessor namespaces valid for each column dtype."""
    tbl = TableSymbol('t', '{name: string, amount: int, dt: datetime}')
    datetime_attrs = dir(tbl.dt)
    numeric_attrs = dir(tbl.amount)
    assert 'day' in datetime_attrs
    assert 'mean' not in datetime_attrs
    assert 'mean' in numeric_attrs
    assert 'like' not in dir(tbl[['amount', 'dt']])
    assert 'any' not in dir(tbl.name)
def test_distinct_column():
t = TableSymbol('t', '{name: string, amount: int, dt: datetime}')
assert t.name.distinct().name.dshape == t.name.distinct().dshape
assert t.name.distinct().name.isidentical(t.name.distinct())
def test_columns_attribute_for_backwards_compatibility():
    """Legacy ``.columns`` mirrors ``.fields`` on tables, and is absent on columns."""
    accounts = TableSymbol('t', '{name: string, amount: int, dt: datetime}')
    assert accounts.columns == accounts.fields
    assert 'columns' in dir(accounts)
    assert 'columns' not in dir(accounts.name)
| {
"content_hash": "e009993276ac1a7412978545f952ea6d",
"timestamp": "",
"source": "github",
"line_count": 842,
"max_line_length": 83,
"avg_line_length": 30.340855106888363,
"alnum_prop": 0.5691079187380123,
"repo_name": "cpcloud/blaze",
"id": "81ac000c206bf113160b410770d79852650ddbfe",
"size": "25547",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "blaze/expr/tests/test_table.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "810625"
},
{
"name": "Shell",
"bytes": "35"
}
],
"symlink_target": ""
} |
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
import json
from mock import patch
from app import create_app
from app.database import db
from app.profile.models import User
class Auth0LoginTestCase(unittest.TestCase):
    """Tests for GET /api/auth/login (redirect to the Auth0 login page)."""

    def setUp(self):
        self.app = create_app()
        self.client = self.app.test_client()

    def test_returns_302(self):
        response = self.client.get('/api/auth/login')
        self.assertEqual(response.status_code, 302)

    def test_redirection(self):
        response = self.client.get('/api/auth/login')
        # BUG FIX: ``response.data`` is bytes under Python 3, so comparing a
        # str against it raises TypeError; decode to text before asserting.
        self.assertIn('Redirecting...', response.get_data(as_text=True))

    def test_redirected(self):
        response = self.client.get('/api/auth/login')
        self.assertNotEqual(response.location,
                            'http://localhost:5000/api/auth/login')

    def tearDown(self):
        # Drop everything so the next test starts from a clean database.
        with self.app.app_context():
            db.session.remove()
            db.drop_all()
            db.engine.dispose()
class AuthTokenTestCase(unittest.TestCase):
    """Tests for POST /api/auth/token: input validation, user lookup, secret check.

    The repeated JSON-POST boilerplate and the inconsistent mix of bare
    ``assert`` and ``assertEqual`` are factored into two private helpers;
    every public test name and its observable behavior are unchanged.
    """

    auth_token_url = '/api/auth/token'

    def setUp(self):
        # Fresh app + database seeded with a single known user.
        self.app = create_app()
        self.client = self.app.test_client()
        with self.app.app_context():
            db.drop_all()
            db.create_all()
            self.user = User()
            self.user.user_id = 'trial_id'
            self.user.email, self.user.name, self.user.secret = \
                'test@test.com', 'test_user', 'super_secret'
            db.session.add(self.user)
            db.session.commit()

    def tearDown(self):
        # Drop everything so the next test starts from a clean database.
        with self.app.app_context():
            db.session.remove()
            db.drop_all()
            db.engine.dispose()

    def _post_token(self, payload):
        # Helper: POST *payload* JSON-encoded to the token endpoint.
        return self.client.post(self.auth_token_url,
                                data=json.dumps(payload),
                                content_type='application/json')

    def _assert_error(self, rv, status_code, error_code):
        # Helper: the response has the given HTTP status and JSON error_code.
        data = json.loads(rv.data)
        self.assertEqual(rv.status_code, status_code)
        self.assertEqual(data['error_code'], error_code)

    def test_throw_400_if_user_name_and_email_is_none(self):
        rv = self._post_token({'username': None, 'email': None})
        self._assert_error(rv, 400, 'INVALID_INPUT')

    def test_throw_400_if_secret_is_none(self):
        rv = self._post_token({'username': 'test', 'secret': None})
        self._assert_error(rv, 400, 'INVALID_INPUT')

    def test_throw_404_if_user_id_do_not_exists(self):
        rv = self._post_token({'username': None,
                               'email': 'test1@test.com',
                               'secret': 'super_secret'})
        self._assert_error(rv, 404, 'USER_NOT_FOUND')

    def test_throw_404_if_user_email_do_not_exists(self):
        rv = self._post_token({'username': 'not_found_user',
                               'email': None,
                               'secret': 'super_secret'})
        self._assert_error(rv, 404, 'USER_NOT_FOUND')

    def test_throw_403_if_user_name_and_secret_key_does_not_match(self):
        rv = self._post_token({'username': 'test_user',
                               'email': None,
                               'secret': 'super_secret1'})
        self._assert_error(rv, 403, 'SECRET_ERROR')

    def test_throw_403_if_email_and_secret_key_does_not_match(self):
        rv = self._post_token({'username': None,
                               'email': 'test@test.com',
                               'secret': 'super_secret1'})
        self._assert_error(rv, 403, 'SECRET_ERROR')

    def test_throw_500_if_exception_occours(self):
        # Deliberately malformed JSON body triggers the generic error handler.
        rv = self.client.post(self.auth_token_url,
                              data="'username': None,",
                              content_type='application/json')
        self._assert_error(rv, 500, 'GENERIC_ERROR')

    def test_return_200_if_email_and_secret_matches(self):
        rv = self._post_token({'username': None,
                               'email': 'test@test.com',
                               'secret': 'super_secret'})
        self.assertEqual(rv.status_code, 200)

    def test_return_200_if_user_id_and_secret_matches(self):
        rv = self._post_token({'username': 'test_user',
                               'email': None,
                               'secret': 'super_secret'})
        self.assertEqual(rv.status_code, 200)
class GetS3SignedUrlTestCase(unittest.TestCase):
    """Exercises POST /api/auth/bitstore_upload (S3 pre-signed POST)."""

    url = '/api/auth/bitstore_upload'

    def setUp(self):
        self.app = create_app()
        self.client = self.app.test_client()

    def _post_payload(self, payload):
        # Helper: POST the given payload as JSON to the upload endpoint.
        return self.client.post(self.url,
                                data=json.dumps(payload),
                                content_type='application/json')

    def test_throw_400_if_package_is_None(self):
        rv = self._post_payload({'publisher': 'test_publisher',
                                 'package': None,
                                 'md5': 'm'})
        self.assertEqual(400, rv.status_code)

    def test_throw_400_if_publisher_is_None(self):
        rv = self._post_payload({'publisher': None,
                                 'package': 'test_package',
                                 'md5': 'm'})
        self.assertEqual(400, rv.status_code)

    def test_throw_400_if_md5_is_None(self):
        rv = self._post_payload({'publisher': 'test_publisher',
                                 'package': 'test_package',
                                 'md5': None})
        self.assertEqual(400, rv.status_code)

    def test_throw_500_if_internal_server_errror(self):
        # No request body at all -> generic server failure.
        rv = self.client.post(self.url,
                              content_type='application/json')
        self.assertEqual(500, rv.status_code)

    @patch('app.package.models.BitStore.generate_pre_signed_post_object')
    def test_should_return_400_if_path_is_datapackage_json(self, signed_url):
        signed_url.return_value = {'url': 'https://trial_url'}
        response = self._post_payload({'publisher': 'test_publisher',
                                       'package': 'test_package',
                                       'md5': 'm',
                                       'path': "datapackage.json"})
        body = json.loads(response.data)
        self.assertEqual(400, response.status_code)
        self.assertEqual("INVALID_INPUT", body['error_code'])

    @patch('app.package.models.BitStore.generate_pre_signed_post_object')
    def test_200_if_all_right(self, signed_url):
        signed_url.return_value = {'url': 'https://trial_url'}
        response = self._post_payload({'publisher': 'test_publisher',
                                       'package': 'test_package',
                                       'md5': 'm'})
        body = json.loads(response.data)
        self.assertEqual('https://trial_url', body['data']['url'])
class CallbackHandlingTestCase(unittest.TestCase):
    """Tests for the OAuth callback endpoint (/api/auth/callback)."""

    def setUp(self):
        self.app = create_app()
        self.client = self.app.test_client()

    @patch('app.auth.models.Auth0.get_user_info_with_code')
    @patch('app.auth.models.Auth0.get_auth0_token')
    def test_throw_500_if_error_getting_user_info_from_auth0(self, get_auth0_token, get_user):
        # Auth0 returns no user info -> the endpoint must answer 500.
        get_user.return_value = None
        get_auth0_token.return_value = None
        response = self.client.get('/api/auth/callback?code=123')
        self.assertTrue(get_user.called)
        data = json.loads(response.data)
        self.assertEqual(data['error_code'], 'GENERIC_ERROR')
        self.assertEqual(response.status_code, 500)

    @patch('app.auth.models.Auth0.get_auth0_token')
    @patch('app.auth.models.Auth0.get_user_info_with_code')
    @patch('app.auth.models.JWT.encode')
    @patch('app.profile.models.User.create_or_update_user_from_callback')
    def test_return_200_if_all_right(self,
                                     create_user, jwt_helper, get_user_with_code,
                                     get_auth0_token):
        get_auth0_token.return_value = None
        # BUG FIX: the original wrote
        #     get_user_with_code('123').return_value = {}
        # which *calls* the mock and configures the return value of the call's
        # result (a child MagicMock), leaving the mocked function itself
        # returning a fresh MagicMock instead of {}.
        get_user_with_code.return_value = {}
        jwt_helper.return_value = "132432"
        create_user.return_value = User(id=1, email="abc@abc.com", name='abc', secret='12345')
        response = self.client.get('/api/auth/callback?code=123')
        self.assertEqual(create_user.call_count, 1)
        self.assertEqual(jwt_helper.call_count, 1)
        self.assertEqual(response.status_code, 200)

    def tearDown(self):
        with self.app.app_context():
            db.session.remove()
            db.drop_all()
            db.engine.dispose()
"content_hash": "eb78a9a00fcb4a0263f6cdd087295252",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 94,
"avg_line_length": 41.940520446096656,
"alnum_prop": 0.49104768658039355,
"repo_name": "subhankarb/dpr-api",
"id": "2e6ad7ea13f44d47178c34648d6c72f38e5cda56",
"size": "11306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/auth/test_controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "201797"
}
],
"symlink_target": ""
} |
""" Cisco_IOS_XR_ipv4_arp_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR ipv4\-arp package configuration.
This module contains definitions
for the following management objects\:
arp\: ARP configuration
arpgmp\: arpgmp
arp\-redundancy\: arp redundancy
This YANG module augments the
Cisco\-IOS\-XR\-ifmgr\-cfg
module with configuration data.
Copyright (c) 2013\-2015 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class ArpEncapEnum(Enum):
    """ARP encapsulation type.

    .. data:: ARPA = 1   Encapsulation type ARPA
    .. data:: SRP = 4    Encapsulation type SRP
    .. data:: SRPA = 5   Encapsulation type SRPA
    .. data:: SRPB = 6   Encapsulation type SRPB
    """

    ARPA = 1
    SRP = 4
    SRPA = 5
    SRPB = 6

    @staticmethod
    def _meta_info():
        # Imported lazily to avoid a circular import at module load time.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_arp_cfg as meta
        return meta._meta_table['ArpEncapEnum']
class ArpEntryEnum(Enum):
    """ARP entry type.

    .. data:: STATIC = 0   Static ARP entry type
    .. data:: ALIAS = 1    Alias ARP entry type
    """

    STATIC = 0
    ALIAS = 1

    @staticmethod
    def _meta_info():
        # Imported lazily to avoid a circular import at module load time.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_arp_cfg as meta
        return meta._meta_table['ArpEntryEnum']
class Arp(object):
    """ARP configuration.

    .. attribute:: inner_cos
        Configure inner cos values for arp packets (**type**: int, 0..7).
    .. attribute:: outer_cos
        Configure outer cos values for arp packets (**type**: int, 0..7).
    """

    _prefix = 'ipv4-arp-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.outer_cos = None
        self.inner_cos = None

    @property
    def _common_path(self):
        # Fixed root path: this container has no list keys.
        return '/Cisco-IOS-XR-ipv4-arp-cfg:arp'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # Data is present as soon as either cos leaf is configured.
        if not self.is_config():
            return False
        return self.inner_cos is not None or self.outer_cos is not None

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_arp_cfg as meta
        return meta._meta_table['Arp']['meta_info']
class Arpgmp(object):
    """arpgmp: per-VRF ARP entry configuration.

    .. attribute:: vrf
        Per VRF configuration, for the default VRF use 'default'
        (**type**: list of :py:class:`Vrf`).
    """

    _prefix = 'ipv4-arp-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.vrf = YList()
        self.vrf.parent = self
        self.vrf.name = 'vrf'

    class Vrf(object):
        """Per VRF configuration, for the default VRF use 'default'.

        .. attribute:: vrf_name <key>
            VRF name (**type**: str).
        .. attribute:: entries
            ARP static and alias entry configuration
            (:py:class:`Entries`).
        """

        _prefix = 'ipv4-arp-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.vrf_name = None
            self.entries = Arpgmp.Vrf.Entries()
            self.entries.parent = self

        class Entries(object):
            """ARP static and alias entry configuration.

            .. attribute:: entry
                Configuration items (**type**: list of :py:class:`Entry`).
            """

            _prefix = 'ipv4-arp-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.entry = YList()
                self.entry.parent = self
                self.entry.name = 'entry'

            class Entry(object):
                """One ARP static or alias entry.

                .. attribute:: address <key>
                    IPv4 address (**type**: str, dotted-quad pattern).
                .. attribute:: encapsulation
                    Encapsulation type (:py:class:`ArpEncapEnum`).
                .. attribute:: entry_type
                    Entry type (:py:class:`ArpEntryEnum`).
                .. attribute:: mac_address
                    MAC address (**type**: str, colon-separated hex pattern).
                """

                _prefix = 'ipv4-arp-cfg'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.address = None
                    self.encapsulation = None
                    self.entry_type = None
                    self.mac_address = None

                @property
                def _common_path(self):
                    # XPath of this entry; requires a parent container and the key leaf.
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')
                    if self.address is None:
                        raise YPYModelError('Key property address is None')
                    return self.parent._common_path +'/Cisco-IOS-XR-ipv4-arp-cfg:entry[Cisco-IOS-XR-ipv4-arp-cfg:address = ' + str(self.address) + ']'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return True

                def _has_data(self):
                    # True when any leaf of this entry is set.
                    if not self.is_config():
                        return False
                    if self.address is not None:
                        return True
                    if self.encapsulation is not None:
                        return True
                    if self.entry_type is not None:
                        return True
                    if self.mac_address is not None:
                        return True
                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_arp_cfg as meta
                    return meta._meta_table['Arpgmp.Vrf.Entries.Entry']['meta_info']

            @property
            def _common_path(self):
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return self.parent._common_path +'/Cisco-IOS-XR-ipv4-arp-cfg:entries'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return True

            def _has_data(self):
                # True when at least one child entry carries data.
                if not self.is_config():
                    return False
                if self.entry is not None:
                    for child_ref in self.entry:
                        if child_ref._has_data():
                            return True
                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_arp_cfg as meta
                return meta._meta_table['Arpgmp.Vrf.Entries']['meta_info']

        @property
        def _common_path(self):
            # Absolute path: Vrf is a top-level list keyed by vrf_name.
            if self.vrf_name is None:
                raise YPYModelError('Key property vrf_name is None')
            return '/Cisco-IOS-XR-ipv4-arp-cfg:arpgmp/Cisco-IOS-XR-ipv4-arp-cfg:vrf[Cisco-IOS-XR-ipv4-arp-cfg:vrf-name = ' + str(self.vrf_name) + ']'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            if not self.is_config():
                return False
            if self.vrf_name is not None:
                return True
            if self.entries is not None and self.entries._has_data():
                return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_arp_cfg as meta
            return meta._meta_table['Arpgmp.Vrf']['meta_info']

    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-ipv4-arp-cfg:arpgmp'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        # True when at least one configured VRF carries data.
        if not self.is_config():
            return False
        if self.vrf is not None:
            for child_ref in self.vrf:
                if child_ref._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_arp_cfg as meta
        return meta._meta_table['Arpgmp']['meta_info']
class ArpRedundancy(object):
    """ARP redundancy configuration container.

    .. attribute:: redundancy
        Configure parameter for ARP Geo redundancy
        (:py:class:`Redundancy` presence container, ``None`` until assigned).
    """

    _prefix = 'ipv4-arp-cfg'
    _revision = '2015-11-09'

    def __init__(self):
        self.redundancy = None

    class Redundancy(object):
        """Configure parameter for ARP Geo redundancy.

        Presence container: its mere existence carries configuration meaning,
        hence ``_is_presence`` is always True.

        .. attribute:: enable
            Enable leaf (:py:class:`Empty`, mandatory). Deleting it also
            deletes all associated objects under ArpRedundancy.
        .. attribute:: groups
            Table of Group (:py:class:`Groups`).
        """

        _prefix = 'ipv4-arp-cfg'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self._is_presence = True
            self.enable = None
            self.groups = ArpRedundancy.Redundancy.Groups()
            self.groups.parent = self

        class Groups(object):
            """Table of Group.

            .. attribute:: group
                List of :py:class:`Group` entries.
            """

            _prefix = 'ipv4-arp-cfg'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.group = YList()
                self.group.parent = self
                self.group.name = 'group'

            class Group(object):
                """One redundancy group.

                .. attribute:: group_id <key>
                    Group ID (**type**: int, 1..32).
                .. attribute:: interface_list
                    List of Interfaces for this Group
                    (:py:class:`InterfaceList` presence container).
                .. attribute:: peers
                    Table of Peer (:py:class:`Peers`).
                .. attribute:: source_interface
                    Interface name (**type**: str).
                """

                _prefix = 'ipv4-arp-cfg'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.group_id = None
                    self.interface_list = None
                    self.peers = ArpRedundancy.Redundancy.Groups.Group.Peers()
                    self.peers.parent = self
                    self.source_interface = None

                class Peers(object):
                    """Table of Peer.

                    .. attribute:: peer
                        List of :py:class:`Peer` entries.
                    """

                    _prefix = 'ipv4-arp-cfg'
                    _revision = '2015-11-09'

                    def __init__(self):
                        self.parent = None
                        self.peer = YList()
                        self.peer.parent = self
                        self.peer.name = 'peer'

                    class Peer(object):
                        """A redundancy peer keyed by its address string.

                        .. attribute:: prefix_string <key>
                            Neighbor IPv4 address (**type**: str; the YANG key
                            accepts either an IPv4 dotted-quad or an IPv6
                            textual form).
                        """

                        _prefix = 'ipv4-arp-cfg'
                        _revision = '2015-11-09'

                        def __init__(self):
                            self.parent = None
                            self.prefix_string = None

                        @property
                        def _common_path(self):
                            # XPath of this peer; needs a parent and the key leaf.
                            if self.parent is None:
                                raise YPYModelError('parent is not set . Cannot derive path.')
                            if self.prefix_string is None:
                                raise YPYModelError('Key property prefix_string is None')
                            return self.parent._common_path +'/Cisco-IOS-XR-ipv4-arp-cfg:peer[Cisco-IOS-XR-ipv4-arp-cfg:prefix-string = ' + str(self.prefix_string) + ']'

                        def is_config(self):
                            ''' Returns True if this instance represents config data else returns False '''
                            return True

                        def _has_data(self):
                            if not self.is_config():
                                return False
                            if self.prefix_string is not None:
                                return True
                            return False

                        @staticmethod
                        def _meta_info():
                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_arp_cfg as meta
                            return meta._meta_table['ArpRedundancy.Redundancy.Groups.Group.Peers.Peer']['meta_info']

                    @property
                    def _common_path(self):
                        if self.parent is None:
                            raise YPYModelError('parent is not set . Cannot derive path.')
                        return self.parent._common_path +'/Cisco-IOS-XR-ipv4-arp-cfg:peers'

                    def is_config(self):
                        ''' Returns True if this instance represents config data else returns False '''
                        return True

                    def _has_data(self):
                        # True when at least one child peer carries data.
                        if not self.is_config():
                            return False
                        if self.peer is not None:
                            for child_ref in self.peer:
                                if child_ref._has_data():
                                    return True
                        return False

                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_arp_cfg as meta
                        return meta._meta_table['ArpRedundancy.Redundancy.Groups.Group.Peers']['meta_info']

                class InterfaceList(object):
                    """List of Interfaces for this Group (presence container).

                    .. attribute:: enable
                        Enable leaf (:py:class:`Empty`, mandatory). Deleting it
                        also deletes all associated objects under InterfaceList.
                    .. attribute:: interfaces
                        Table of Interface (:py:class:`Interfaces`).
                    """

                    _prefix = 'ipv4-arp-cfg'
                    _revision = '2015-11-09'

                    def __init__(self):
                        self.parent = None
                        self._is_presence = True
                        self.enable = None
                        self.interfaces = ArpRedundancy.Redundancy.Groups.Group.InterfaceList.Interfaces()
                        self.interfaces.parent = self

                    class Interfaces(object):
                        """Table of Interface.

                        .. attribute:: interface
                            List of :py:class:`Interface` entries.
                        """

                        _prefix = 'ipv4-arp-cfg'
                        _revision = '2015-11-09'

                        def __init__(self):
                            self.parent = None
                            self.interface = YList()
                            self.interface.parent = self
                            self.interface.name = 'interface'

                        class Interface(object):
                            """An interface membership entry for the group.

                            .. attribute:: interface_name <key>
                                Interface name (**type**: str).
                            .. attribute:: interface_id
                                Interface Id for the interface
                                (**type**: int, 1..65535, mandatory).
                            """

                            _prefix = 'ipv4-arp-cfg'
                            _revision = '2015-11-09'

                            def __init__(self):
                                self.parent = None
                                self.interface_name = None
                                self.interface_id = None

                            @property
                            def _common_path(self):
                                if self.parent is None:
                                    raise YPYModelError('parent is not set . Cannot derive path.')
                                if self.interface_name is None:
                                    raise YPYModelError('Key property interface_name is None')
                                return self.parent._common_path +'/Cisco-IOS-XR-ipv4-arp-cfg:interface[Cisco-IOS-XR-ipv4-arp-cfg:interface-name = ' + str(self.interface_name) + ']'

                            def is_config(self):
                                ''' Returns True if this instance represents config data else returns False '''
                                return True

                            def _has_data(self):
                                if not self.is_config():
                                    return False
                                if self.interface_name is not None:
                                    return True
                                if self.interface_id is not None:
                                    return True
                                return False

                            @staticmethod
                            def _meta_info():
                                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_arp_cfg as meta
                                return meta._meta_table['ArpRedundancy.Redundancy.Groups.Group.InterfaceList.Interfaces.Interface']['meta_info']

                        @property
                        def _common_path(self):
                            if self.parent is None:
                                raise YPYModelError('parent is not set . Cannot derive path.')
                            return self.parent._common_path +'/Cisco-IOS-XR-ipv4-arp-cfg:interfaces'

                        def is_config(self):
                            ''' Returns True if this instance represents config data else returns False '''
                            return True

                        def _has_data(self):
                            if not self.is_config():
                                return False
                            if self.interface is not None:
                                for child_ref in self.interface:
                                    if child_ref._has_data():
                                        return True
                            return False

                        @staticmethod
                        def _meta_info():
                            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_arp_cfg as meta
                            return meta._meta_table['ArpRedundancy.Redundancy.Groups.Group.InterfaceList.Interfaces']['meta_info']

                    @property
                    def _common_path(self):
                        if self.parent is None:
                            raise YPYModelError('parent is not set . Cannot derive path.')
                        return self.parent._common_path +'/Cisco-IOS-XR-ipv4-arp-cfg:interface-list'

                    def is_config(self):
                        ''' Returns True if this instance represents config data else returns False '''
                        return True

                    def _has_data(self):
                        # Presence container: existing at all counts as data.
                        if not self.is_config():
                            return False
                        if self._is_presence:
                            return True
                        if self.enable is not None:
                            return True
                        if self.interfaces is not None and self.interfaces._has_data():
                            return True
                        return False

                    @staticmethod
                    def _meta_info():
                        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_arp_cfg as meta
                        return meta._meta_table['ArpRedundancy.Redundancy.Groups.Group.InterfaceList']['meta_info']

                @property
                def _common_path(self):
                    # Absolute path: keyed by group_id under the fixed redundancy subtree.
                    if self.group_id is None:
                        raise YPYModelError('Key property group_id is None')
                    return '/Cisco-IOS-XR-ipv4-arp-cfg:arp-redundancy/Cisco-IOS-XR-ipv4-arp-cfg:redundancy/Cisco-IOS-XR-ipv4-arp-cfg:groups/Cisco-IOS-XR-ipv4-arp-cfg:group[Cisco-IOS-XR-ipv4-arp-cfg:group-id = ' + str(self.group_id) + ']'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return True

                def _has_data(self):
                    if not self.is_config():
                        return False
                    if self.group_id is not None:
                        return True
                    if self.interface_list is not None and self.interface_list._has_data():
                        return True
                    if self.peers is not None and self.peers._has_data():
                        return True
                    if self.source_interface is not None:
                        return True
                    return False

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_arp_cfg as meta
                    return meta._meta_table['ArpRedundancy.Redundancy.Groups.Group']['meta_info']

            @property
            def _common_path(self):
                return '/Cisco-IOS-XR-ipv4-arp-cfg:arp-redundancy/Cisco-IOS-XR-ipv4-arp-cfg:redundancy/Cisco-IOS-XR-ipv4-arp-cfg:groups'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return True

            def _has_data(self):
                if not self.is_config():
                    return False
                if self.group is not None:
                    for child_ref in self.group:
                        if child_ref._has_data():
                            return True
                return False

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_arp_cfg as meta
                return meta._meta_table['ArpRedundancy.Redundancy.Groups']['meta_info']

        @property
        def _common_path(self):
            return '/Cisco-IOS-XR-ipv4-arp-cfg:arp-redundancy/Cisco-IOS-XR-ipv4-arp-cfg:redundancy'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return True

        def _has_data(self):
            # Presence container: existing at all counts as data.
            if not self.is_config():
                return False
            if self._is_presence:
                return True
            if self.enable is not None:
                return True
            if self.groups is not None and self.groups._has_data():
                return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_arp_cfg as meta
            return meta._meta_table['ArpRedundancy.Redundancy']['meta_info']

    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-ipv4-arp-cfg:arp-redundancy'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return True

    def _has_data(self):
        if not self.is_config():
            return False
        if self.redundancy is not None and self.redundancy._has_data():
            return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_ipv4_arp_cfg as meta
        return meta._meta_table['ArpRedundancy']['meta_info']
| {
"content_hash": "889135040374af9968c4c09ee72d76d8",
"timestamp": "",
"source": "github",
"line_count": 902,
"max_line_length": 341,
"avg_line_length": 33.791574279379155,
"alnum_prop": 0.4466535433070866,
"repo_name": "abhikeshav/ydk-py",
"id": "12ead048fff05bf96216fccfaf928f8dd5d1dab9",
"size": "30480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ipv4_arp_cfg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "446117934"
}
],
"symlink_target": ""
} |
"""Read from and write to tar format archives.
"""
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)"
__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
from builtins import open as bltn_open
import sys
import os
import io
import shutil
import stat
import time
import struct
import copy
import re
try:
import grp, pwd
except ImportError:
grp = pwd = None
# os.symlink on Windows prior to 6.0 raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
# OSError (winerror=1314) will be raised if the caller does not hold the
# SeCreateSymbolicLinkPrivilege privilege
symlink_exception += (OSError,)
except NameError:
pass
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError", "ReadError",
"CompressionError", "StreamError", "ExtractError", "HeaderError",
"ENCODING", "USTAR_FORMAT", "GNU_FORMAT", "PAX_FORMAT",
"DEFAULT_FORMAT", "open"]
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0"                     # the null character
BLOCKSIZE = 512                 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20     # length of records
GNU_MAGIC = b"ustar \0"         # magic gnu tar string
POSIX_MAGIC = b"ustar\x0000"    # magic posix tar string
LENGTH_NAME = 100               # maximum length of a filename
LENGTH_LINK = 100               # maximum length of a linkname
LENGTH_PREFIX = 155             # maximum length of the prefix field

# Single-byte type flags from the member header.
REGTYPE = b"0"                  # regular file
AREGTYPE = b"\0"                # regular file
LNKTYPE = b"1"                  # link (inside tarfile)
SYMTYPE = b"2"                  # symbolic link
CHRTYPE = b"3"                  # character special device
BLKTYPE = b"4"                  # block special device
DIRTYPE = b"5"                  # directory
FIFOTYPE = b"6"                 # fifo special device
CONTTYPE = b"7"                 # contiguous file
GNUTYPE_LONGNAME = b"L"         # GNU tar longname
GNUTYPE_LONGLINK = b"K"         # GNU tar longlink
GNUTYPE_SPARSE = b"S"           # GNU tar sparse file
XHDTYPE = b"x"                  # POSIX.1-2001 extended header
XGLTYPE = b"g"                  # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X"          # Solaris extended header

# Archive format identifiers accepted by TarFile.
USTAR_FORMAT = 0                # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1                  # GNU tar format
PAX_FORMAT = 2                  # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT     # format used when none is requested
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
                   SYMTYPE, DIRTYPE, FIFOTYPE,
                   CONTTYPE, CHRTYPE, BLKTYPE,
                   GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
                   GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
                 CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
             GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
              "uid", "gid", "uname", "gname")
# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = {"path", "linkpath", "uname", "gname"}
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
    "atime": float,
    "ctime": float,
    "mtime": float,
    "uid": int,
    "gid": int,
    "size": int
}
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
# On Windows the filesystem encoding may be a legacy code page,
# so member names use UTF-8 there.
if os.name == "nt":
    ENCODING = "utf-8"
else:
    ENCODING = sys.getfilesystemencoding()
def stn(s, length, encoding, errors):
    """Convert a string to a null-terminated bytes object.

    The result is truncated or NUL-padded to exactly *length* bytes.
    """
    encoded = s.encode(encoding, errors)
    # b"\0" is the module's NUL constant; pad count is never negative
    # for over-long input because bytes * negative == b"".
    return encoded[:length] + (length - len(encoded)) * b"\0"
def nts(s, encoding, errors):
    """Convert a null-terminated bytes object to a string.

    Everything from the first NUL byte onwards is treated as padding.
    """
    data = s.partition(b"\0")[0]
    return data.decode(encoding, errors)
def nti(s):
    """Convert a tar number field to a Python integer.

    Two encodings exist (see itn): GNU base-256, flagged by a leading
    0o200 (positive) or 0o377 (negative) byte, and plain octal ASCII.
    """
    marker = s[0]
    if marker in (0o200, 0o377):
        # Big-endian base-256 payload after the marker byte.
        value = 0
        for byte in s[1:]:
            value = (value << 8) + byte
        if marker == 0o377:
            # Negative numbers are stored in two's complement form.
            value -= 256 ** (len(s) - 1)
        return value
    try:
        text = nts(s, "ascii", "strict")
        return int(text.strip() or "0", 8)
    except ValueError:
        raise InvalidHeaderError("invalid header")
def itn(n, digits=8, format=DEFAULT_FORMAT):
    """Convert a python number to a number field.

    POSIX 1003.1-1988 stores numbers as a string of octal digits followed
    by a null byte, which limits values to (8**(digits-1))-1.  GNU tar can
    store larger values: a leading 0o200 (positive) or 0o377 (negative)
    marker byte followed by digits-1 bytes of big-endian base-256 payload,
    allowing values up to (256**(digits-1))-1.
    """
    if 0 <= n < 8 ** (digits - 1):
        # Fits in the portable octal representation.
        return bytes("%0*o" % (digits - 1, int(n)), "ascii") + NUL
    if format == GNU_FORMAT and -256 ** (digits - 1) <= n < 256 ** (digits - 1):
        if n >= 0:
            field = bytearray([0o200])
        else:
            field = bytearray([0o377])
            n = 256 ** digits + n        # two's complement form
        # Append the digits-1 low bytes of n, most significant first.
        for shift in reversed(range(digits - 1)):
            field.append((n >> (8 * shift)) & 0o377)
        return field
    raise ValueError("overflow in number field")
def calc_chksums(buf):
    """Calculate the checksum for a member's header.

    All 512 header bytes are summed, except the 8-byte chksum field
    (offset 148) which is treated as if filled with spaces.  According to
    the GNU tar sources, some tars (Sun and NeXT) calculate chksum with
    signed char, which differs when bytes have the high bit set, so both
    the unsigned and the signed sum are returned.
    """
    unsigned = 256 + sum(struct.unpack_from("148B8x356B", buf))
    signed = 256 + sum(struct.unpack_from("148b8x356b", buf))
    return unsigned, signed
def copyfileobj(src, dst, length=None, exception=OSError, bufsize=None):
    """Copy length bytes from fileobj src to fileobj dst.

    If length is None, copy the entire content.  Raises *exception* if
    src runs out of data before *length* bytes were copied.
    """
    bufsize = bufsize or 16 * 1024
    if length == 0:
        return
    if length is None:
        shutil.copyfileobj(src, dst, bufsize)
        return
    full_blocks, remainder = divmod(length, bufsize)
    for _ in range(full_blocks):
        chunk = src.read(bufsize)
        if len(chunk) < bufsize:
            raise exception("unexpected end of data")
        dst.write(chunk)
    if remainder:
        chunk = src.read(remainder)
        if len(chunk) < remainder:
            raise exception("unexpected end of data")
        dst.write(chunk)
def filemode(mode):
    """Deprecated in this location; use stat.filemode."""
    import warnings
    warnings.warn(
        "deprecated in favor of stat.filemode", DeprecationWarning,
        stacklevel=2)
    return stat.filemode(mode)
def _safe_print(s):
    """Print *s* (with a trailing space) without UnicodeEncodeError.

    Characters that sys.stdout's encoding cannot represent are shown
    as backslash escapes instead of raising.
    """
    enc = getattr(sys.stdout, "encoding", None)
    if enc is not None:
        s = s.encode(enc, "backslashreplace").decode(enc)
    print(s, end=" ")
class TarError(Exception):
    """Base class for all tarfile-related exceptions."""
class ExtractError(TarError):
    """General exception raised for errors during member extraction."""
class ReadError(TarError):
    """Raised when a tar archive cannot be read (e.g. not a tar file)."""
class CompressionError(TarError):
    """Raised when a compression method is unavailable or data is corrupt."""
class StreamError(TarError):
    """Raised for operations unsupported on stream-like TarFiles."""
class HeaderError(TarError):
    """Base class for the member-header error hierarchy."""
class EmptyHeaderError(HeaderError):
    """Raised when a header buffer is empty (zero bytes read)."""
class TruncatedHeaderError(HeaderError):
    """Raised when a header buffer is shorter than one block."""
class EOFHeaderError(HeaderError):
    """Raised when an all-zero end-of-archive header block is seen."""
class InvalidHeaderError(HeaderError):
    """Raised when a header block fails validation (e.g. bad checksum)."""
class SubsequentHeaderError(HeaderError):
    """Raised when an expected extended/continuation header is missing or bad."""
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile:
    """Unbuffered file object built directly on os-level file descriptors.

    Used in place of a regular file object for streaming access, where
    the extra buffering of io objects is unwanted.
    """

    def __init__(self, name, mode):
        # Translate the one-letter mode into os.open() flags; anything
        # other than "r"/"w" raises KeyError, same as before.
        flags = {
            "r": os.O_RDONLY,
            "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
        }[mode]
        # On Windows O_BINARY keeps newline translation out of the data;
        # elsewhere the attribute does not exist and contributes nothing.
        flags |= getattr(os, "O_BINARY", 0)
        self.fd = os.open(name, flags, 0o666)

    def close(self):
        os.close(self.fd)

    def read(self, size):
        return os.read(self.fd, size)

    def write(self, s):
        os.write(self.fd, s)
class _Stream:
    """Class that serves as an adapter between TarFile and
       a stream-like object. The stream-like object only
       needs to have a read() or write() method and is accessed
       blockwise. Use of gzip or bzip2 compression is possible.
       A stream-like object could be for example: sys.stdin,
       sys.stdout, a socket, a tape device etc.

       _Stream is intended to be used only internally.
    """

    def __init__(self, name, mode, comptype, fileobj, bufsize):
        """Construct a _Stream object.
        """
        self._extfileobj = True
        if fileobj is None:
            fileobj = _LowLevelFile(name, mode)
            self._extfileobj = False

        if comptype == '*':
            # Enable transparent compression detection for the
            # stream interface
            fileobj = _StreamProxy(fileobj)
            comptype = fileobj.getcomptype()

        self.name = name or ""
        self.mode = mode
        self.comptype = comptype
        self.fileobj = fileobj
        self.bufsize = bufsize
        self.buf = b""
        self.pos = 0
        self.closed = False

        try:
            if comptype == "gz":
                try:
                    import zlib
                except ImportError:
                    raise CompressionError("zlib module is not available")
                self.zlib = zlib
                self.crc = zlib.crc32(b"")
                if mode == "r":
                    self._init_read_gz()
                    self.exception = zlib.error
                else:
                    self._init_write_gz()

            elif comptype == "bz2":
                try:
                    import bz2
                except ImportError:
                    raise CompressionError("bz2 module is not available")
                if mode == "r":
                    self.dbuf = b""
                    self.cmp = bz2.BZ2Decompressor()
                    self.exception = OSError
                else:
                    self.cmp = bz2.BZ2Compressor()

            elif comptype == "xz":
                try:
                    import lzma
                except ImportError:
                    raise CompressionError("lzma module is not available")
                if mode == "r":
                    self.dbuf = b""
                    self.cmp = lzma.LZMADecompressor()
                    self.exception = lzma.LZMAError
                else:
                    self.cmp = lzma.LZMACompressor()

            elif comptype != "tar":
                raise CompressionError("unknown compression type %r" % comptype)

        except:
            # Bare except is deliberate: close what we opened, then
            # re-raise whatever went wrong during initialization.
            if not self._extfileobj:
                self.fileobj.close()
            self.closed = True
            raise

    def __del__(self):
        if hasattr(self, "closed") and not self.closed:
            self.close()

    def _init_write_gz(self):
        """Initialize for writing with gzip compression.
        """
        self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
                                         -self.zlib.MAX_WBITS,
                                         self.zlib.DEF_MEM_LEVEL,
                                         0)
        timestamp = struct.pack("<L", int(time.time()))
        self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
        if self.name.endswith(".gz"):
            self.name = self.name[:-3]
        # RFC1952 says we must use ISO-8859-1 for the FNAME field.
        self.__write(self.name.encode("iso-8859-1", "replace") + NUL)

    def write(self, s):
        """Write string s to the stream.
        """
        if self.comptype == "gz":
            self.crc = self.zlib.crc32(s, self.crc)
        self.pos += len(s)
        if self.comptype != "tar":
            s = self.cmp.compress(s)
        self.__write(s)

    def __write(self, s):
        """Write string s to the stream if a whole new block
           is ready to be written.
        """
        self.buf += s
        while len(self.buf) > self.bufsize:
            self.fileobj.write(self.buf[:self.bufsize])
            self.buf = self.buf[self.bufsize:]

    def close(self):
        """Close the _Stream object. No operation should be
           done on it afterwards.
        """
        if self.closed:
            return
        self.closed = True
        try:
            if self.mode == "w" and self.comptype != "tar":
                self.buf += self.cmp.flush()

            if self.mode == "w" and self.buf:
                self.fileobj.write(self.buf)
                self.buf = b""
                if self.comptype == "gz":
                    # Gzip trailer: CRC32 and uncompressed size (mod 2**32).
                    self.fileobj.write(struct.pack("<L", self.crc))
                    self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
        finally:
            if not self._extfileobj:
                self.fileobj.close()

    def _init_read_gz(self):
        """Initialize for reading a gzip compressed fileobj.
        """
        self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
        self.dbuf = b""

        # taken from gzip.GzipFile with some alterations
        if self.__read(2) != b"\037\213":
            raise ReadError("not a gzip file")
        if self.__read(1) != b"\010":
            raise CompressionError("unsupported compression method")

        flag = ord(self.__read(1))
        self.__read(6)

        if flag & 4:
            xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
            self.read(xlen)
        if flag & 8:
            while True:
                s = self.__read(1)
                if not s or s == NUL:
                    break
        if flag & 16:
            while True:
                s = self.__read(1)
                if not s or s == NUL:
                    break
        if flag & 2:
            self.__read(2)

    def tell(self):
        """Return the stream's file pointer position.
        """
        return self.pos

    def seek(self, pos=0):
        """Set the stream's file pointer to pos. Negative seeking
           is forbidden.
        """
        if pos - self.pos >= 0:
            blocks, remainder = divmod(pos - self.pos, self.bufsize)
            for i in range(blocks):
                self.read(self.bufsize)
            self.read(remainder)
        else:
            raise StreamError("seeking backwards is not allowed")
        return self.pos

    def read(self, size=None):
        """Return the next size number of bytes from the stream.
           If size is not defined, return all bytes of the stream
           up to EOF.
        """
        if size is None:
            t = []
            while True:
                buf = self._read(self.bufsize)
                if not buf:
                    break
                t.append(buf)
            # BUG FIX: the chunks are bytes objects, so they must be
            # joined with a bytes separator. The previous "".join(t)
            # raised TypeError whenever read() was called without size.
            buf = b"".join(t)
        else:
            buf = self._read(size)
        self.pos += len(buf)
        return buf

    def _read(self, size):
        """Return size bytes from the stream.
        """
        if self.comptype == "tar":
            return self.__read(size)

        c = len(self.dbuf)
        while c < size:
            buf = self.__read(self.bufsize)
            if not buf:
                break
            try:
                buf = self.cmp.decompress(buf)
            except self.exception:
                raise ReadError("invalid compressed data")
            self.dbuf += buf
            c += len(buf)
        buf = self.dbuf[:size]
        self.dbuf = self.dbuf[size:]
        return buf

    def __read(self, size):
        """Return size bytes from stream. If internal buffer is empty,
           read another block from the stream.
        """
        c = len(self.buf)
        while c < size:
            buf = self.fileobj.read(self.bufsize)
            if not buf:
                break
            self.buf += buf
            c += len(buf)
        buf = self.buf[:size]
        self.buf = self.buf[size:]
        return buf
# class _Stream
class _StreamProxy(object):
    """Small proxy class that enables transparent compression
    detection for the Stream interface (mode 'r|*').
    """
    def __init__(self, fileobj):
        self.fileobj = fileobj
        # Pre-read one block so getcomptype() can inspect the magic
        # bytes without consuming them from the caller's point of view.
        self.buf = self.fileobj.read(BLOCKSIZE)
    def read(self, size):
        # The first call returns the buffered block; rebinding self.read
        # makes every later call go straight to the underlying file
        # object. NOTE: `size` is ignored on this first call.
        self.read = self.fileobj.read
        return self.buf
    def getcomptype(self):
        # Identify the compression scheme by its magic number.
        if self.buf.startswith(b"\x1f\x8b\x08"):
            return "gz"
        elif self.buf[0:3] == b"BZh" and self.buf[4:10] == b"1AY&SY":
            return "bz2"
        elif self.buf.startswith((b"\x5d\x00\x00\x80", b"\xfd7zXZ")):
            return "xz"
        else:
            return "tar"
    def close(self):
        self.fileobj.close()
# class StreamProxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
    """A thin wrapper around an existing file object that
    provides a part of its data as an individual file
    object.
    """
    def __init__(self, fileobj, offset, size, blockinfo=None):
        self.fileobj = fileobj
        self.offset = offset      # start of this member's data in fileobj
        self.size = size          # logical (uncompressed/unsparsed) size
        self.position = 0         # current logical read position
        self.name = getattr(fileobj, "name", None)
        self.closed = False
        if blockinfo is None:
            # Non-sparse member: one data block covering everything.
            blockinfo = [(0, size)]
        # Construct a map with data and zero blocks.
        # Each entry is (is_data, logical_start, logical_stop, real_offset)
        # where real_offset is the position in fileobj for data blocks and
        # None for holes, which read back as NUL bytes.
        self.map_index = 0
        self.map = []
        lastpos = 0
        realpos = self.offset
        for offset, size in blockinfo:
            if offset > lastpos:
                # Gap before this data block becomes a zero (hole) block.
                self.map.append((False, lastpos, offset, None))
            self.map.append((True, offset, offset + size, realpos))
            realpos += size
            lastpos = offset + size
        if lastpos < self.size:
            # Trailing hole up to the member's logical size.
            self.map.append((False, lastpos, self.size, None))
    def flush(self):
        pass
    def readable(self):
        return True
    def writable(self):
        return False
    def seekable(self):
        return self.fileobj.seekable()
    def tell(self):
        """Return the current file position.
        """
        return self.position
    def seek(self, position, whence=io.SEEK_SET):
        """Seek to a position in the file.
        """
        # The position is always clamped to [0, self.size].
        if whence == io.SEEK_SET:
            self.position = min(max(position, 0), self.size)
        elif whence == io.SEEK_CUR:
            if position < 0:
                self.position = max(self.position + position, 0)
            else:
                self.position = min(self.position + position, self.size)
        elif whence == io.SEEK_END:
            self.position = max(min(self.size + position, self.size), 0)
        else:
            raise ValueError("Invalid argument")
        return self.position
    def read(self, size=None):
        """Read data from the file.
        """
        if size is None:
            size = self.size - self.position
        else:
            size = min(size, self.size - self.position)
        buf = b""
        while size > 0:
            # Find the map entry containing the current position,
            # scanning circularly from the last used index.
            while True:
                data, start, stop, offset = self.map[self.map_index]
                if start <= self.position < stop:
                    break
                else:
                    self.map_index += 1
                    if self.map_index == len(self.map):
                        self.map_index = 0
            length = min(size, stop - self.position)
            if data:
                self.fileobj.seek(offset + (self.position - start))
                b = self.fileobj.read(length)
                if len(b) != length:
                    raise ReadError("unexpected end of data")
                buf += b
            else:
                # Hole: sparse regions read back as NUL bytes.
                buf += NUL * length
            size -= length
            self.position += length
        return buf
    def readinto(self, b):
        buf = self.read(len(b))
        b[:len(buf)] = buf
        return len(buf)
    def close(self):
        self.closed = True
#class _FileInFile
class ExFileObject(io.BufferedReader):
    """Buffered, read-only file object exposing one archive member."""

    def __init__(self, tarfile, tarinfo):
        # Wrap the raw slice of the archive (honouring sparse maps)
        # in a standard buffered reader.
        raw = _FileInFile(tarfile.fileobj, tarinfo.offset_data,
                          tarinfo.size, tarinfo.sparse)
        super().__init__(raw)
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
    """Informational class which holds the details about an
    archive member given by a tar header block.
    TarInfo objects are returned by TarFile.getmember(),
    TarFile.getmembers() and TarFile.gettarinfo() and are
    usually created internally.
    """
    # __slots__ keeps per-member memory low; archives may hold very
    # many members, each represented by one TarInfo instance.
    __slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
                 "chksum", "type", "linkname", "uname", "gname",
                 "devmajor", "devminor",
                 "offset", "offset_data", "pax_headers", "sparse",
                 "tarfile", "_sparse_structs", "_link_target")
    def __init__(self, name=""):
        """Construct a TarInfo object. name is the optional name
        of the member.
        """
        self.name = name # member name
        self.mode = 0o644 # file permissions
        self.uid = 0 # user id
        self.gid = 0 # group id
        self.size = 0 # file size
        self.mtime = 0 # modification time
        self.chksum = 0 # header checksum
        self.type = REGTYPE # member type
        self.linkname = "" # link name
        self.uname = "" # user name
        self.gname = "" # group name
        self.devmajor = 0 # device major number
        self.devminor = 0 # device minor number
        self.offset = 0 # the tar header starts here
        self.offset_data = 0 # the file's data starts here
        self.sparse = None # sparse member information
        self.pax_headers = {} # pax header information
    # In pax headers the "name" and "linkname" field are called
    # "path" and "linkpath".
    def _getpath(self):
        return self.name
    def _setpath(self, name):
        self.name = name
    path = property(_getpath, _setpath)
    def _getlinkpath(self):
        return self.linkname
    def _setlinkpath(self, linkname):
        self.linkname = linkname
    linkpath = property(_getlinkpath, _setlinkpath)
    def __repr__(self):
        return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
    def get_info(self):
        """Return the TarInfo's attributes as a dictionary.
        """
        info = {
            "name": self.name,
            "mode": self.mode & 0o7777,
            "uid": self.uid,
            "gid": self.gid,
            "size": self.size,
            "mtime": self.mtime,
            "chksum": self.chksum,
            "type": self.type,
            "linkname": self.linkname,
            "uname": self.uname,
            "gname": self.gname,
            "devmajor": self.devmajor,
            "devminor": self.devminor
        }
        # Directory entries conventionally carry a trailing slash.
        if info["type"] == DIRTYPE and not info["name"].endswith("/"):
            info["name"] += "/"
        return info
    def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
        """Return a tar header as a string of 512 byte blocks.
        """
        info = self.get_info()
        if format == USTAR_FORMAT:
            return self.create_ustar_header(info, encoding, errors)
        elif format == GNU_FORMAT:
            return self.create_gnu_header(info, encoding, errors)
        elif format == PAX_FORMAT:
            return self.create_pax_header(info, encoding)
        else:
            raise ValueError("invalid format")
    def create_ustar_header(self, info, encoding, errors):
        """Return the object as a ustar header block.

        Raises ValueError if the linkname does not fit; an over-long
        name is split into the ustar prefix/name pair instead.
        """
        info["magic"] = POSIX_MAGIC
        if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK:
            raise ValueError("linkname is too long")
        if len(info["name"].encode(encoding, errors)) > LENGTH_NAME:
            info["prefix"], info["name"] = self._posix_split_name(info["name"], encoding, errors)
        return self._create_header(info, USTAR_FORMAT, encoding, errors)
    def create_gnu_header(self, info, encoding, errors):
        """Return the object as a GNU header block sequence.

        Over-long names/linknames are emitted as extra GNU longname/
        longlink pseudo-members preceding the real header.
        """
        info["magic"] = GNU_MAGIC
        buf = b""
        if len(info["linkname"].encode(encoding, errors)) > LENGTH_LINK:
            buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
        if len(info["name"].encode(encoding, errors)) > LENGTH_NAME:
            buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
        return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
    def create_pax_header(self, info, encoding):
        """Return the object as a ustar header block. If it cannot be
        represented this way, prepend a pax extended header sequence
        with supplement information.
        """
        info["magic"] = POSIX_MAGIC
        pax_headers = self.pax_headers.copy()
        # Test string fields for values that exceed the field length or cannot
        # be represented in ASCII encoding.
        for name, hname, length in (
                ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
                ("uname", "uname", 32), ("gname", "gname", 32)):
            if hname in pax_headers:
                # The pax header has priority.
                continue
            # Try to encode the string as ASCII.
            try:
                info[name].encode("ascii", "strict")
            except UnicodeEncodeError:
                pax_headers[hname] = info[name]
                continue
            if len(info[name]) > length:
                pax_headers[hname] = info[name]
        # Test number fields for values that exceed the field limit or values
        # that like to be stored as float.
        for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
            if name in pax_headers:
                # The pax header has priority. Avoid overflow.
                info[name] = 0
                continue
            val = info[name]
            if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
                pax_headers[name] = str(val)
                info[name] = 0
        # Create a pax extended header if necessary.
        if pax_headers:
            buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
        else:
            buf = b""
        return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
    @classmethod
    def create_pax_global_header(cls, pax_headers):
        """Return the object as a pax global header block sequence.
        """
        return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf-8")
    def _posix_split_name(self, name, encoding, errors):
        """Split a name longer than 100 chars into a prefix
        and a name part.

        The split must happen at a "/" so that both halves fit the
        ustar prefix (155 bytes) and name (100 bytes) fields; raises
        ValueError when no such split exists.
        """
        components = name.split("/")
        for i in range(1, len(components)):
            prefix = "/".join(components[:i])
            name = "/".join(components[i:])
            if len(prefix.encode(encoding, errors)) <= LENGTH_PREFIX and \
                    len(name.encode(encoding, errors)) <= LENGTH_NAME:
                break
        else:
            raise ValueError("name is too long")
        return prefix, name
    @staticmethod
    def _create_header(info, format, encoding, errors):
        """Return a header block. info is a dictionary with file
        information, format must be one of the *_FORMAT constants.
        """
        parts = [
            stn(info.get("name", ""), 100, encoding, errors),
            itn(info.get("mode", 0) & 0o7777, 8, format),
            itn(info.get("uid", 0), 8, format),
            itn(info.get("gid", 0), 8, format),
            itn(info.get("size", 0), 12, format),
            itn(info.get("mtime", 0), 12, format),
            b"        ", # checksum field
            info.get("type", REGTYPE),
            stn(info.get("linkname", ""), 100, encoding, errors),
            info.get("magic", POSIX_MAGIC),
            stn(info.get("uname", ""), 32, encoding, errors),
            stn(info.get("gname", ""), 32, encoding, errors),
            itn(info.get("devmajor", 0), 8, format),
            itn(info.get("devminor", 0), 8, format),
            stn(info.get("prefix", ""), 155, encoding, errors)
        ]
        buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
        chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
        # Splice the computed checksum into bytes 148..154 of the block:
        # 512 - 364 == 148, and 148 + 7 == 155 == 512 - 357.
        buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:]
        return buf
    @staticmethod
    def _create_payload(payload):
        """Return the string payload filled with zero bytes
        up to the next 512 byte border.
        """
        blocks, remainder = divmod(len(payload), BLOCKSIZE)
        if remainder > 0:
            payload += (BLOCKSIZE - remainder) * NUL
        return payload
    @classmethod
    def _create_gnu_long_header(cls, name, type, encoding, errors):
        """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
        for name.
        """
        name = name.encode(encoding, errors) + NUL
        info = {}
        info["name"] = "././@LongLink"
        info["type"] = type
        info["size"] = len(name)
        info["magic"] = GNU_MAGIC
        # create extended header + name blocks.
        return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
                cls._create_payload(name)
    @classmethod
    def _create_pax_generic_header(cls, pax_headers, type, encoding):
        """Return a POSIX.1-2008 extended or global header sequence
        that contains a list of keyword, value pairs. The values
        must be strings.
        """
        # Check if one of the fields contains surrogate characters and thereby
        # forces hdrcharset=BINARY, see _proc_pax() for more information.
        binary = False
        for keyword, value in pax_headers.items():
            try:
                value.encode("utf-8", "strict")
            except UnicodeEncodeError:
                binary = True
                break
        records = b""
        if binary:
            # Put the hdrcharset field at the beginning of the header.
            records += b"21 hdrcharset=BINARY\n"
        for keyword, value in pax_headers.items():
            keyword = keyword.encode("utf-8")
            if binary:
                # Try to restore the original byte representation of `value'.
                # Needless to say, that the encoding must match the string.
                value = value.encode(encoding, "surrogateescape")
            else:
                value = value.encode("utf-8")
            l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
            # The record's leading length field counts its own decimal
            # digits, so iterate until the total length is stable.
            n = p = 0
            while True:
                n = l + len(str(p))
                if n == p:
                    break
                p = n
            records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
        # We use a hardcoded "././@PaxHeader" name like star does
        # instead of the one that POSIX recommends.
        info = {}
        info["name"] = "././@PaxHeader"
        info["type"] = type
        info["size"] = len(records)
        info["magic"] = POSIX_MAGIC
        # Create pax header + record blocks.
        return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
                cls._create_payload(records)
    @classmethod
    def frombuf(cls, buf, encoding, errors):
        """Construct a TarInfo object from a 512 byte bytes object.

        Raises EmptyHeaderError, TruncatedHeaderError, EOFHeaderError
        or InvalidHeaderError depending on what is wrong with buf.
        """
        if len(buf) == 0:
            raise EmptyHeaderError("empty header")
        if len(buf) != BLOCKSIZE:
            raise TruncatedHeaderError("truncated header")
        if buf.count(NUL) == BLOCKSIZE:
            raise EOFHeaderError("end of file header")
        chksum = nti(buf[148:156])
        if chksum not in calc_chksums(buf):
            raise InvalidHeaderError("bad checksum")
        # Field offsets below follow the ustar header layout; bytes
        # 257..265 (magic + version) are intentionally skipped.
        obj = cls()
        obj.name = nts(buf[0:100], encoding, errors)
        obj.mode = nti(buf[100:108])
        obj.uid = nti(buf[108:116])
        obj.gid = nti(buf[116:124])
        obj.size = nti(buf[124:136])
        obj.mtime = nti(buf[136:148])
        obj.chksum = chksum
        obj.type = buf[156:157]
        obj.linkname = nts(buf[157:257], encoding, errors)
        obj.uname = nts(buf[265:297], encoding, errors)
        obj.gname = nts(buf[297:329], encoding, errors)
        obj.devmajor = nti(buf[329:337])
        obj.devminor = nti(buf[337:345])
        prefix = nts(buf[345:500], encoding, errors)
        # Old V7 tar format represents a directory as a regular
        # file with a trailing slash.
        if obj.type == AREGTYPE and obj.name.endswith("/"):
            obj.type = DIRTYPE
        # The old GNU sparse format occupies some of the unused
        # space in the buffer for up to 4 sparse structures.
        # Save the them for later processing in _proc_sparse().
        if obj.type == GNUTYPE_SPARSE:
            pos = 386
            structs = []
            for i in range(4):
                try:
                    offset = nti(buf[pos:pos + 12])
                    numbytes = nti(buf[pos + 12:pos + 24])
                except ValueError:
                    break
                structs.append((offset, numbytes))
                pos += 24
            isextended = bool(buf[482])
            origsize = nti(buf[483:495])
            obj._sparse_structs = (structs, isextended, origsize)
        # Remove redundant slashes from directories.
        if obj.isdir():
            obj.name = obj.name.rstrip("/")
        # Reconstruct a ustar longname.
        if prefix and obj.type not in GNU_TYPES:
            obj.name = prefix + "/" + obj.name
        return obj
    @classmethod
    def fromtarfile(cls, tarfile):
        """Return the next TarInfo object from TarFile object
        tarfile.
        """
        buf = tarfile.fileobj.read(BLOCKSIZE)
        obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
        obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
        return obj._proc_member(tarfile)
    #--------------------------------------------------------------------------
    # The following are methods that are called depending on the type of a
    # member. The entry point is _proc_member() which can be overridden in a
    # subclass to add custom _proc_*() methods. A _proc_*() method MUST
    # implement the following
    # operations:
    # 1. Set self.offset_data to the position where the data blocks begin,
    #    if there is data that follows.
    # 2. Set tarfile.offset to the position where the next member's header will
    #    begin.
    # 3. Return self or another valid TarInfo object.
    def _proc_member(self, tarfile):
        """Choose the right processing method depending on
        the type and call it.
        """
        if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
            return self._proc_gnulong(tarfile)
        elif self.type == GNUTYPE_SPARSE:
            return self._proc_sparse(tarfile)
        elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
            return self._proc_pax(tarfile)
        else:
            return self._proc_builtin(tarfile)
    def _proc_builtin(self, tarfile):
        """Process a builtin type or an unknown type which
        will be treated as a regular file.
        """
        self.offset_data = tarfile.fileobj.tell()
        offset = self.offset_data
        if self.isreg() or self.type not in SUPPORTED_TYPES:
            # Skip the following data blocks.
            offset += self._block(self.size)
        tarfile.offset = offset
        # Patch the TarInfo object with saved global
        # header information.
        self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
        return self
    def _proc_gnulong(self, tarfile):
        """Process the blocks that hold a GNU longname
        or longlink member.
        """
        buf = tarfile.fileobj.read(self._block(self.size))
        # Fetch the next header and process it.
        try:
            next = self.fromtarfile(tarfile)
        except HeaderError:
            raise SubsequentHeaderError("missing or bad subsequent header")
        # Patch the TarInfo object from the next header with
        # the longname information.
        next.offset = self.offset
        if self.type == GNUTYPE_LONGNAME:
            next.name = nts(buf, tarfile.encoding, tarfile.errors)
        elif self.type == GNUTYPE_LONGLINK:
            next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
        return next
    def _proc_sparse(self, tarfile):
        """Process a GNU sparse header plus extra headers.
        """
        # We already collected some sparse structures in frombuf().
        structs, isextended, origsize = self._sparse_structs
        del self._sparse_structs
        # Collect sparse structures from extended header blocks.
        while isextended:
            buf = tarfile.fileobj.read(BLOCKSIZE)
            pos = 0
            # Each extension block holds up to 21 (offset, numbytes) pairs.
            for i in range(21):
                try:
                    offset = nti(buf[pos:pos + 12])
                    numbytes = nti(buf[pos + 12:pos + 24])
                except ValueError:
                    break
                if offset and numbytes:
                    structs.append((offset, numbytes))
                pos += 24
            isextended = bool(buf[504])
        self.sparse = structs
        self.offset_data = tarfile.fileobj.tell()
        tarfile.offset = self.offset_data + self._block(self.size)
        self.size = origsize
        return self
    def _proc_pax(self, tarfile):
        """Process an extended or global header as described in
        POSIX.1-2008.
        """
        # Read the header information.
        buf = tarfile.fileobj.read(self._block(self.size))
        # A pax header stores supplemental information for either
        # the following file (extended) or all following files
        # (global).
        if self.type == XGLTYPE:
            pax_headers = tarfile.pax_headers
        else:
            pax_headers = tarfile.pax_headers.copy()
        # Check if the pax header contains a hdrcharset field. This tells us
        # the encoding of the path, linkpath, uname and gname fields. Normally,
        # these fields are UTF-8 encoded but since POSIX.1-2008 tar
        # implementations are allowed to store them as raw binary strings if
        # the translation to UTF-8 fails.
        match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
        if match is not None:
            pax_headers["hdrcharset"] = match.group(1).decode("utf-8")
        # For the time being, we don't care about anything other than "BINARY".
        # The only other value that is currently allowed by the standard is
        # "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
        hdrcharset = pax_headers.get("hdrcharset")
        if hdrcharset == "BINARY":
            encoding = tarfile.encoding
        else:
            encoding = "utf-8"
        # Parse pax header information. A record looks like that:
        # "%d %s=%s\n" % (length, keyword, value). length is the size
        # of the complete record including the length field itself and
        # the newline. keyword and value are both UTF-8 encoded strings.
        regex = re.compile(br"(\d+) ([^=]+)=")
        pos = 0
        while True:
            match = regex.match(buf, pos)
            if not match:
                break
            length, keyword = match.groups()
            length = int(length)
            value = buf[match.end(2) + 1:match.start(1) + length - 1]
            # Normally, we could just use "utf-8" as the encoding and "strict"
            # as the error handler, but we better not take the risk. For
            # example, GNU tar <= 1.23 is known to store filenames it cannot
            # translate to UTF-8 as raw strings (unfortunately without a
            # hdrcharset=BINARY header).
            # We first try the strict standard encoding, and if that fails we
            # fall back on the user's encoding and error handler.
            keyword = self._decode_pax_field(keyword, "utf-8", "utf-8",
                    tarfile.errors)
            if keyword in PAX_NAME_FIELDS:
                value = self._decode_pax_field(value, encoding, tarfile.encoding,
                        tarfile.errors)
            else:
                value = self._decode_pax_field(value, "utf-8", "utf-8",
                        tarfile.errors)
            pax_headers[keyword] = value
            pos += length
        # Fetch the next header.
        try:
            next = self.fromtarfile(tarfile)
        except HeaderError:
            raise SubsequentHeaderError("missing or bad subsequent header")
        # Process GNU sparse information.
        if "GNU.sparse.map" in pax_headers:
            # GNU extended sparse format version 0.1.
            self._proc_gnusparse_01(next, pax_headers)
        elif "GNU.sparse.size" in pax_headers:
            # GNU extended sparse format version 0.0.
            self._proc_gnusparse_00(next, pax_headers, buf)
        elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
            # GNU extended sparse format version 1.0.
            self._proc_gnusparse_10(next, pax_headers, tarfile)
        if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
            # Patch the TarInfo object with the extended header info.
            next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
            next.offset = self.offset
            if "size" in pax_headers:
                # If the extended header replaces the size field,
                # we need to recalculate the offset where the next
                # header starts.
                offset = next.offset_data
                if next.isreg() or next.type not in SUPPORTED_TYPES:
                    offset += next._block(next.size)
                tarfile.offset = offset
        return next
    def _proc_gnusparse_00(self, next, pax_headers, buf):
        """Process a GNU tar extended sparse header, version 0.0.
        """
        # Offsets and sizes are stored as repeated individual records.
        offsets = []
        for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
            offsets.append(int(match.group(1)))
        numbytes = []
        for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
            numbytes.append(int(match.group(1)))
        next.sparse = list(zip(offsets, numbytes))
    def _proc_gnusparse_01(self, next, pax_headers):
        """Process a GNU tar extended sparse header, version 0.1.
        """
        # The map is one comma-separated list of alternating
        # offset and numbytes values.
        sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
        next.sparse = list(zip(sparse[::2], sparse[1::2]))
    def _proc_gnusparse_10(self, next, pax_headers, tarfile):
        """Process a GNU tar extended sparse header, version 1.0.
        """
        # The sparse map precedes the file data: first a line with the
        # number of (offset, numbytes) pairs, then one number per line.
        fields = None
        sparse = []
        buf = tarfile.fileobj.read(BLOCKSIZE)
        fields, buf = buf.split(b"\n", 1)
        fields = int(fields)
        while len(sparse) < fields * 2:
            if b"\n" not in buf:
                buf += tarfile.fileobj.read(BLOCKSIZE)
            number, buf = buf.split(b"\n", 1)
            sparse.append(int(number))
        next.offset_data = tarfile.fileobj.tell()
        next.sparse = list(zip(sparse[::2], sparse[1::2]))
    def _apply_pax_info(self, pax_headers, encoding, errors):
        """Replace fields with supplemental information from a previous
        pax extended or global header.
        """
        for keyword, value in pax_headers.items():
            if keyword == "GNU.sparse.name":
                setattr(self, "path", value)
            elif keyword == "GNU.sparse.size":
                setattr(self, "size", int(value))
            elif keyword == "GNU.sparse.realsize":
                setattr(self, "size", int(value))
            elif keyword in PAX_FIELDS:
                if keyword in PAX_NUMBER_FIELDS:
                    try:
                        value = PAX_NUMBER_FIELDS[keyword](value)
                    except ValueError:
                        value = 0
                if keyword == "path":
                    value = value.rstrip("/")
                setattr(self, keyword, value)
        self.pax_headers = pax_headers.copy()
    def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
        """Decode a single field from a pax record.

        Try the strict primary encoding first, then fall back to the
        user-supplied encoding and error handler.
        """
        try:
            return value.decode(encoding, "strict")
        except UnicodeDecodeError:
            return value.decode(fallback_encoding, fallback_errors)
    def _block(self, count):
        """Round up a byte count by BLOCKSIZE and return it,
        e.g. _block(834) => 1024.
        """
        blocks, remainder = divmod(count, BLOCKSIZE)
        if remainder:
            blocks += 1
        return blocks * BLOCKSIZE
    def isreg(self):
        """Return True if the member is a regular file."""
        return self.type in REGULAR_TYPES
    def isfile(self):
        """Synonym for isreg()."""
        return self.isreg()
    def isdir(self):
        """Return True if the member is a directory."""
        return self.type == DIRTYPE
    def issym(self):
        """Return True if the member is a symbolic link."""
        return self.type == SYMTYPE
    def islnk(self):
        """Return True if the member is a hard link."""
        return self.type == LNKTYPE
    def ischr(self):
        """Return True if the member is a character device."""
        return self.type == CHRTYPE
    def isblk(self):
        """Return True if the member is a block device."""
        return self.type == BLKTYPE
    def isfifo(self):
        """Return True if the member is a FIFO."""
        return self.type == FIFOTYPE
    def issparse(self):
        """Return True if the member is a sparse file."""
        return self.sparse is not None
    def isdev(self):
        """Return True if the member is a device of any kind."""
        return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The file-object for extractfile().
def __init__(self, name=None, mode="r", fileobj=None, format=None,
        tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
        errors="surrogateescape", pax_headers=None, debug=None,
        errorlevel=None, copybufsize=None):
    """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
    read from an existing archive, 'a' to append data to an existing
    file or 'w' to create a new file overwriting an existing one. `mode'
    defaults to 'r'.
    If `fileobj' is given, it is used for reading or writing data. If it
    can be determined, `mode' is overridden by `fileobj's mode.
    `fileobj' is not closed, when TarFile is closed.
    """
    modes = {"r": "rb", "a": "r+b", "w": "wb", "x": "xb"}
    if mode not in modes:
        raise ValueError("mode must be 'r', 'a', 'w' or 'x'")
    self.mode = mode
    # _mode is the low-level binary mode used for the underlying file object.
    self._mode = modes[mode]
    if not fileobj:
        if self.mode == "a" and not os.path.exists(name):
            # Create nonexistent files in append mode.
            self.mode = "w"
            self._mode = "wb"
        fileobj = bltn_open(name, self._mode)
        # We opened this file object, so close() must close it.
        self._extfileobj = False
    else:
        if (name is None and hasattr(fileobj, "name") and
            isinstance(fileobj.name, (str, bytes))):
            name = fileobj.name
        if hasattr(fileobj, "mode"):
            self._mode = fileobj.mode
        # Caller-provided file object: never close it ourselves.
        self._extfileobj = True
    self.name = os.path.abspath(name) if name else None
    self.fileobj = fileobj
    # Init attributes: explicit arguments override the class-level defaults.
    if format is not None:
        self.format = format
    if tarinfo is not None:
        self.tarinfo = tarinfo
    if dereference is not None:
        self.dereference = dereference
    if ignore_zeros is not None:
        self.ignore_zeros = ignore_zeros
    if encoding is not None:
        self.encoding = encoding
    self.errors = errors
    # Global pax headers only apply when writing PAX_FORMAT archives.
    if pax_headers is not None and self.format == PAX_FORMAT:
        self.pax_headers = pax_headers
    else:
        self.pax_headers = {}
    if debug is not None:
        self.debug = debug
    if errorlevel is not None:
        self.errorlevel = errorlevel
    # Init datastructures.
    self.copybufsize = copybufsize
    self.closed = False
    self.members = []       # list of members as TarInfo objects
    self._loaded = False    # flag if all members have been read
    self.offset = self.fileobj.tell()
                            # current position in the archive file
    self.inodes = {}        # dictionary caching the inodes of
                            # archive members already added
    try:
        if self.mode == "r":
            # Read the first member ahead of time so next() can hand it out.
            self.firstmember = None
            self.firstmember = self.next()
        if self.mode == "a":
            # Move to the end of the archive,
            # before the first empty block.
            while True:
                self.fileobj.seek(self.offset)
                try:
                    tarinfo = self.tarinfo.fromtarfile(self)
                    self.members.append(tarinfo)
                except EOFHeaderError:
                    self.fileobj.seek(self.offset)
                    break
                except HeaderError as e:
                    raise ReadError(str(e))
        if self.mode in ("a", "w", "x"):
            self._loaded = True
            if self.pax_headers:
                buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
                self.fileobj.write(buf)
                self.offset += len(buf)
    except:
        # Any failure during setup must not leak a file object we opened.
        if not self._extfileobj:
            self.fileobj.close()
        self.closed = True
        raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
    """Open a tar archive for reading, writing or appending. Return
    an appropriate TarFile class.
    mode:
    'r' or 'r:*' open for reading with transparent compression
    'r:'         open for reading exclusively uncompressed
    'r:gz'       open for reading with gzip compression
    'r:bz2'      open for reading with bzip2 compression
    'r:xz'       open for reading with lzma compression
    'a' or 'a:'  open for appending, creating the file if necessary
    'w' or 'w:'  open for writing without compression
    'w:gz'       open for writing with gzip compression
    'w:bz2'      open for writing with bzip2 compression
    'w:xz'       open for writing with lzma compression
    'x' or 'x:'  create a tarfile exclusively without compression, raise
                 an exception if the file is already created
    'x:gz'       create a gzip compressed tarfile, raise an exception
                 if the file is already created
    'x:bz2'      create a bzip2 compressed tarfile, raise an exception
                 if the file is already created
    'x:xz'       create an lzma compressed tarfile, raise an exception
                 if the file is already created
    'r|*'        open a stream of tar blocks with transparent compression
    'r|'         open an uncompressed stream of tar blocks for reading
    'r|gz'       open a gzip compressed stream of tar blocks
    'r|bz2'      open a bzip2 compressed stream of tar blocks
    'r|xz'       open an lzma compressed stream of tar blocks
    'w|'         open an uncompressed stream for writing
    'w|gz'       open a gzip compressed stream for writing
    'w|bz2'      open a bzip2 compressed stream for writing
    'w|xz'       open an lzma compressed stream for writing
    """
    if not name and not fileobj:
        raise ValueError("nothing to open")
    if mode in ("r", "r:*"):
        # Find out which *open() is appropriate for opening the file.
        def not_compressed(comptype):
            return cls.OPEN_METH[comptype] == 'taropen'
        # Probe compressed openers first; plain taropen sorts last because
        # sorted() is stable and not_compressed() is True (1) only for it.
        for comptype in sorted(cls.OPEN_METH, key=not_compressed):
            func = getattr(cls, cls.OPEN_METH[comptype])
            if fileobj is not None:
                # Remember the position so a failed probe can rewind.
                saved_pos = fileobj.tell()
            try:
                return func(name, "r", fileobj, **kwargs)
            except (ReadError, CompressionError):
                if fileobj is not None:
                    fileobj.seek(saved_pos)
                continue
        raise ReadError("file could not be opened successfully")
    elif ":" in mode:
        filemode, comptype = mode.split(":", 1)
        filemode = filemode or "r"
        comptype = comptype or "tar"
        # Select the *open() function according to
        # given compression.
        if comptype in cls.OPEN_METH:
            func = getattr(cls, cls.OPEN_METH[comptype])
        else:
            raise CompressionError("unknown compression type %r" % comptype)
        return func(name, filemode, fileobj, **kwargs)
    elif "|" in mode:
        # Non-seekable stream mode: wrap the target in a _Stream.
        filemode, comptype = mode.split("|", 1)
        filemode = filemode or "r"
        comptype = comptype or "tar"
        if filemode not in ("r", "w"):
            raise ValueError("mode must be 'r' or 'w'")
        stream = _Stream(name, filemode, comptype, fileobj, bufsize)
        try:
            t = cls(name, filemode, stream, **kwargs)
        except:
            stream.close()
            raise
        # The stream was created here, so the TarFile owns it.
        t._extfileobj = False
        return t
    elif mode in ("a", "w", "x"):
        return cls.taropen(name, mode, fileobj, **kwargs)
    raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
    """Open an uncompressed tar archive `name' for reading or writing."""
    # Guard clause: reject anything but the four plain-file modes.
    if mode in ("r", "a", "w", "x"):
        return cls(name, mode, fileobj, **kwargs)
    raise ValueError("mode must be 'r', 'a', 'w' or 'x'")
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
    """Open gzip compressed tar archive name for reading or writing.
    Appending is not allowed.
    """
    if mode not in ("r", "w", "x"):
        raise ValueError("mode must be 'r', 'w' or 'x'")
    try:
        import gzip
        # Attribute probe guards against a crippled/partial gzip module.
        gzip.GzipFile
    except (ImportError, AttributeError):
        raise CompressionError("gzip module is not available")
    try:
        fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
    except OSError:
        if fileobj is not None and mode == 'r':
            raise ReadError("not a gzip file")
        raise
    try:
        t = cls.taropen(name, mode, fileobj, **kwargs)
    except OSError:
        # A corrupt gzip stream surfaces as OSError while reading the first
        # tar header; report it as a ReadError in read mode.
        fileobj.close()
        if mode == 'r':
            raise ReadError("not a gzip file")
        raise
    except:
        fileobj.close()
        raise
    # The GzipFile wrapper was created here, so TarFile must close it.
    t._extfileobj = False
    return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
    """Open bzip2 compressed tar archive name for reading or writing.
    Appending is not allowed.
    """
    if mode not in ("r", "w", "x"):
        raise ValueError("mode must be 'r', 'w' or 'x'")
    try:
        import bz2
    except ImportError:
        raise CompressionError("bz2 module is not available")
    fileobj = bz2.BZ2File(fileobj or name, mode,
                          compresslevel=compresslevel)
    try:
        t = cls.taropen(name, mode, fileobj, **kwargs)
    except (OSError, EOFError):
        # Bad bzip2 data raises OSError/EOFError from the first read.
        fileobj.close()
        if mode == 'r':
            raise ReadError("not a bzip2 file")
        raise
    except:
        fileobj.close()
        raise
    # The BZ2File wrapper was created here, so TarFile must close it.
    t._extfileobj = False
    return t
@classmethod
def xzopen(cls, name, mode="r", fileobj=None, preset=None, **kwargs):
    """Open lzma compressed tar archive name for reading or writing.
    Appending is not allowed.
    """
    if mode not in ("r", "w", "x"):
        raise ValueError("mode must be 'r', 'w' or 'x'")
    try:
        import lzma
    except ImportError:
        raise CompressionError("lzma module is not available")
    fileobj = lzma.LZMAFile(fileobj or name, mode, preset=preset)
    try:
        t = cls.taropen(name, mode, fileobj, **kwargs)
    except (lzma.LZMAError, EOFError):
        # Bad xz data raises LZMAError/EOFError from the first read.
        fileobj.close()
        if mode == 'r':
            raise ReadError("not an lzma file")
        raise
    except:
        fileobj.close()
        raise
    # The LZMAFile wrapper was created here, so TarFile must close it.
    t._extfileobj = False
    return t
# All *open() methods are registered here.
# Maps the compression suffix used in mode strings (e.g. "r:gz") to the name
# of the classmethod that handles it; dispatched by open() above. Subclasses
# may extend this mapping to register additional compressions.
OPEN_METH = {
    "tar": "taropen",   # uncompressed tar
    "gz": "gzopen",     # gzip compressed tar
    "bz2": "bz2open",   # bzip2 compressed tar
    "xz": "xzopen"      # lzma compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
    """Close the TarFile. In write-mode, two finishing zero blocks are
    appended to the archive.
    """
    if self.closed:
        return
    self.closed = True
    try:
        if self.mode in ("a", "w", "x"):
            # Two zero blocks mark the end of the archive.
            self.fileobj.write(NUL * (BLOCKSIZE * 2))
            self.offset += BLOCKSIZE * 2
            # Fill up the end with zero-blocks so the archive is a whole
            # number of records (like option -b20 for tar does).
            padding = -self.offset % RECORDSIZE
            if padding:
                self.fileobj.write(NUL * padding)
    finally:
        if not self._extfileobj:
            self.fileobj.close()
def getmember(self, name):
    """Return a TarInfo object for member `name'. If `name' can not be
    found in the archive, KeyError is raised. If a member occurs more
    than once in the archive, its last occurrence is assumed to be the
    most up-to-date version.
    """
    info = self._getmember(name)
    if info is None:
        raise KeyError("filename %r not found" % name)
    return info
def getmembers(self):
    """Return the members of the archive as a list of TarInfo objects. The
    list has the same order as the members in the archive.
    """
    self._check()
    # A complete listing requires scanning the archive to its end first.
    if not self._loaded:
        self._load()
    return self.members
def getnames(self):
    """Return the members of the archive as a list of their names. It has
    the same order as the list returned by getmembers().
    """
    return [member.name for member in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
    """Create a TarInfo object from the result of os.stat or equivalent
    on an existing file. The file is either named by `name', or
    specified as a file object `fileobj' with a file descriptor. If
    given, `arcname' specifies an alternative name for the file in the
    archive, otherwise, the name is taken from the 'name' attribute of
    'fileobj', or the 'name' argument. The name should be a text
    string. Returns None for file types that cannot be archived
    (e.g. sockets).
    """
    self._check("awx")
    # When fileobj is given, replace name by
    # fileobj's real name.
    if fileobj is not None:
        name = fileobj.name
    # Building the name of the member in the archive.
    # Backward slashes are converted to forward slashes,
    # Absolute paths are turned to relative paths.
    if arcname is None:
        arcname = name
    drv, arcname = os.path.splitdrive(arcname)
    arcname = arcname.replace(os.sep, "/")
    arcname = arcname.lstrip("/")
    # Now, fill the TarInfo object with
    # information specific for the file.
    tarinfo = self.tarinfo()
    tarinfo.tarfile = self  # Not needed
    # Use os.stat or os.lstat, depending on platform
    # and if symlinks shall be resolved.
    if fileobj is None:
        if hasattr(os, "lstat") and not self.dereference:
            statres = os.lstat(name)
        else:
            statres = os.stat(name)
    else:
        statres = os.fstat(fileobj.fileno())
    linkname = ""
    stmd = statres.st_mode
    if stat.S_ISREG(stmd):
        inode = (statres.st_ino, statres.st_dev)
        if not self.dereference and statres.st_nlink > 1 and \
                inode in self.inodes and arcname != self.inodes[inode]:
            # Is it a hardlink to an already
            # archived file?
            type = LNKTYPE
            linkname = self.inodes[inode]
        else:
            # The inode is added only if its valid.
            # For win32 it is always 0.
            type = REGTYPE
            if inode[0]:
                self.inodes[inode] = arcname
    elif stat.S_ISDIR(stmd):
        type = DIRTYPE
    elif stat.S_ISFIFO(stmd):
        type = FIFOTYPE
    elif stat.S_ISLNK(stmd):
        type = SYMTYPE
        linkname = os.readlink(name)
    elif stat.S_ISCHR(stmd):
        type = CHRTYPE
    elif stat.S_ISBLK(stmd):
        type = BLKTYPE
    else:
        # Unsupported file type (e.g. socket): nothing to archive.
        return None
    # Fill the TarInfo object with all
    # information we can get.
    tarinfo.name = arcname
    tarinfo.mode = stmd
    tarinfo.uid = statres.st_uid
    tarinfo.gid = statres.st_gid
    if type == REGTYPE:
        tarinfo.size = statres.st_size
    else:
        # Only regular files carry a payload.
        tarinfo.size = 0
    tarinfo.mtime = statres.st_mtime
    tarinfo.type = type
    tarinfo.linkname = linkname
    # Resolve numeric ids to symbolic names where the system knows them.
    if pwd:
        try:
            tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
        except KeyError:
            pass
    if grp:
        try:
            tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
        except KeyError:
            pass
    if type in (CHRTYPE, BLKTYPE):
        if hasattr(os, "major") and hasattr(os, "minor"):
            tarinfo.devmajor = os.major(statres.st_rdev)
            tarinfo.devminor = os.minor(statres.st_rdev)
    return tarinfo
def list(self, verbose=True, *, members=None):
    """Print a table of contents to sys.stdout. If `verbose' is False, only
    the names of the members are printed. If it is True, an `ls -l'-like
    output is produced. `members' is optional and must be a subset of the
    list returned by getmembers().
    """
    self._check()
    if members is None:
        members = self
    for tarinfo in members:
        if verbose:
            # Permission string, owner/group, size (or device numbers)
            # and timestamp, in ls -l order.
            _safe_print(stat.filemode(tarinfo.mode))
            _safe_print("%s/%s" % (tarinfo.uname or tarinfo.uid,
                                   tarinfo.gname or tarinfo.gid))
            if tarinfo.ischr() or tarinfo.isblk():
                _safe_print("%10s" %
                            ("%d,%d" % (tarinfo.devmajor, tarinfo.devminor)))
            else:
                _safe_print("%10d" % tarinfo.size)
            _safe_print("%d-%02d-%02d %02d:%02d:%02d" \
                        % time.localtime(tarinfo.mtime)[:6])
        # Directories are shown with a trailing slash.
        _safe_print(tarinfo.name + ("/" if tarinfo.isdir() else ""))
        if verbose:
            if tarinfo.issym():
                _safe_print("-> " + tarinfo.linkname)
            if tarinfo.islnk():
                _safe_print("link to " + tarinfo.linkname)
        print()
def add(self, name, arcname=None, recursive=True, exclude=None, *, filter=None):
    """Add the file `name' to the archive. `name' may be any type of file
    (directory, fifo, symbolic link, etc.). If given, `arcname'
    specifies an alternative name for the file in the archive.
    Directories are added recursively by default. This can be avoided by
    setting `recursive' to False. `exclude' is a function that should
    return True for each filename to be excluded. `filter' is a function
    that expects a TarInfo object argument and returns the changed
    TarInfo object, if it returns None the TarInfo object will be
    excluded from the archive.
    """
    self._check("awx")
    if arcname is None:
        arcname = name
    # Exclude pathnames.
    if exclude is not None:
        # `exclude' is deprecated in favour of the more flexible `filter'.
        import warnings
        warnings.warn("use the filter argument instead",
                      DeprecationWarning, 2)
        if exclude(name):
            self._dbg(2, "tarfile: Excluded %r" % name)
            return
    # Skip if somebody tries to archive the archive...
    if self.name is not None and os.path.abspath(name) == self.name:
        self._dbg(2, "tarfile: Skipped %r" % name)
        return
    self._dbg(1, name)
    # Create a TarInfo object from the file.
    tarinfo = self.gettarinfo(name, arcname)
    if tarinfo is None:
        self._dbg(1, "tarfile: Unsupported type %r" % name)
        return
    # Change or exclude the TarInfo object.
    if filter is not None:
        tarinfo = filter(tarinfo)
        if tarinfo is None:
            self._dbg(2, "tarfile: Excluded %r" % name)
            return
    # Append the tar header and data to the archive.
    if tarinfo.isreg():
        with bltn_open(name, "rb") as f:
            self.addfile(tarinfo, f)
    elif tarinfo.isdir():
        self.addfile(tarinfo)
        if recursive:
            # Recurse into the directory, preserving the arcname prefix.
            for f in os.listdir(name):
                self.add(os.path.join(name, f), os.path.join(arcname, f),
                         recursive, exclude, filter=filter)
    else:
        # Links, fifos and device nodes carry no payload.
        self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
    """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
    given, it should be a binary file, and tarinfo.size bytes are read
    from it and added to the archive. You can create TarInfo objects
    directly, or by using gettarinfo().
    """
    self._check("awx")
    # Work on a copy so the caller's TarInfo is left untouched.
    tarinfo = copy.copy(tarinfo)
    header = tarinfo.tobuf(self.format, self.encoding, self.errors)
    self.fileobj.write(header)
    self.offset += len(header)
    # If there's data to follow, append it and pad to a full block.
    if fileobj is not None:
        copyfileobj(fileobj, self.fileobj, tarinfo.size,
                    bufsize=self.copybufsize)
        padding = -tarinfo.size % BLOCKSIZE
        if padding:
            self.fileobj.write(NUL * padding)
        self.offset += tarinfo.size + padding
    self.members.append(tarinfo)
def extractall(self, path=".", members=None, *, numeric_owner=False):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers(). If `numeric_owner` is True, only
    the numbers for user/group names are used and not the names.
    """
    directories = []
    if members is None:
        members = self
    for tarinfo in members:
        if tarinfo.isdir():
            # Extract directories with a safe mode.
            directories.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 0o700
        # Do not set_attrs directories, as we will do that further down
        self.extract(tarinfo, path, set_attrs=not tarinfo.isdir(),
                     numeric_owner=numeric_owner)
    # Reverse sort directories.
    # Deepest paths first, so a parent's mtime is not clobbered by
    # extracting its children afterwards.
    directories.sort(key=lambda a: a.name)
    directories.reverse()
    # Set correct owner, mtime and filemode on directories.
    for tarinfo in directories:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath, numeric_owner=numeric_owner)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError as e:
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path="", set_attrs=True, *, numeric_owner=False):
    """Extract a member from the archive to the current working directory,
    using its full name. Its file information is extracted as accurately
    as possible. `member' may be a filename or a TarInfo object. You can
    specify a different directory using `path'. File attributes (owner,
    mtime, mode) are set unless `set_attrs' is False. If `numeric_owner`
    is True, only the numbers for user/group names are used and not
    the names.
    """
    self._check("r")
    if isinstance(member, str):
        tarinfo = self.getmember(member)
    else:
        tarinfo = member
    # Prepare the link target for makelink().
    if tarinfo.islnk():
        tarinfo._link_target = os.path.join(path, tarinfo.linkname)
    try:
        self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
                             set_attrs=set_attrs,
                             numeric_owner=numeric_owner)
    except OSError as e:
        # errorlevel 0: demote OS errors to debug messages.
        if self.errorlevel > 0:
            raise
        else:
            if e.filename is None:
                self._dbg(1, "tarfile: %s" % e.strerror)
            else:
                self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
    except ExtractError as e:
        # errorlevel <= 1: demote non-fatal extraction errors as well.
        if self.errorlevel > 1:
            raise
        else:
            self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
    """Extract a member from the archive as a file object. `member' may be
    a filename or a TarInfo object. If `member' is a regular file or a
    link, an io.BufferedReader object is returned. Otherwise, None is
    returned.
    """
    self._check("r")
    if isinstance(member, str):
        tarinfo = self.getmember(member)
    else:
        tarinfo = member
    if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES:
        # Members with unknown types are treated as regular files.
        return self.fileobject(self, tarinfo)
    elif tarinfo.islnk() or tarinfo.issym():
        if isinstance(self.fileobj, _Stream):
            # A small but ugly workaround for the case that someone tries
            # to extract a (sym)link as a file-object from a non-seekable
            # stream of tar blocks.
            raise StreamError("cannot extract (sym)link as file object")
        else:
            # A (sym)link's file object is its target's file object.
            return self.extractfile(self._find_link_target(tarinfo))
    else:
        # If there's no data associated with the member (directory, chrdev,
        # blkdev, etc.), return None instead of a file object.
        return None
def _extract_member(self, tarinfo, targetpath, set_attrs=True,
                    numeric_owner=False):
    """Extract the TarInfo object tarinfo to a physical
    file called targetpath.
    """
    # Fetch the TarInfo object for the given name
    # and build the destination pathname, replacing
    # forward slashes to platform specific separators.
    targetpath = targetpath.rstrip("/")
    targetpath = targetpath.replace("/", os.sep)
    # Create all upper directories.
    upperdirs = os.path.dirname(targetpath)
    if upperdirs and not os.path.exists(upperdirs):
        # Create directories that are not part of the archive with
        # default permissions.
        os.makedirs(upperdirs)
    if tarinfo.islnk() or tarinfo.issym():
        self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
    else:
        self._dbg(1, tarinfo.name)
    # Dispatch on the member's type to the matching make*() method.
    if tarinfo.isreg():
        self.makefile(tarinfo, targetpath)
    elif tarinfo.isdir():
        self.makedir(tarinfo, targetpath)
    elif tarinfo.isfifo():
        self.makefifo(tarinfo, targetpath)
    elif tarinfo.ischr() or tarinfo.isblk():
        self.makedev(tarinfo, targetpath)
    elif tarinfo.islnk() or tarinfo.issym():
        self.makelink(tarinfo, targetpath)
    elif tarinfo.type not in SUPPORTED_TYPES:
        self.makeunknown(tarinfo, targetpath)
    else:
        self.makefile(tarinfo, targetpath)
    if set_attrs:
        self.chown(tarinfo, targetpath, numeric_owner)
        # chmod on a symlink would follow the link to its target.
        if not tarinfo.issym():
            self.chmod(tarinfo, targetpath)
            self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
    """Make a directory called targetpath.
    """
    try:
        # Use a safe mode for the directory, the real mode is set
        # later in _extract_member().
        os.mkdir(targetpath, 0o700)
    except FileExistsError:
        # The directory may already exist (e.g. created while extracting
        # a nested member); that is fine.
        pass
def makefile(self, tarinfo, targetpath):
    """Make a file called targetpath.
    """
    source = self.fileobj
    # Position the archive at the start of this member's payload.
    source.seek(tarinfo.offset_data)
    bufsize = self.copybufsize
    with bltn_open(targetpath, "wb") as target:
        if tarinfo.sparse is not None:
            # Sparse member: write each data segment at its offset, then
            # truncate to the logical size so the holes stay holes.
            for offset, size in tarinfo.sparse:
                target.seek(offset)
                copyfileobj(source, target, size, ReadError, bufsize)
            target.seek(tarinfo.size)
            target.truncate()
        else:
            copyfileobj(source, target, tarinfo.size, ReadError, bufsize)
def makeunknown(self, tarinfo, targetpath):
    """Fallback for members with an unrecognized type flag: extract the
    payload as a plain file and emit a debug notice.
    """
    self.makefile(tarinfo, targetpath)
    self._dbg(1, "tarfile: Unknown file type %r, extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
    """Create a FIFO (named pipe) at targetpath."""
    # Guard clause: not every platform provides os.mkfifo.
    if not hasattr(os, "mkfifo"):
        raise ExtractError("fifo not supported by system")
    os.mkfifo(targetpath)
def makedev(self, tarinfo, targetpath):
    """Create a character or block device node at targetpath."""
    # Guard clause: device nodes need both os.mknod and os.makedev.
    if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
        raise ExtractError("special devices not supported by system")
    kind = stat.S_IFBLK if tarinfo.isblk() else stat.S_IFCHR
    device = os.makedev(tarinfo.devmajor, tarinfo.devminor)
    os.mknod(targetpath, tarinfo.mode | kind, device)
def makelink(self, tarinfo, targetpath):
    """Make a (symbolic) link called targetpath. If it cannot be created
    (platform limitation), we try to make a copy of the referenced file
    instead of a link.
    """
    try:
        # For systems that support symbolic and hard links.
        if tarinfo.issym():
            os.symlink(tarinfo.linkname, targetpath)
        else:
            # See extract().
            if os.path.exists(tarinfo._link_target):
                os.link(tarinfo._link_target, targetpath)
            else:
                # Link target is missing on disk: extract the archived
                # target's content in its place instead.
                self._extract_member(self._find_link_target(tarinfo),
                                     targetpath)
    except symlink_exception:
        # Platform cannot create links; fall back to copying the
        # referenced member's data.
        try:
            self._extract_member(self._find_link_target(tarinfo),
                                 targetpath)
        except KeyError:
            raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath, numeric_owner):
    """Set owner of targetpath according to tarinfo. If numeric_owner
    is True, use .gid/.uid instead of .gname/.uname.
    """
    if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
        # We have to be root to do so.
        if numeric_owner:
            g = tarinfo.gid
            u = tarinfo.uid
        else:
            # Prefer the archived symbolic names; fall back to the
            # numeric ids when the names are unknown on this system.
            try:
                g = grp.getgrnam(tarinfo.gname)[2]
            except KeyError:
                g = tarinfo.gid
            try:
                u = pwd.getpwnam(tarinfo.uname)[2]
            except KeyError:
                u = tarinfo.uid
        try:
            if tarinfo.issym() and hasattr(os, "lchown"):
                # Change the link itself, not the file it points at.
                os.lchown(targetpath, u, g)
            else:
                os.chown(targetpath, u, g)
        except OSError:
            raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
    """Set file permissions of targetpath according to tarinfo.
    """
    # Silently skip on platforms without os.chmod.
    if not hasattr(os, 'chmod'):
        return
    try:
        os.chmod(targetpath, tarinfo.mode)
    except OSError:
        raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
    """Set modification time of targetpath according to tarinfo.
    """
    if not hasattr(os, 'utime'):
        return
    # Apply the archived mtime to both access and modification time.
    mtime = tarinfo.mtime
    try:
        os.utime(targetpath, (mtime, mtime))
    except OSError:
        raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
    """Return the next member of the archive as a TarInfo object, when
    TarFile is opened for reading. Return None if there is no more
    available.
    """
    self._check("ra")
    if self.firstmember is not None:
        # __init__ read the first member ahead of time; hand it out once.
        m = self.firstmember
        self.firstmember = None
        return m
    # Advance the file pointer.
    if self.offset != self.fileobj.tell():
        # Seek to offset-1 and read one byte so a truncated file is
        # detected before header parsing.
        self.fileobj.seek(self.offset - 1)
        if not self.fileobj.read(1):
            raise ReadError("unexpected end of data")
    # Read the next block.
    tarinfo = None
    while True:
        try:
            tarinfo = self.tarinfo.fromtarfile(self)
        except EOFHeaderError as e:
            # With ignore_zeros, skip the zero block and keep scanning.
            if self.ignore_zeros:
                self._dbg(2, "0x%X: %s" % (self.offset, e))
                self.offset += BLOCKSIZE
                continue
        except InvalidHeaderError as e:
            if self.ignore_zeros:
                self._dbg(2, "0x%X: %s" % (self.offset, e))
                self.offset += BLOCKSIZE
                continue
            elif self.offset == 0:
                # Garbage at the very start means this is not a tar file.
                raise ReadError(str(e))
        except EmptyHeaderError:
            if self.offset == 0:
                raise ReadError("empty file")
        except TruncatedHeaderError as e:
            if self.offset == 0:
                raise ReadError(str(e))
        except SubsequentHeaderError as e:
            raise ReadError(str(e))
        break
    if tarinfo is not None:
        self.members.append(tarinfo)
    else:
        # End of archive: every member has now been seen.
        self._loaded = True
    return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
    """Find an archive member by name from bottom to top.
    If tarinfo is given, it is used as the starting point.
    Returns None when no member matches.
    """
    # Ensure that all members have been loaded.
    members = self.getmembers()
    # Limit the member search list up to tarinfo.
    if tarinfo is not None:
        members = members[:members.index(tarinfo)]
    if normalize:
        name = os.path.normpath(name)
    # Later members shadow earlier ones, so search back to front.
    for member in reversed(members):
        candidate = os.path.normpath(member.name) if normalize else member.name
        if candidate == name:
            return member
    return None
def _load(self):
    """Read through the entire archive file and look for readable
    members.
    """
    # next() returns None once the end of the archive is reached, which
    # is exactly the sentinel form of iter().
    for _ in iter(self.next, None):
        pass
    self._loaded = True
def _check(self, mode=None):
    """Check if TarFile is still open, and if the operation's mode
    corresponds to TarFile's mode.
    """
    if self.closed:
        raise OSError("%s is closed" % self.__class__.__name__)
    if mode is None:
        return
    # `mode' is a string of acceptable mode characters, e.g. "awx".
    if self.mode not in mode:
        raise OSError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
    """Find the target member of a symlink or hardlink member in the
    archive.
    """
    if tarinfo.issym():
        # Symlinks are resolved relative to the member's directory and
        # may point anywhere, so search the whole archive.
        base = os.path.dirname(tarinfo.name)
        linkname = "/".join(filter(None, (base, tarinfo.linkname)))
        limit = None
    else:
        # A hard link references a file archived earlier, so restrict
        # the search to members that precede the link itself.
        linkname = tarinfo.linkname
        limit = tarinfo
    member = self._getmember(linkname, tarinfo=limit, normalize=True)
    if member is None:
        raise KeyError("linkname %r not found" % linkname)
    return member
def __iter__(self):
    """Provide an iterator object.
    """
    if self._loaded:
        # Everything is parsed already; just replay the member list.
        yield from self.members
        return
    # Yield items using TarFile's next() method.
    # When all members have been read, set TarFile as _loaded.
    index = 0
    # Fix for SF #1100429: Under rare circumstances it can
    # happen that getmembers() is called during iteration,
    # which will have already exhausted the next() method.
    if self.firstmember is not None:
        tarinfo = self.next()
        index += 1
        yield tarinfo
    while True:
        if index < len(self.members):
            # Another caller (e.g. getmembers()) already parsed this
            # member; reuse it instead of re-reading the archive.
            tarinfo = self.members[index]
        elif not self._loaded:
            tarinfo = self.next()
            if not tarinfo:
                self._loaded = True
                return
        else:
            return
        index += 1
        yield tarinfo
def _dbg(self, level, msg):
    """Emit msg on sys.stderr when the debug level is high enough."""
    if self.debug >= level:
        print(msg, file=sys.stderr)
def __enter__(self):
    # Context-manager entry: refuse to enter a closed archive.
    self._check()
    return self
def __exit__(self, type, value, traceback):
    """Context-manager exit: finalize the archive on success."""
    if type is not None:
        # An exception occurred. We must not call close() because
        # it would try to write end-of-archive blocks and padding.
        if not self._extfileobj:
            self.fileobj.close()
        self.closed = True
        return
    self.close()
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
    """Return True if name points to a tar archive that we
    are able to handle, else return False.
    """
    # Opening the archive already parses the first header, so a
    # successful open/close is sufficient proof.
    try:
        open(name).close()
    except TarError:
        return False
    return True
# Module-level convenience alias: tarfile.open(...) dispatches through
# TarFile.open() and its OPEN_METH registry (this shadows the builtin
# open() inside this module; bltn_open keeps a reference to the builtin).
open = TarFile.open
def main():
    """Command line interface: list, extract, create or test archives."""
    import argparse
    description = 'A simple command line interface for tarfile module.'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help='Verbose output')
    # The four actions are mutually exclusive; exactly one must be given.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-l', '--list', metavar='<tarfile>',
                       help='Show listing of a tarfile')
    group.add_argument('-e', '--extract', nargs='+',
                       metavar=('<tarfile>', '<output_dir>'),
                       help='Extract tarfile into target dir')
    group.add_argument('-c', '--create', nargs='+',
                       metavar=('<name>', '<file>'),
                       help='Create tarfile from sources')
    group.add_argument('-t', '--test', metavar='<tarfile>',
                       help='Test if a tarfile is valid')
    args = parser.parse_args()
    if args.test:
        src = args.test
        if is_tarfile(src):
            # Reading the member list verifies every header parses.
            with open(src, 'r') as tar:
                tar.getmembers()
                print(tar.getmembers(), file=sys.stderr)
            if args.verbose:
                print('{!r} is a tar archive.'.format(src))
        else:
            parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
    elif args.list:
        src = args.list
        if is_tarfile(src):
            with TarFile.open(src, 'r:*') as tf:
                tf.list(verbose=args.verbose)
        else:
            parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
    elif args.extract:
        # One argument: extract into the current directory; two: into
        # the given directory.
        if len(args.extract) == 1:
            src = args.extract[0]
            curdir = os.curdir
        elif len(args.extract) == 2:
            src, curdir = args.extract
        else:
            parser.exit(1, parser.format_help())
        if is_tarfile(src):
            with TarFile.open(src, 'r:*') as tf:
                tf.extractall(path=curdir)
            if args.verbose:
                if curdir == '.':
                    msg = '{!r} file is extracted.'.format(src)
                else:
                    msg = ('{!r} file is extracted '
                           'into {!r} directory.').format(src, curdir)
                print(msg)
        else:
            parser.exit(1, '{!r} is not a tar archive.\n'.format(src))
    elif args.create:
        tar_name = args.create.pop(0)
        _, ext = os.path.splitext(tar_name)
        # Pick the compression from the requested file extension.
        compressions = {
            # gz
            '.gz': 'gz',
            '.tgz': 'gz',
            # xz
            '.xz': 'xz',
            '.txz': 'xz',
            # bz2
            '.bz2': 'bz2',
            '.tbz': 'bz2',
            '.tbz2': 'bz2',
            '.tb2': 'bz2',
        }
        tar_mode = 'w:' + compressions[ext] if ext in compressions else 'w'
        tar_files = args.create
        with TarFile.open(tar_name, tar_mode) as tf:
            for file_name in tar_files:
                tf.add(file_name)
        if args.verbose:
            print('{!r} file created.'.format(tar_name))
    else:
        # No action given: show usage and exit with an error.
        parser.exit(1, parser.format_help())
# Allow running the module as a script, e.g. ``python tarfile.py -l a.tar``.
if __name__ == '__main__':
    main()
| {
"content_hash": "c8b135a9219a551b707565d3ed63861d",
"timestamp": "",
"source": "github",
"line_count": 2512,
"max_line_length": 103,
"avg_line_length": 36.47691082802548,
"alnum_prop": 0.5378806067881698,
"repo_name": "thecodinghub/news-for-good",
"id": "b78b1b1f4bb47364d0a3faaa54e89f35c03cdab3",
"size": "93021",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "news/Lib/tarfile.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3331"
},
{
"name": "CSS",
"bytes": "6436"
},
{
"name": "HTML",
"bytes": "11346"
},
{
"name": "JavaScript",
"bytes": "1243"
},
{
"name": "Python",
"bytes": "3578532"
}
],
"symlink_target": ""
} |
"""Cloud BigQuery Client for TensorFlow.
This package allows TensorFlow to interface directly with Cloud BigQuery
for high-speed data loading.
@@BigQueryClient
@@BigQueryReadSession
@@BigQueryTestClient
"""
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow_io.python.ops.bigquery_dataset_ops import BigQueryClient
from tensorflow_io.python.ops.bigquery_dataset_ops import BigQueryReadSession
from tensorflow_io.python.ops.bigquery_dataset_ops import BigQueryTestClient
_allowed_symbols = ["BigQueryClient", "BigQueryReadSession", "BigQueryTestClient"]
remove_undocumented(__name__, _allowed_symbols)
| {
"content_hash": "26e1259b432dd04a3de579e711b5de9d",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 82,
"avg_line_length": 31.85,
"alnum_prop": 0.8210361067503925,
"repo_name": "tensorflow/io",
"id": "7d524d7404b273901d93491def3da00929db2580",
"size": "1326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_io/bigquery.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1583693"
},
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "Dockerfile",
"bytes": "3938"
},
{
"name": "Go",
"bytes": "3757"
},
{
"name": "JavaScript",
"bytes": "6794"
},
{
"name": "Python",
"bytes": "1380386"
},
{
"name": "R",
"bytes": "82002"
},
{
"name": "Shell",
"bytes": "36295"
},
{
"name": "Starlark",
"bytes": "74322"
},
{
"name": "Swift",
"bytes": "19103"
}
],
"symlink_target": ""
} |
import pecan
from kds.api import config as pecan_config
from kds.api import hooks
from kds.common import config as oslo_config
CONF = oslo_config.CONF
def get_pecan_config():
    """Load the pecan configuration from the bundled config module.

    Resolves the config module's on-disk path (mapping a compiled
    ``.pyc`` back to its ``.py`` source) and hands it to pecan.
    """
    config_path = pecan_config.__file__.replace('.pyc', '.py')
    return pecan.configuration.conf_from_file(config_path)
def setup_app(config=None, extra_hooks=None):
    """Build and return the pecan WSGI application.

    :param config: optional pecan configuration object; when omitted it
        is loaded from the bundled config module.
    :param extra_hooks: optional iterable of additional pecan hooks,
        appended after the standard config/storage/crypto hooks.
    """
    all_hooks = [
        hooks.ConfigHook(),
        hooks.StorageHook(),
        hooks.CryptoManager(),
    ]
    if extra_hooks:
        all_hooks.extend(extra_hooks)

    if not config:
        config = get_pecan_config()
    pecan.configuration.set_config(dict(config), overwrite=True)

    return pecan.make_app(
        config.app.root,
        static_root=config.app.static_root,
        template_path=config.app.template_path,
        debug=CONF.debug,
        force_canonical=getattr(config.app, 'force_canonical', True),
        hooks=all_hooks,
    )
class VersionSelectorApplication(object):
    """WSGI entry point that delegates every request to the v1 app."""

    def __init__(self):
        # Only one API version exists, so it is built eagerly here.
        pecan_cfg = get_pecan_config()
        self.v1 = setup_app(config=pecan_cfg)

    def __call__(self, environ, start_response):
        # Standard WSGI callable: forward the request unchanged.
        return self.v1(environ, start_response)
| {
"content_hash": "b777d75a0d4e2d495ce7b43eadb699e0",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 69,
"avg_line_length": 25.53191489361702,
"alnum_prop": 0.6466666666666666,
"repo_name": "jamielennox/openstack-kds",
"id": "ca60c7e1b69dcfb8be64aceb5074c7fd7eb47869",
"size": "1791",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kds/api/app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "219538"
}
],
"symlink_target": ""
} |
import numpy as np
import theano
import theano.tensor as T
from neupy.utils import asfloat
from neupy.core.properties import ProperFractionProperty, BoundedProperty
from neupy.algorithms.utils import iter_parameters, count_parameters
from .base import SingleStepConfigurable
__all__ = ('LeakStepAdaptation',)
class LeakStepAdaptation(SingleStepConfigurable):
    """ Leak Learning Rate Adaptation algorithm for step adaptation procedure
    in backpropagation algorithm. By default every layer has the same value
    as ``step`` parameter in network, but after first training epoch they
    must be different.

    Parameters
    ----------
    leak_size : float
        Defaults to ``0.01``. This variable identified proportion, so it's
        always between 0 and 1. Usually this value is small.
    alpha : float
        The ``alpha`` is control total step update ratio (It's similar to
        step role in weight update procedure). Defaults to ``0.001``.
        Typically this value is small.
    beta : float
        This is similar to ``alpha``, but it controls the ratio only for
        the update of matrix norms. Defaults to ``20``.
        Typically this value is > 1.

    Warns
    -----
    {SingleStepConfigurable.Warns}

    Examples
    --------
    >>> from neupy import algorithms
    >>>
    >>> bpnet = algorithms.GradientDescent(
    ...     (2, 4, 1),
    ...     addons=[algorithms.LeakStepAdaptation]
    ... )
    >>>

    .. [1] Noboru M. "Adaptive on-line learning in changing
        environments", 1997

    .. [2] LeCun, "Efficient BackProp", 1998
    """
    leak_size = ProperFractionProperty(default=0.01)
    alpha = BoundedProperty(default=0.001, minval=0)
    beta = BoundedProperty(default=20, minval=0)

    def init_variables(self):
        """Create one shared vector that holds the leaky gradient average
        across all network parameters (flattened)."""
        super(LeakStepAdaptation, self).init_variables()
        n_parameters = count_parameters(self)
        self.variables.leak_average = theano.shared(
            value=asfloat(np.zeros(n_parameters)),
            name='leak_average'
        )

    def init_train_updates(self):
        """Extend the training updates with the leak-average and step
        adaptation rules."""
        updates = super(LeakStepAdaptation, self).init_train_updates()

        alpha = self.alpha
        beta = self.beta
        leak_size = self.leak_size

        step = self.variables.step
        leak_average = self.variables.leak_average

        parameters = list(iter_parameters(self))
        gradients = T.grad(self.variables.error_func, wrt=parameters)
        full_gradient = T.concatenate([grad.flatten() for grad in gradients])

        # Exponential moving average of the flattened gradient vector.
        leak_average_update = (
            (1 - leak_size) * leak_average + leak_size * full_gradient
        )
        # Grow the step while the averaged gradient norm is large
        # (scaled by beta) and shrink it otherwise.
        new_step = step + alpha * step * (
            beta * leak_average_update.norm(L=2) - step
        )
        updates.extend([
            (leak_average, leak_average_update),
            (step, new_step),
        ])
        return updates
| {
"content_hash": "2c0b1eebda4aa82e313c3fbb3c133125",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 77,
"avg_line_length": 31.52747252747253,
"alnum_prop": 0.6350644823980481,
"repo_name": "stczhc/neupy",
"id": "088921bca9d3ef6a045cf874f1c4dfc52a5649ac",
"size": "2869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neupy/algorithms/steps/leak_step.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Fortran",
"bytes": "163909"
},
{
"name": "HTML",
"bytes": "5245"
},
{
"name": "JavaScript",
"bytes": "3570"
},
{
"name": "Makefile",
"bytes": "485"
},
{
"name": "Python",
"bytes": "598531"
},
{
"name": "Shell",
"bytes": "372"
}
],
"symlink_target": ""
} |
__author__ = 'Taio'

import os
import os.path
import dns.query
import dns.zone
import logging

from utils.noflib import Noflib

# Helper used by main() to shell out for privileged commands.
run = Noflib()

# File-based logging; main() creates the directory via sudo if missing.
logger = logging.getLogger('get-dns')
logging.basicConfig(filename='/var/log/get-dns/get-dns.log', level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')

# Domain controller address and zone name for the AXFR zone transfer.
dcAdd = '172.20.10.75'
domainName = 'spidc1.com'

# NOTE(review): the zone transfer runs at import time, before the root
# check in __main__ — any network failure aborts the script right here.
z = dns.zone.from_xfr(dns.query.xfr(dcAdd, domainName))
names = z.nodes.keys()
names.sort()  # Python 2: .keys() returns a plain, sortable list
def get_client_hosts():
    """Return the raw contents of the local ``client_hosts`` file."""
    with open('client_hosts', 'r') as hosts_file:
        return hosts_file.read()
def print_local_host():
    # Emit the standard IPv4 and IPv6 loopback entries that every
    # generated hosts file must begin with (Python 2 print statements).
    print '127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4'
    print '::1 localhost localhost.localdomain localhost6 localhost6.localdomain6'
def print_records_stdout():
    """Print records only to stdout"""
    # Iterates the zone loaded at import time (module globals ``z`` and
    # ``names``) and emits one "IP FQDN" line per A record that is not
    # suppressed by the except_hosts file.
    for i in names:
        line = z[i].to_text(i).split()
        logger.info(line[3])
        if line[3] == 'A':  # field 3 of the zone text is the record type
            logger.info(line[4])
            new_line = line[4] + ' ' + line[0] + '.spidc1.com'
            if new_line not in get_except_hosts():
                # NOTE(review): substring test against the whole file,
                # not a per-line match — partial matches also suppress.
                logger.info(new_line)
                print new_line
def gen_records_spidc1():
    """Write to /etc/hosts file"""
    # Appends every zone record verbatim to /etc/hosts; needs root
    # (the __main__ guard enforces uid 0 before anything runs).
    try:
        with open('/etc/hosts', 'a') as f:
            for i in names:
                f.write(z[i].to_text(i))
    except IOError:
        # Logs the generic IOError class docstring — the caught
        # exception instance itself is not captured or reported.
        logger.error(IOError.__doc__)
        print IOError.__doc__
def get_except_hosts():
    """Return the contents of the local ``except_hosts`` exclusion file.

    :return string:
    """
    with open('except_hosts', 'r') as excluded:
        return excluded.read()
def main():
    # Ensure the log directory exists; logging was already configured
    # at import time, so the very first run may not have had a log file.
    if not os.path.exists('/var/log/get-dns'):
        run.execute_get_output('sudo', 'mkdir', '/var/log/get-dns')
    # Emit loopback entries, zone A records, then the static client list.
    print_local_host()
    print_records_stdout()
    print get_client_hosts()
if __name__ == '__main__':
    # Root is required: main() may create /var/log/get-dns via sudo and
    # gen_records_spidc1 appends to /etc/hosts.
    if os.getuid() == 0:
        main()
    else:
        print 'You do not have permission, please run as root.'
        exit()
"content_hash": "b5cd669a97dd963f1c53251d600ddeac",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 90,
"avg_line_length": 25.025316455696203,
"alnum_prop": 0.5832068791097622,
"repo_name": "jiasir/get-dns",
"id": "0ea32f2f0177e6ecd1744027dcd0fc176bbdec4b",
"size": "1999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "get-dns.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3808"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds reverse-accessor names
    # ("item_accesses" / "item_contributions") to the user foreign keys
    # on the Access and Contributor models. No database-level change.

    dependencies = [
        ('editor', '0036_extension_runs_headless'),
    ]

    operations = [
        migrations.AlterField(
            model_name='access',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='item_accesses', to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='contributor',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='item_contributions', to=settings.AUTH_USER_MODEL),
        ),
    ]
| {
"content_hash": "1481ef3586cbdc126347a997f9059597",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 168,
"avg_line_length": 33.43478260869565,
"alnum_prop": 0.64629388816645,
"repo_name": "numbas/editor",
"id": "d18b8f1c05da675767636bc460c8ac533954f64e",
"size": "819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "editor/migrations/0037_auto_20190320_1436.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "44056"
},
{
"name": "HTML",
"bytes": "548468"
},
{
"name": "JavaScript",
"bytes": "2344000"
},
{
"name": "Less",
"bytes": "205670"
},
{
"name": "Makefile",
"bytes": "10028"
},
{
"name": "Python",
"bytes": "551931"
}
],
"symlink_target": ""
} |
import builtins as __builtins__
from brackets.translate import translate
def eval(code, *args, **kwargs):
    """Translate brackets syntax to Python and evaluate it.

    Extra arguments are forwarded to the builtin ``eval``
    (globals/locals mappings).
    """
    translated, debug, original = translate(code)
    try:
        return __builtins__.eval(translated, *args, **kwargs)
    except Exception as error:
        # NOTE(review): ``exception_handler`` is neither defined nor
        # imported in this module — confirm it is provided elsewhere.
        exception_handler(error, debug, original)
def exec(code, *args, **kwargs):
    """Translate brackets syntax to Python and execute it.

    Extra arguments are forwarded to the builtin ``exec``
    (globals/locals mappings).
    """
    translated, debug, original = translate(code)
    try:
        return __builtins__.exec(translated, *args, **kwargs)
    except Exception as error:
        # NOTE(review): ``exception_handler`` is neither defined nor
        # imported in this module — confirm it is provided elsewhere.
        exception_handler(error, debug, original)
def compile(code, *args, **kwargs):
    """Translate brackets syntax to Python and compile it.

    Extra arguments are forwarded to the builtin ``compile``
    (filename, mode, flags, ...).
    """
    translated, debug, original = translate(code)
    try:
        return __builtins__.compile(translated, *args, **kwargs)
    except Exception as error:
        # NOTE(review): ``exception_handler`` is neither defined nor
        # imported in this module — confirm it is provided elsewhere.
        exception_handler(error, debug, original)
| {
"content_hash": "085305b599ce9f52c8dcf5cf3a80aa5a",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 58,
"avg_line_length": 31.608695652173914,
"alnum_prop": 0.6437414030261348,
"repo_name": "pooya-eghbali/brackets",
"id": "05cdcde1ee615044abe9ae3bb2e92108ba3e8550",
"size": "727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "brackets/helpers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "56"
},
{
"name": "CSS",
"bytes": "23351"
},
{
"name": "HTML",
"bytes": "300292"
},
{
"name": "Python",
"bytes": "31559"
}
],
"symlink_target": ""
} |
import argparse, os
import torch
from torch.autograd import Variable
import numpy as np
import time, math
import scipy.io as sio
import matplotlib.pyplot as plt
# Command-line interface for the SRResNet super-resolution demo.
parser = argparse.ArgumentParser(description="PyTorch SRResNet Demo")
parser.add_argument("--cuda", action="store_true", help="use cuda?")
parser.add_argument("--model", default="model/model_srresnet.pth", type=str, help="model path")
parser.add_argument("--image", default="butterfly_GT", type=str, help="image name")
parser.add_argument("--dataset", default="Set5", type=str, help="dataset name")
parser.add_argument("--scale", default=4, type=int, help="scale factor, Default: 4")
parser.add_argument("--gpus", default="0", type=str, help="gpu ids (default: 0)")
def PSNR(pred, gt, shave_border=0):
    """Peak signal-to-noise ratio (dB) between two 8-bit-range images.

    Both arrays are cropped by ``shave_border`` pixels on every side
    before comparison.  Identical (cropped) images yield 100 rather
    than dividing by zero.
    """
    h, w = pred.shape[:2]
    crop = (slice(shave_border, h - shave_border),
            slice(shave_border, w - shave_border))
    diff = pred[crop] - gt[crop]
    rmse = math.sqrt(np.mean(diff ** 2))
    if rmse == 0:
        return 100
    return 20 * math.log10(255.0 / rmse)
opt = parser.parse_args()
cuda = opt.cuda

if cuda:
    print("=> use gpu id: '{}'".format(opt.gpus))
    os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus
    if not torch.cuda.is_available():
        raise Exception("No GPU found or Wrong gpu id, please run without --cuda")

# Checkpoints store the generator network under the "model" key.
model = torch.load(opt.model)["model"]

# Ground truth, bicubic upscale, and low-resolution inputs come from a
# single .mat fixture per image (loaded three times here).
im_gt = sio.loadmat("testsets/" + opt.dataset + "/" + opt.image + ".mat")['im_gt']
im_b = sio.loadmat("testsets/" + opt.dataset + "/" + opt.image + ".mat")['im_b']
im_l = sio.loadmat("testsets/" + opt.dataset + "/" + opt.image + ".mat")['im_l']

im_gt = im_gt.astype(float).astype(np.uint8)
im_b = im_b.astype(float).astype(np.uint8)
im_l = im_l.astype(float).astype(np.uint8)

# HWC uint8 -> 1xCxHxW float tensor scaled to [0, 1].
im_input = im_l.astype(np.float32).transpose(2,0,1)
im_input = im_input.reshape(1,im_input.shape[0],im_input.shape[1],im_input.shape[2])
im_input = Variable(torch.from_numpy(im_input/255.).float())

if cuda:
    model = model.cuda()
    im_input = im_input.cuda()
else:
    model = model.cpu()

# Time a single forward pass through the network.
start_time = time.time()
out = model(im_input)
elapsed_time = time.time() - start_time

out = out.cpu()

# Back to HxWxC, rescaled to [0, 255] and clipped for display.
im_h = out.data[0].numpy().astype(np.float32)

im_h = im_h*255.
im_h[im_h<0] = 0
im_h[im_h>255.] = 255.

im_h = im_h.transpose(1,2,0)

print("Dataset=",opt.dataset)
print("Scale=",opt.scale)
print("It takes {}s for processing".format(elapsed_time))

# Side-by-side comparison: ground truth, bicubic input, network output.
fig = plt.figure()
ax = plt.subplot("131")
ax.imshow(im_gt)
ax.set_title("GT")

ax = plt.subplot("132")
ax.imshow(im_b)
ax.set_title("Input(Bicubic)")

ax = plt.subplot("133")
ax.imshow(im_h.astype(np.uint8))
ax.set_title("Output(SRResNet)")
plt.show()
| {
"content_hash": "40431944000c885dc6974363312e7922",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 95,
"avg_line_length": 33.28235294117647,
"alnum_prop": 0.6398020501944149,
"repo_name": "twtygqyy/pytorch-SRResNet",
"id": "ae24b903343328938b047dd567795d44832ef527",
"size": "2829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "5934"
},
{
"name": "Python",
"bytes": "18533"
}
],
"symlink_target": ""
} |
import json
from six.moves.urllib import parse as urllib
from tempest.api_schema.response.compute.v2_1 import migrations as schema
from tempest.common import service_client
class MigrationsClientJSON(service_client.ServiceClient):
    """Client for the Nova os-migrations API."""

    def list_migrations(self, params=None):
        """Lists all migrations."""
        url = 'os-migrations'
        if params:
            url = url + '?' + urllib.urlencode(params)
        resp, raw_body = self.get(url)
        parsed = json.loads(raw_body)
        self.validate_response(schema.list_migrations, resp, parsed)
        return service_client.ResponseBodyList(resp, parsed['migrations'])
| {
"content_hash": "15881c1a00e10d6d271f7f86f8e680d6",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 73,
"avg_line_length": 29.80952380952381,
"alnum_prop": 0.6853035143769968,
"repo_name": "neerja28/Tempest",
"id": "f708a07a19bfd174edccad3e5899e516f21b7bfe",
"size": "1235",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tempest/services/compute/json/migrations_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2670479"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
} |
import simcity.__main__ as main
import pytest
@pytest.mark.parametrize('command', ['cancel', 'check', 'create', 'delete',
                                     'init', 'run', 'scrub', 'summary',
                                     'submit'])
def test_main_parser_init(command, argument_parser):
    """Each subcommand must register a handler ('func') on the parser.

    The original decorator was ``@pytest.fixture(params=...)``, which
    turns this test function into a fixture — pytest never collects or
    runs fixtures as tests.  ``pytest.mark.parametrize`` runs the test
    once per subcommand as intended.
    """
    main.fill_argument_parser(argument_parser)
    assert 'func' in argument_parser.parse_args([command])
| {
"content_hash": "9455c13bd8cdf14d0b80af5f59507e62",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 77,
"avg_line_length": 38.44444444444444,
"alnum_prop": 0.6560693641618497,
"repo_name": "NLeSC/sim-city-client",
"id": "b88dc250779d5860aa95904833433c5b2e4ceb78",
"size": "958",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/test_main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1225"
},
{
"name": "Python",
"bytes": "106321"
},
{
"name": "Shell",
"bytes": "800"
}
],
"symlink_target": ""
} |
import flask

# Blueprint collecting Mailu's internal endpoints.
internal = flask.Blueprint('internal', __name__, template_folder='templates')

# Imported last on purpose: the views module needs the ``internal``
# blueprint defined above, so this import-order dependency is deliberate.
from mailu.internal.views import *
| {
"content_hash": "e070c04c21b27803a1bcbb268339fe1e",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 77,
"avg_line_length": 18.571428571428573,
"alnum_prop": 0.7384615384615385,
"repo_name": "kaiyou/freeposte.io",
"id": "560f4d970a1b8196e29409ebb8f78d6f4ebecaa5",
"size": "130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/admin/mailu/internal/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "180"
},
{
"name": "HTML",
"bytes": "25498"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Nginx",
"bytes": "1756"
},
{
"name": "PHP",
"bytes": "1431"
},
{
"name": "Python",
"bytes": "50818"
},
{
"name": "Shell",
"bytes": "2056"
}
],
"symlink_target": ""
} |
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._virtual_machine_sizes_operations import build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# Auto-generated Azure SDK operation group; the request/response flow
# below follows the standard azure-core pipeline conventions.
class VirtualMachineSizesOperations:
    """VirtualMachineSizesOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.machinelearningservices.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace_async
    async def list(
        self,
        location: str,
        **kwargs: Any
    ) -> "_models.VirtualMachineSizeListResult":
        """Returns supported VM Sizes in a location.

        :param location: The location upon which virtual-machine-sizes is queried.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: VirtualMachineSizeListResult, or the result of cls(response)
        :rtype: ~azure.mgmt.machinelearningservices.models.VirtualMachineSizeListResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualMachineSizeListResult"]
        # Auth/not-found/conflict get mapped to typed exceptions; callers
        # may extend the mapping via the ``error_map`` keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        # Build the GET request from the generated request builder, then
        # resolve the templated URL against the client's base endpoint.
        request = build_list_request(
            location=location,
            subscription_id=self._config.subscription_id,
            template_url=self.list.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponseAutoGenerated, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('VirtualMachineSizeListResult', pipeline_response)

        if cls:
            # Custom response hook supplied by the caller.
            return cls(pipeline_response, deserialized, {})

        return deserialized

    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/locations/{location}/vmSizes'}  # type: ignore
| {
"content_hash": "f38b2bf5cc11d205e97d2a307cabef6a",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 151,
"avg_line_length": 43.174418604651166,
"alnum_prop": 0.7142472394290331,
"repo_name": "Azure/azure-sdk-for-python",
"id": "4f8b3a672351f32a0733754f77a15f1e54b5aec1",
"size": "4180",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/v2021_10_01/aio/operations/_virtual_machine_sizes_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
import subprocess
from cactus.static.external import External
class ClosureJSOptimizer(External):
    """Minify JavaScript by shelling out to the Closure compiler."""

    supported_extensions = ('js',)
    output_extension = 'js'

    def _run(self):
        # Argument-list form (no shell) with simple optimizations only;
        # the compiler writes the result directly to self.dst.
        command = [
            'closure-compiler',
            '--js', self.src,
            '--js_output_file', self.dst,
            '--compilation_level', 'SIMPLE_OPTIMIZATIONS',
        ]
        subprocess.call(command)
"content_hash": "c808a09e2b49e1aa4fe0cbfa5a4bf844",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 57,
"avg_line_length": 25.466666666666665,
"alnum_prop": 0.5890052356020943,
"repo_name": "gone/Cactus",
"id": "98bdf88e187fd3c9fb256a6ed99915b5b363e077",
"size": "396",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "cactus/contrib/external/closure.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "261"
},
{
"name": "HTML",
"bytes": "8133"
},
{
"name": "JavaScript",
"bytes": "60"
},
{
"name": "Makefile",
"bytes": "448"
},
{
"name": "Python",
"bytes": "226407"
}
],
"symlink_target": ""
} |
from kafka.tools.protocol.responses import BaseResponse
class ListOffsetV0Response(BaseResponse):
    # Wire schema for the Kafka ListOffsets (v0) response: an array of
    # topics, each holding per-partition error codes and the offsets
    # returned for that partition.
    schema = [
        {'name': 'responses',
         'type': 'array',
         'item_type': [
             {'name': 'topic', 'type': 'string'},
             {'name': 'partition_responses',
              'type': 'array',
              'item_type': [
                  {'name': 'partition', 'type': 'int32'},
                  {'name': 'error', 'type': 'int16'},
                  {'name': 'offsets', 'type': 'array', 'item_type': 'int64'},
              ]},
         ]},
    ]
| {
"content_hash": "bb48b996c81d3896a755cbe4c1259788",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 77,
"avg_line_length": 32.05555555555556,
"alnum_prop": 0.43847487001733104,
"repo_name": "toddpalino/kafka-tools",
"id": "2cee5e08fab4f3d8bf4c756459d2e8dc941c07da",
"size": "1363",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kafka/tools/protocol/responses/list_offset_v0.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "707729"
}
],
"symlink_target": ""
} |
# Application configuration for the zkw Flask app.  Values here are
# defaults; config_local/secrets (imported below) may override any name.
import os
import datetime

__basedir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))

DEBUG = False
TESTING = False

SQLALCHEMY_DATABASE_URI = 'sqlite://'
SQLALCHEMY_TRACK_MODIFICATIONS = False

FLATPAGES_MARKDOWN_EXTENSIONS = []
FLATPAGES_EXTENSION = '.html.md'

SITE_NAME = 'zakwasy.zgodowie.org'

DEFAULT_LIST_PAGE_SIZE = 10
MAX_LIST_PAGE_SIZE = 100

THEMES_DIR = os.path.join('vendor', 'bootswatch')

ACCEPT_LANGUAGES = [
    'pl',
    'en',
]

TIME_ZONE = 'Europe/Warsaw'

USE_SESSION_FOR_NEXT = True

DEFAULT_SD_VALIDITY_DAYS = 30

# exact postal code will be searched always
SD_SEARCH_ZONES = (
    10,
    20,
    35,
)

SD_EXPIRY_MAIL_BEFORE_DAYS = 7
SD_EXPIRY_MAIL_TIME = datetime.time(hour=11, minute=40)

RQ_REDIS_URL = 'redis://localhost:6379/0'

CACHE_DEFAULT_TIMEOUT = 7 * 24 * 60 * 60  # 7 days
CACHE_TYPE = 'redis'
CACHE_REDIS_URL = 'redis://localhost:6379/1'

MAIL_FROM_EMAIL = 'mailservice@zakwasy.zgodowie.org'
MAIL_FROM_NAME = 'Zakwasy@Zgodowie.org'

# Optional per-deployment override module; silently skipped if absent.
try:
    from .config_local import *  # noqa
except ImportError:
    pass

# Credentials module providing GOOGLE_CONSUMER_KEY/SECRET and
# FACEBOOK_APP_ID/SECRET used below; silently skipped if absent.
try:
    from .secrets import *  # noqa
except ImportError:
    pass

GOOGLE = dict(
    base_url='https://www.googleapis.com/oauth2/v1/',
    authorize_url='https://accounts.google.com/o/oauth2/auth',
    request_token_url=None,
    request_token_params={
        'scope': [
            'https://www.googleapis.com/auth/userinfo.email',
        ],
    },
    access_token_url='https://accounts.google.com/o/oauth2/token',
    access_token_method='POST',
    consumer_key=GOOGLE_CONSUMER_KEY,  # noqa
    consumer_secret=GOOGLE_CONSUMER_SECRET,  # noqa
)

FACEBOOK = dict(
    base_url='https://graph.facebook.com/v2.10',
    request_token_url=None,
    access_token_url='/oauth/access_token',
    authorize_url='https://www.facebook.com/v2.10/dialog/oauth',
    consumer_key=FACEBOOK_APP_ID,  # noqa
    consumer_secret=FACEBOOK_APP_SECRET,  # noqa
    request_token_params={'scope': 'email'},
)

# Evaluated after the override imports, so a DEBUG set in config_local
# also switches RQ into synchronous mode here.
RQ_ASYNC = not DEBUG
| {
"content_hash": "8ee7e6cd7e6937d17339eab4e27195c3",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 71,
"avg_line_length": 22.511363636363637,
"alnum_prop": 0.6733972741039879,
"repo_name": "zgoda/zakwasy",
"id": "0913fb6f0a20e90cdfef98d981f0ba0c1a0e0c8f",
"size": "1981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zkw/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "183"
},
{
"name": "HTML",
"bytes": "20641"
},
{
"name": "Python",
"bytes": "44590"
},
{
"name": "Shell",
"bytes": "1007"
}
],
"symlink_target": ""
} |
from mpmath.libmp.libmpf import prec_to_dps
from ...core import (Add, Function, Ge, Gt, I, Integer, Le, Lt,
PrecisionExhausted)
from ...logic import false, true
###############################################################################
# ####################### FLOOR and CEILING FUNCTIONS ####################### #
###############################################################################
class RoundFunction(Function):
    """The base class for rounding functions (floor/ceiling).

    Subclasses supply ``_eval_number`` for explicit numbers; ``eval``
    below handles the generic splitting of an argument into integer,
    numeric, and symbolic parts.
    """

    @classmethod
    def eval(cls, arg):
        from .complexes import im
        # Integers and already-rounded expressions pass through.
        if arg.is_integer:
            return arg
        if isinstance(arg, cls):
            return arg
        # Purely imaginary argument: round the imaginary part alone.
        if arg.is_imaginary or (I*arg).is_extended_real:
            i = im(arg)
            if not i.has(I):
                return cls(i)*I
            return cls(arg, evaluate=False)

        # Explicit numbers / NumberSymbols handled by the subclass.
        v = cls._eval_number(arg)
        if v is not None:
            return v

        # Integral, numerical, symbolic part
        ipart = npart = spart = Integer(0)

        # Extract integral (or complex integral) terms
        terms = Add.make_args(arg)

        for t in terms:
            if t.is_integer or (t.is_imaginary and im(t).is_integer):
                ipart += t
            elif t.free_symbols:
                spart += t
            else:
                npart += t

        if not (npart or spart):
            return ipart

        # Evaluate npart numerically if independent of spart
        if npart and (not spart or npart.is_extended_real and
                      (spart.is_imaginary or (I*spart).is_extended_real) or
                      npart.is_imaginary and spart.is_extended_real):
            npart_int = None
            try:
                from ...core.evalf import DEFAULT_MAXPREC as TARGET
                # Raise precision until the rounded value of the numeric
                # part is unambiguous (its value provably differs from
                # an exact integer at the current precision).
                prec = 10
                r, i = Integer(0), Integer(0)
                npart_re, npart_im = npart.as_real_imag()
                while prec < TARGET:
                    dps = prec_to_dps(prec)
                    r, i = npart_re.evalf(dps), npart_im.evalf(dps)
                    if ((not r or int(2**prec*abs(r)) > 2**prec*abs(int(r))) and
                            (not i or int(2**prec*abs(i)) > 2**prec*abs(int(i)))):
                        npart_int = cls(r) + cls(i)*I
                        break
                    prec += 10
                else:
                    raise PrecisionExhausted
            except PrecisionExhausted:
                # Last resort: accept the rounding only if it is exact.
                npart_int = cls(r) + cls(i)*I
                if not npart.equals(npart_int):
                    npart_int = None
            if npart_int is not None:
                ipart += npart_int
                npart = Integer(0)

        # Whatever numeric remainder could not be resolved joins the
        # symbolic part and stays wrapped in an unevaluated rounding.
        spart += npart
        if not spart:
            return ipart
        elif spart.is_imaginary or (I*spart).is_extended_real:
            return ipart + cls(im(spart), evaluate=False)*I
        else:
            return ipart + cls(spart, evaluate=False)

    def _eval_is_finite(self):
        return self.args[0].is_finite

    def _eval_is_extended_real(self):
        if self.args[0].is_extended_real:
            return True

    def _eval_is_integer(self):
        if self.args[0].is_real:
            return True
class floor(RoundFunction):
    """
    Floor is a univariate function which returns the largest integer
    value not greater than its argument. However this implementation
    generalizes floor to complex numbers.

    Examples
    ========

    >>> floor(17)
    17
    >>> floor(Rational(23, 10))
    2
    >>> floor(2*E)
    5
    >>> floor(-Float(0.567))
    -1
    >>> floor(-I/2)
    -I

    See Also
    ========

    diofant.functions.elementary.integers.ceiling

    References
    ==========

    * "Concrete mathematics" by Graham, pp. 87
    * https://mathworld.wolfram.com/FloorFunction.html
    """
    # Rounding direction marker used by the shared RoundFunction machinery.
    _dir = -1

    @classmethod
    def _eval_number(cls, arg):
        # Exact floor for explicit numbers; NumberSymbols (pi, E, ...)
        # take the lower end of their integer approximation interval.
        if arg.is_Number:
            if arg.is_Rational:
                return Integer(arg.numerator // arg.denominator)
            elif arg.is_Float:
                return Integer(int(arg.floor()))
            else:
                return arg
        elif isinstance(arg, (floor, ceiling)):
            # floor/ceiling already produce integers; idempotent.
            return arg
        if arg.is_NumberSymbol:
            return arg.approximation_interval(Integer)[0]

    def _eval_nseries(self, x, n, logx):
        # Locally constant, except exactly at a jump point where the
        # approach direction selects between r and r - 1.
        r = self.subs({x: 0})
        args = self.args[0]
        args0 = args.subs({x: 0})
        if args0 == r:
            direction = (args - args0).as_leading_term(x).as_coeff_exponent(x)[0]
            if direction.is_positive:
                return r
            else:
                return r - 1
        else:
            return r

    def __le__(self, other):
        # floor(x) <= x always holds for real x.
        if self.args[0] == other and other.is_extended_real:
            return true
        return Le(self, other, evaluate=False)

    def __gt__(self, other):
        # floor(x) > x is always false for real x.
        if self.args[0] == other and other.is_extended_real:
            return false
        return Gt(self, other, evaluate=False)

    def _eval_as_leading_term(self, x):
        return self
class ceiling(RoundFunction):
    """
    Ceiling is a univariate function which returns the smallest integer
    value not less than its argument. Ceiling function is generalized
    in this implementation to complex numbers.

    Examples
    ========

    >>> ceiling(17)
    17
    >>> ceiling(Rational(23, 10))
    3
    >>> ceiling(2*E)
    6
    >>> ceiling(-Float(0.567))
    0
    >>> ceiling(I/2)
    I

    See Also
    ========

    diofant.functions.elementary.integers.floor

    References
    ==========

    * "Concrete mathematics" by Graham, pp. 87
    * https://mathworld.wolfram.com/CeilingFunction.html
    """
    # Rounding direction marker used by the shared RoundFunction machinery.
    _dir = 1

    @classmethod
    def _eval_number(cls, arg):
        # Exact ceiling for explicit numbers (ceil(p/q) == -floor(-p/q));
        # NumberSymbols take the upper end of their approximation interval.
        if arg.is_Number:
            if arg.is_Rational:
                return -Integer(-arg.numerator // arg.denominator)
            elif arg.is_Float:
                return Integer(int(arg.ceiling()))
            else:
                return arg
        elif isinstance(arg, (ceiling, floor)):
            # floor/ceiling already produce integers; idempotent.
            return arg
        if arg.is_NumberSymbol:
            return arg.approximation_interval(Integer)[1]

    def _eval_nseries(self, x, n, logx):
        # Locally constant, except exactly at a jump point where the
        # approach direction selects between r + 1 and r.
        r = self.subs({x: 0})
        args = self.args[0]
        args0 = args.subs({x: 0})
        if args0 == r:
            direction = (args - args0).as_leading_term(x).as_coeff_exponent(x)[0]
            if direction.is_positive:
                return r + 1
            else:
                return r
        else:
            return r

    def __lt__(self, other):
        # ceiling(x) < x is always false for real x.
        if self.args[0] == other and other.is_extended_real:
            return false
        return Lt(self, other, evaluate=False)

    def __ge__(self, other):
        # ceiling(x) >= x always holds for real x.
        if self.args[0] == other and other.is_extended_real:
            return true
        return Ge(self, other, evaluate=False)
| {
"content_hash": "7e2427f3d0264d5fb63dc7528dc56013",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 82,
"avg_line_length": 28.253061224489795,
"alnum_prop": 0.5057786766830396,
"repo_name": "diofant/diofant",
"id": "d85ba913fbf1c2b006d56cac728dc4500a9d1586",
"size": "6922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diofant/functions/elementary/integers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "9063539"
}
],
"symlink_target": ""
} |
"""Debugger basics"""
import sys
import os
import types
__all__ = ["BdbQuit","Bdb","Breakpoint"]
BdbQuit = 'bdb.BdbQuit' # Exception to give up completely
class Bdb:
"""Generic Python debugger base class.
This class takes care of details of the trace facility;
a derived class should implement user interaction.
The standard debugger class (pdb.Pdb) is an example.
"""
    def __init__(self):
        # breaks: canonical filename -> line numbers with breakpoints
        # fncache: raw filename -> canonical (absolute) path memo
        self.breaks = {}
        self.fncache = {}
    def canonic(self, filename):
        """Return the canonical (absolute) form of a filename, memoized."""
        canonic = self.fncache.get(filename)
        if not canonic:
            canonic = os.path.abspath(filename)
            self.fncache[filename] = canonic
        return canonic
    def reset(self):
        """Refresh the line cache and forget all stepping state."""
        import linecache
        linecache.checkcache()
        self.botframe = None
        self.stopframe = None
        self.returnframe = None
        self.quitting = 0
def trace_dispatch(self, frame, event, arg):
if self.quitting:
return # None
if event == 'line':
return self.dispatch_line(frame)
if event == 'call':
return self.dispatch_call(frame, arg)
if event == 'return':
return self.dispatch_return(frame, arg)
if event == 'exception':
return self.dispatch_exception(frame, arg)
print 'bdb.Bdb.dispatch: unknown debugging event:', `event`
return self.trace_dispatch
def dispatch_line(self, frame):
if self.stop_here(frame) or self.break_here(frame):
self.user_line(frame)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_call(self, frame, arg):
# XXX 'arg' is no longer used
if self.botframe is None:
# First call of dispatch since reset()
self.botframe = frame
return self.trace_dispatch
if not (self.stop_here(frame) or self.break_anywhere(frame)):
# No need to trace this function
return # None
self.user_call(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_return(self, frame, arg):
if self.stop_here(frame) or frame == self.returnframe:
self.user_return(frame, arg)
if self.quitting: raise BdbQuit
def dispatch_exception(self, frame, arg):
if self.stop_here(frame):
self.user_exception(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
# Normally derived classes don't override the following
# methods, but they may if they want to redefine the
# definition of stopping and breakpoints.
def stop_here(self, frame):
if self.stopframe is None:
return 1
if frame is self.stopframe:
return 1
while frame is not None and frame is not self.stopframe:
if frame is self.botframe:
return 1
frame = frame.f_back
return 0
def break_here(self, frame):
filename = self.canonic(frame.f_code.co_filename)
if not self.breaks.has_key(filename):
return 0
lineno = frame.f_lineno
if not lineno in self.breaks[filename]:
return 0
# flag says ok to delete temp. bp
(bp, flag) = effective(filename, lineno, frame)
if bp:
self.currentbp = bp.number
if (flag and bp.temporary):
self.do_clear(str(bp.number))
return 1
else:
return 0
def do_clear(self, arg):
raise NotImplementedError, "subclass of bdb must implement do_clear()"
def break_anywhere(self, frame):
return self.breaks.has_key(
self.canonic(frame.f_code.co_filename))
# Derived classes should override the user_* methods
# to gain control.
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
pass
def user_line(self, frame):
"""This method is called when we stop or break at this line."""
pass
def user_return(self, frame, return_value):
"""This method is called when a return trap is set here."""
pass
def user_exception(self, frame, (exc_type, exc_value, exc_traceback)):
"""This method is called if an exception occurs,
but only if we are to stop at or just below this level."""
pass
# Derived classes and clients can call the following methods
# to affect the stepping state.
def set_step(self):
"""Stop after one line of code."""
self.stopframe = None
self.returnframe = None
self.quitting = 0
def set_next(self, frame):
"""Stop on the next line in or below the given frame."""
self.stopframe = frame
self.returnframe = None
self.quitting = 0
def set_return(self, frame):
"""Stop when returning from the given frame."""
self.stopframe = frame.f_back
self.returnframe = frame
self.quitting = 0
def set_trace(self):
"""Start debugging from here."""
try:
1 + ''
except:
frame = sys.exc_info()[2].tb_frame.f_back
self.reset()
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back
self.set_step()
sys.settrace(self.trace_dispatch)
def set_continue(self):
# Don't stop except at breakpoints or when finished
self.stopframe = self.botframe
self.returnframe = None
self.quitting = 0
if not self.breaks:
# no breakpoints; run without debugger overhead
sys.settrace(None)
try:
1 + '' # raise an exception
except:
frame = sys.exc_info()[2].tb_frame.f_back
while frame and frame is not self.botframe:
del frame.f_trace
frame = frame.f_back
def set_quit(self):
self.stopframe = self.botframe
self.returnframe = None
self.quitting = 1
sys.settrace(None)
# Derived classes and clients can call the following methods
# to manipulate breakpoints. These methods return an
# error message is something went wrong, None if all is well.
# Set_break prints out the breakpoint line and file:lineno.
# Call self.get_*break*() to see the breakpoints or better
# for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
def set_break(self, filename, lineno, temporary=0, cond = None):
filename = self.canonic(filename)
import linecache # Import as late as possible
line = linecache.getline(filename, lineno)
if not line:
return 'Line %s:%d does not exist' % (filename,
lineno)
if not self.breaks.has_key(filename):
self.breaks[filename] = []
list = self.breaks[filename]
if not lineno in list:
list.append(lineno)
bp = Breakpoint(filename, lineno, temporary, cond)
def clear_break(self, filename, lineno):
filename = self.canonic(filename)
if not self.breaks.has_key(filename):
return 'There are no breakpoints in %s' % filename
if lineno not in self.breaks[filename]:
return 'There is no breakpoint at %s:%d' % (filename,
lineno)
# If there's only one bp in the list for that file,line
# pair, then remove the breaks entry
for bp in Breakpoint.bplist[filename, lineno][:]:
bp.deleteMe()
if not Breakpoint.bplist.has_key((filename, lineno)):
self.breaks[filename].remove(lineno)
if not self.breaks[filename]:
del self.breaks[filename]
def clear_bpbynumber(self, arg):
try:
number = int(arg)
except:
return 'Non-numeric breakpoint number (%s)' % arg
try:
bp = Breakpoint.bpbynumber[number]
except IndexError:
return 'Breakpoint number (%d) out of range' % number
if not bp:
return 'Breakpoint (%d) already deleted' % number
self.clear_break(bp.file, bp.line)
def clear_all_file_breaks(self, filename):
filename = self.canonic(filename)
if not self.breaks.has_key(filename):
return 'There are no breakpoints in %s' % filename
for line in self.breaks[filename]:
blist = Breakpoint.bplist[filename, line]
for bp in blist:
bp.deleteMe()
del self.breaks[filename]
def clear_all_breaks(self):
if not self.breaks:
return 'There are no breakpoints'
for bp in Breakpoint.bpbynumber:
if bp:
bp.deleteMe()
self.breaks = {}
def get_break(self, filename, lineno):
filename = self.canonic(filename)
return self.breaks.has_key(filename) and \
lineno in self.breaks[filename]
def get_breaks(self, filename, lineno):
filename = self.canonic(filename)
return self.breaks.has_key(filename) and \
lineno in self.breaks[filename] and \
Breakpoint.bplist[filename, lineno] or []
def get_file_breaks(self, filename):
filename = self.canonic(filename)
if self.breaks.has_key(filename):
return self.breaks[filename]
else:
return []
def get_all_breaks(self):
return self.breaks
# Derived classes and clients can call the following method
# to get a data structure representing a stack trace.
def get_stack(self, f, t):
stack = []
if t and t.tb_frame is f:
t = t.tb_next
while f is not None:
stack.append((f, f.f_lineno))
if f is self.botframe:
break
f = f.f_back
stack.reverse()
i = max(0, len(stack) - 1)
while t is not None:
stack.append((t.tb_frame, t.tb_lineno))
t = t.tb_next
return stack, i
#
def format_stack_entry(self, frame_lineno, lprefix=': '):
import linecache, repr
frame, lineno = frame_lineno
filename = self.canonic(frame.f_code.co_filename)
s = filename + '(' + `lineno` + ')'
if frame.f_code.co_name:
s = s + frame.f_code.co_name
else:
s = s + "<lambda>"
if frame.f_locals.has_key('__args__'):
args = frame.f_locals['__args__']
else:
args = None
if args:
s = s + repr.repr(args)
else:
s = s + '()'
if frame.f_locals.has_key('__return__'):
rv = frame.f_locals['__return__']
s = s + '->'
s = s + repr.repr(rv)
line = linecache.getline(filename, lineno)
if line: s = s + lprefix + line.strip()
return s
# The following two methods can be called by clients to use
# a debugger to debug a statement, given as a string.
def run(self, cmd, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
if not isinstance(cmd, types.CodeType):
cmd = cmd+'\n'
try:
try:
exec cmd in globals, locals
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
def runeval(self, expr, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
if not isinstance(expr, types.CodeType):
expr = expr+'\n'
try:
try:
return eval(expr, globals, locals)
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
def runctx(self, cmd, globals, locals):
# B/W compatibility
self.run(cmd, globals, locals)
# This method is more useful to debug a single function call.
def runcall(self, func, *args):
self.reset()
sys.settrace(self.trace_dispatch)
res = None
try:
try:
res = apply(func, args)
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
return res
def set_trace():
    """Module-level convenience: start debugging from the caller's frame
    with a fresh (interaction-less) Bdb instance."""
    Bdb().set_trace()
class Breakpoint:
    """Breakpoint class
    Implements temporary breakpoints, ignore counts, disabling and
    (re)-enabling, and conditionals.
    Breakpoints are indexed by number through bpbynumber and by
    the file,line tuple using bplist.  The former points to a
    single instance of class Breakpoint.  The latter points to a
    list of such instances since there may be more than one
    breakpoint per line.
    """
    # XXX Keeping state in the class is a mistake -- this means
    # you cannot have more than one active Bdb instance.
    next = 1        # Next bp to be assigned
    bplist = {}     # indexed by (file, lineno) tuple
    bpbynumber = [None] # Each entry is None or an instance of Bpt
                # index 0 is unused, except for marking an
                # effective break .... see effective()
    def __init__(self, file, line, temporary=0, cond = None):
        self.file = file    # This better be in canonical form!
        self.line = line
        self.temporary = temporary
        self.cond = cond        # expression string evaluated in the frame
        self.enabled = 1
        self.ignore = 0         # skip this many (matching) hits before firing
        self.hits = 0           # number of times the line was reached enabled
        self.number = Breakpoint.next
        Breakpoint.next = Breakpoint.next + 1
        # Build the two lists
        self.bpbynumber.append(self)
        if self.bplist.has_key((file, line)):
            self.bplist[file, line].append(self)
        else:
            self.bplist[file, line] = [self]
    def deleteMe(self):
        """Unregister this breakpoint from both class-level indexes."""
        index = (self.file, self.line)
        self.bpbynumber[self.number] = None   # No longer in list
        self.bplist[index].remove(self)
        if not self.bplist[index]:
            # No more bp for this f:l combo
            del self.bplist[index]
    def enable(self):
        self.enabled = 1
    def disable(self):
        self.enabled = 0
    def bpprint(self):
        """Print a one-line summary plus condition/ignore/hit details."""
        if self.temporary:
            disp = 'del  '
        else:
            disp = 'keep '
        if self.enabled:
            disp = disp + 'yes'
        else:
            disp = disp + 'no '
        print '%-4dbreakpoint    %s at %s:%d' % (self.number, disp,
                                                 self.file, self.line)
        if self.cond:
            print '\tstop only if %s' % (self.cond,)
        if self.ignore:
            print '\tignore next %d hits' % (self.ignore)
        if (self.hits):
            if (self.hits > 1): ss = 's'
            else: ss = ''
            print ('\tbreakpoint already hit %d time%s' %
                   (self.hits, ss))
# -----------end of Breakpoint class----------
# Determines if there is an effective (active) breakpoint at this
# line of code.  Returns a (breakpoint, flag) pair, or (None, None) if none.
def effective(file, line, frame):
    """Determine which breakpoint for this file:line is to be acted upon.
    Called only if we know there is a bpt at this location.  Returns the
    breakpoint that was triggered together with a flag that indicates if
    it is ok to delete a temporary bp, or (None, None) when no enabled
    breakpoint fires.
    """
    for b in Breakpoint.bplist[file, line]:
        if not b.enabled:
            continue
        # Count every hit while the breakpoint is enabled.
        b.hits = b.hits + 1
        if not b.cond:
            # Unconditional breakpoint: honour the ignore count,
            # otherwise fire and allow deletion of a temporary bp.
            if b.ignore > 0:
                b.ignore = b.ignore - 1
                continue
            return (b, 1)
        # Conditional breakpoint: the ignore count applies only to
        # hits where the condition evaluates to true.
        try:
            if eval(b.cond, frame.f_globals, frame.f_locals):
                if b.ignore > 0:
                    b.ignore = b.ignore - 1
                else:
                    return (b, 1)
        except:
            # If the condition cannot be evaluated, the most conservative
            # thing is to stop on the breakpoint regardless of the ignore
            # count; keep a temporary bp alive as another hint to the user.
            return (b, 0)
    return (None, None)
# -------------------- testing --------------------
class Tdb(Bdb):
    """Trivial tracing debugger used by test(): prints every event."""
    def user_call(self, frame, args):
        name = frame.f_code.co_name
        if not name: name = '???'
        print '+++ call', name, args
    def user_line(self, frame):
        # Echo file, line number, function name and the source line itself.
        import linecache
        name = frame.f_code.co_name
        if not name: name = '???'
        fn = self.canonic(frame.f_code.co_filename)
        line = linecache.getline(fn, frame.f_lineno)
        print '+++', fn, frame.f_lineno, name, ':', line.strip()
    def user_return(self, frame, retval):
        print '+++ return', retval
    def user_exception(self, frame, exc_stuff):
        print '+++ exception', exc_stuff
        # Keep running after reporting the exception.
        self.set_continue()
def foo(n):
    """Demo function traced by test(): calls bar(n*10) and prints the result."""
    print 'foo(', n, ')'
    x = bar(n*10)
    print 'bar returned', x
def bar(a):
    """Demo helper for test(): returns half its argument.
    NOTE: '/' is truncating integer division here when a is an int
    (Python 2 semantics).
    """
    print 'bar(', a, ')'
    return a/2
def test():
    """Run the demo: trace 'bdb.foo(10)' under the Tdb tracing debugger."""
    t = Tdb()
    t.run('import bdb; bdb.foo(10)')
# end
| {
"content_hash": "b780cba3bd9c39585201b1335725e199",
"timestamp": "",
"source": "github",
"line_count": 565,
"max_line_length": 78,
"avg_line_length": 33.13274336283186,
"alnum_prop": 0.5369123931623931,
"repo_name": "ai-ku/langvis",
"id": "8f1367defff2647e9750eff996717df0f4e9e7b0",
"size": "18720",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "jython-2.1/Lib/bdb.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1391"
},
{
"name": "Haskell",
"bytes": "1005"
},
{
"name": "Java",
"bytes": "7426611"
},
{
"name": "Perl",
"bytes": "10563"
},
{
"name": "Python",
"bytes": "1744764"
},
{
"name": "Ruby",
"bytes": "691"
},
{
"name": "Shell",
"bytes": "701"
}
],
"symlink_target": ""
} |
"""
The :mod:`sklearn.grid_search` includes utilities to fine-tune the parameters
of an estimator.
"""
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from .base import BaseEstimator, is_classifier, clone
from .base import MetaEstimatorMixin
from .cross_validation import check_cv
from .cross_validation import _fit_and_score
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import check_random_state
from .utils.random import sample_without_replacement
from .utils.validation import _num_samples, indexable
from .utils.metaestimators import if_delegate_has_method
from .metrics.scorer import check_scoring
from .exceptions import ChangedBehaviorWarning
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
           'ParameterSampler', 'RandomizedSearchCV']
# Emitted once at import time: this entire module is deprecated in favor
# of sklearn.model_selection (see the 0.18 changelog).
warnings.warn("This module was deprecated in version 0.18 in favor of the "
              "model_selection module into which all the refactored classes "
              "and functions are moved. This module will be removed in 0.20.",
              DeprecationWarning)
class ParameterGrid(object):
    """Grid of parameters with a discrete number of values for each.
    .. deprecated:: 0.18
        This module will be removed in 0.20.
        Use :class:`sklearn.model_selection.ParameterGrid` instead.
    Can be used to iterate over parameter value combinations with the
    Python built-in function iter.
    Read more in the :ref:`User Guide <grid_search>`.
    Parameters
    ----------
    param_grid : dict of string to sequence, or sequence of such
        The parameter grid to explore, as a dictionary mapping estimator
        parameters to sequences of allowed values.
        An empty dict signifies default parameters.
        A sequence of dicts signifies a sequence of grids to search, and is
        useful to avoid exploring parameter combinations that make no sense
        or have no effect. See the examples below.
    Examples
    --------
    >>> from sklearn.grid_search import ParameterGrid
    >>> param_grid = {'a': [1, 2], 'b': [True, False]}
    >>> list(ParameterGrid(param_grid)) == (
    ...    [{'a': 1, 'b': True}, {'a': 1, 'b': False},
    ...     {'a': 2, 'b': True}, {'a': 2, 'b': False}])
    True
    >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
    >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
    ...                               {'kernel': 'rbf', 'gamma': 1},
    ...                               {'kernel': 'rbf', 'gamma': 10}]
    True
    >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
    True
    See also
    --------
    :class:`GridSearchCV`:
        uses ``ParameterGrid`` to perform a full parallelized parameter search.
    """
    def __init__(self, param_grid):
        if isinstance(param_grid, Mapping):
            # wrap dictionary in a singleton list to support either dict
            # or list of dicts
            param_grid = [param_grid]
        self.param_grid = param_grid
    def __iter__(self):
        """Iterate over the points in the grid.
        Returns
        -------
        params : iterator over dict of string to any
            Yields dictionaries mapping each estimator parameter to one of its
            allowed values.
        """
        for p in self.param_grid:
            # Always sort the keys of a dictionary, for reproducibility
            items = sorted(p.items())
            if not items:
                yield {}
            else:
                keys, values = zip(*items)
                for v in product(*values):
                    params = dict(zip(keys, v))
                    yield params
    def __len__(self):
        """Number of points on the grid."""
        # Product function that can handle iterables (np.prod can't).
        product = partial(reduce, operator.mul)
        return sum(product(len(v) for v in p.values()) if p else 1
                   for p in self.param_grid)
    def __getitem__(self, ind):
        """Get the parameters that would be ``ind``th in iteration
        Parameters
        ----------
        ind : int
            The iteration index
        Returns
        -------
        params : dict of string to any
            Equal to list(self)[ind]
        """
        # This is used to make discrete sampling without replacement memory
        # efficient.
        for sub_grid in self.param_grid:
            # XXX: could memoize information used here
            if not sub_grid:
                if ind == 0:
                    return {}
                else:
                    ind -= 1
                    continue
            # Reverse so most frequent cycling parameter comes first
            keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
            sizes = [len(v_list) for v_list in values_lists]
            # np.prod, not the deprecated alias np.product (the alias was
            # removed in NumPy 2.0); semantics are identical.
            total = np.prod(sizes)
            if ind >= total:
                # Try the next grid
                ind -= total
            else:
                out = {}
                for key, v_list, n in zip(keys, values_lists, sizes):
                    ind, offset = divmod(ind, n)
                    out[key] = v_list[offset]
                return out
        raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
    """Generator on parameters sampled from given distributions.
    .. deprecated:: 0.18
        This module will be removed in 0.20.
        Use :class:`sklearn.model_selection.ParameterSampler` instead.
    Non-deterministic iterable over random candidate combinations for hyper-
    parameter search. If all parameters are presented as a list,
    sampling without replacement is performed. If at least one parameter
    is given as a distribution, sampling with replacement is used.
    It is highly recommended to use continuous distributions for continuous
    parameters.
    Note that as of SciPy 0.12, the ``scipy.stats.distributions`` do not accept
    a custom RNG instance and always use the singleton RNG from
    ``numpy.random``. Hence setting ``random_state`` will not guarantee a
    deterministic iteration whenever ``scipy.stats`` distributions are used to
    define the parameter search space.
    Read more in the :ref:`User Guide <grid_search>`.
    Parameters
    ----------
    param_distributions : dict
        Dictionary where the keys are parameters and values
        are distributions from which a parameter is to be sampled.
        Distributions either have to provide a ``rvs`` function
        to sample from them, or can be given as a list of values,
        where a uniform distribution is assumed.
    n_iter : integer
        Number of parameter settings that are produced.
    random_state : int, RandomState instance or None, optional (default=None)
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    params : dict of string to any
        **Yields** dictionaries mapping each estimator parameter to
        as sampled value.
    Examples
    --------
    >>> from sklearn.grid_search import ParameterSampler
    >>> from scipy.stats.distributions import expon
    >>> import numpy as np
    >>> np.random.seed(0)
    >>> param_grid = {'a':[1, 2], 'b': expon()}
    >>> param_list = list(ParameterSampler(param_grid, n_iter=4))
    >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
    ...                 for d in param_list]
    >>> rounded_list == [{'b': 0.89856, 'a': 1},
    ...                  {'b': 0.923223, 'a': 1},
    ...                  {'b': 1.878964, 'a': 2},
    ...                  {'b': 1.038159, 'a': 2}]
    True
    """
    def __init__(self, param_distributions, n_iter, random_state=None):
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state
    def __iter__(self):
        # A dimension given as a plain list has no ``rvs`` method; when
        # *every* dimension is such a list the whole space is discrete and
        # we can sample without replacement.
        discrete_only = all(not hasattr(v, "rvs")
                            for v in self.param_distributions.values())
        rnd = check_random_state(self.random_state)
        if discrete_only:
            # Look up sampled parameter settings in an explicit grid.
            param_grid = ParameterGrid(self.param_distributions)
            grid_size = len(param_grid)
            if grid_size < self.n_iter:
                raise ValueError(
                    "The total space of parameters %d is smaller "
                    "than n_iter=%d." % (grid_size, self.n_iter)
                    + " For exhaustive searches, use GridSearchCV.")
            chosen = sample_without_replacement(grid_size, self.n_iter,
                                                random_state=rnd)
            for i in chosen:
                yield param_grid[i]
        else:
            # Sort the keys for reproducibility, then draw each dimension
            # independently: ``rvs`` for distributions, uniform choice for
            # lists.
            items = sorted(self.param_distributions.items())
            for _ in six.moves.range(self.n_iter):
                sample = dict()
                for name, dim in items:
                    if hasattr(dim, "rvs"):
                        sample[name] = dim.rvs()
                    else:
                        sample[name] = dim[rnd.randint(len(dim))]
                yield sample
    def __len__(self):
        """Number of points that will be sampled."""
        return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
                   verbose, error_score='raise', **fit_params):
    """Run fit on one set of parameters.
    .. deprecated:: 0.18
        This module will be removed in 0.20.
        Use :func:`sklearn.model_selection.fit_grid_point` instead.
    Parameters
    ----------
    X : array-like, sparse matrix or list
        Input data.
    y : array-like or None
        Targets for input data.
    estimator : estimator object
        A object of that type is instantiated for each grid point.
        This is assumed to implement the scikit-learn estimator interface.
        Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed.
    parameters : dict
        Parameters to be set on estimator for this grid point.
    train : ndarray, dtype int or bool
        Boolean mask or indices for training set.
    test : ndarray, dtype int or bool
        Boolean mask or indices for test set.
    scorer : callable or None.
        If provided must be a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    verbose : int
        Verbosity level.
    **fit_params : kwargs
        Additional parameter passed to the fit function of the estimator.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    Returns
    -------
    score : float
        Score of this parameter setting on given training / test split.
    parameters : dict
        The parameters that have been evaluated.
    n_samples_test : int
        Number of test samples in this split.
    """
    # Delegate the actual work to the shared cross-validation helper; it
    # returns (score, n_test_samples, scoring_time) -- the time is dropped.
    out = _fit_and_score(estimator, X, y, scorer, train,
                         test, verbose, parameters,
                         fit_params, error_score)
    score, n_samples_test, _ = out
    return score, parameters, n_samples_test
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a sequence.".format(name))
if len(v) == 0:
raise ValueError("Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
                                      MetaEstimatorMixin)):
    """Base class for hyper parameter search with cross-validation.

    Concrete subclasses supply the candidate parameter settings (a grid or
    a sampler) and call :meth:`_fit`; everything else -- scoring, delegation
    to the refit best estimator -- lives here.
    """
    @abstractmethod
    def __init__(self, estimator, scoring=None,
                 fit_params=None, n_jobs=1, iid=True,
                 refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
                 error_score='raise'):
        self.scoring = scoring
        self.estimator = estimator
        self.n_jobs = n_jobs
        self.fit_params = fit_params if fit_params is not None else {}
        self.iid = iid
        self.refit = refit
        self.cv = cv
        self.verbose = verbose
        self.pre_dispatch = pre_dispatch
        self.error_score = error_score
    @property
    def _estimator_type(self):
        # Mirror the wrapped estimator (classifier/regressor) for sklearn's
        # introspection helpers.
        return self.estimator._estimator_type
    @property
    def classes_(self):
        # Only available after a refit of the best estimator.
        return self.best_estimator_.classes_
    def score(self, X, y=None):
        """Returns the score on the given data, if the estimator has been refit.
        This uses the score defined by ``scoring`` where provided, and the
        ``best_estimator_.score`` method otherwise.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Input data, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        Returns
        -------
        score : float
        Notes
        -----
         * The long-standing behavior of this method changed in version 0.16.
         * It no longer uses the metric provided by ``estimator.score`` if the
           ``scoring`` parameter was set when fitting.
        """
        if self.scorer_ is None:
            raise ValueError("No score function explicitly defined, "
                             "and the estimator doesn't provide one %s"
                             % self.best_estimator_)
        if self.scoring is not None and hasattr(self.best_estimator_, 'score'):
            warnings.warn("The long-standing behavior to use the estimator's "
                          "score function in {0}.score has changed. The "
                          "scoring parameter is now used."
                          "".format(self.__class__.__name__),
                          ChangedBehaviorWarning)
        return self.scorer_(self.best_estimator_, X, y)
    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def predict(self, X):
        """Call predict on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.predict(X)
    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def predict_proba(self, X):
        """Call predict_proba on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_proba``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.predict_proba(X)
    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def predict_log_proba(self, X):
        """Call predict_log_proba on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_log_proba``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.predict_log_proba(X)
    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def decision_function(self, X):
        """Call decision_function on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``decision_function``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.decision_function(X)
    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def transform(self, X):
        """Call transform on the estimator with the best found parameters.
        Only available if the underlying estimator supports ``transform`` and
        ``refit=True``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        return self.best_estimator_.transform(X)
    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def inverse_transform(self, Xt):
        """Call inverse_transform on the estimator with the best found parameters.
        Only available if the underlying estimator implements ``inverse_transform`` and
        ``refit=True``.
        Parameters
        -----------
        Xt : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        # BUG FIX: this previously delegated to ``transform(Xt)``, so the
        # forward transform was applied instead of the inverse one (fixed
        # upstream in sklearn.model_selection).
        return self.best_estimator_.inverse_transform(Xt)
    def _fit(self, X, y, parameter_iterable):
        """Actual fitting,  performing the search over parameters."""
        estimator = self.estimator
        cv = self.cv
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
        n_samples = _num_samples(X)
        X, y = indexable(X, y)
        if y is not None:
            if len(y) != n_samples:
                raise ValueError('Target variable (y) has a different number '
                                 'of samples (%i) than data (X: %i samples)'
                                 % (len(y), n_samples))
        cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
        if self.verbose > 0:
            if isinstance(parameter_iterable, Sized):
                n_candidates = len(parameter_iterable)
                print("Fitting {0} folds for each of {1} candidates, totalling"
                      " {2} fits".format(len(cv), n_candidates,
                                         n_candidates * len(cv)))
        base_estimator = clone(self.estimator)
        pre_dispatch = self.pre_dispatch
        # One task per (candidate, fold) pair; candidates vary slowest so
        # that results arrive grouped by candidate (consumed below).
        out = Parallel(
            n_jobs=self.n_jobs, verbose=self.verbose,
            pre_dispatch=pre_dispatch
        )(
            delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
                                    train, test, self.verbose, parameters,
                                    self.fit_params, return_parameters=True,
                                    error_score=self.error_score)
                for parameters in parameter_iterable
                for train, test in cv)
        # Out is a list of triplet: score, estimator, n_test_samples
        n_fits = len(out)
        n_folds = len(cv)
        scores = list()
        grid_scores = list()
        # Walk the flat result list one candidate (= n_folds entries) at a
        # time and aggregate the per-fold scores.
        for grid_start in range(0, n_fits, n_folds):
            n_test_samples = 0
            score = 0
            all_scores = []
            for this_score, this_n_test_samples, _, parameters in \
                    out[grid_start:grid_start + n_folds]:
                all_scores.append(this_score)
                if self.iid:
                    # iid: weight each fold's score by its test-set size.
                    this_score *= this_n_test_samples
                    n_test_samples += this_n_test_samples
                score += this_score
            if self.iid:
                score /= float(n_test_samples)
            else:
                score /= float(n_folds)
            scores.append((score, parameters))
            # TODO: shall we also store the test_fold_sizes?
            grid_scores.append(_CVScoreTuple(
                parameters,
                score,
                np.array(all_scores)))
        # Store the computed scores
        self.grid_scores_ = grid_scores
        # Find the best parameters by comparing on the mean validation score:
        # note that `sorted` is deterministic in the way it breaks ties
        best = sorted(grid_scores, key=lambda x: x.mean_validation_score,
                      reverse=True)[0]
        self.best_params_ = best.parameters
        self.best_score_ = best.mean_validation_score
        if self.refit:
            # fit the best estimator using the entire dataset
            # clone first to work around broken estimators
            best_estimator = clone(base_estimator).set_params(
                **best.parameters)
            if y is not None:
                best_estimator.fit(X, y, **self.fit_params)
            else:
                best_estimator.fit(X, **self.fit_params)
            self.best_estimator_ = best_estimator
        return self
class GridSearchCV(BaseSearchCV):
    """Exhaustive search over specified parameter values for an estimator.

    .. deprecated:: 0.18
        This module will be removed in 0.20.
        Use :class:`sklearn.model_selection.GridSearchCV` instead.

    Important members are ``fit`` and ``predict``.  GridSearchCV implements
    a "fit" and a "score" method, and also exposes "predict",
    "predict_proba", "decision_function", "transform" and
    "inverse_transform" when the underlying estimator implements them.
    The parameters of the estimator are optimized by cross-validated
    grid-search over a parameter grid.

    Read more in the :ref:`User Guide <grid_search>`.

    Parameters
    ----------
    estimator : estimator object.
        An object of that type is instantiated for each grid point.  It is
        assumed to implement the scikit-learn estimator interface; it must
        either provide a ``score`` function or ``scoring`` must be passed.

    param_grid : dict or list of dictionaries
        Dictionary with parameter names (string) as keys and lists of
        settings to try as values, or a list of such dictionaries, in which
        case the grids spanned by each dictionary are all explored.

    scoring : string, callable or None, default=None
        A string (see model evaluation documentation) or a scorer callable
        with signature ``scorer(estimator, X, y)``.  If ``None``, the
        ``score`` method of the estimator is used.

    fit_params : dict, optional
        Parameters to pass to the fit method.

    n_jobs : int, default=1
        Maximum number of estimators fit in parallel.  ``-1`` uses all
        CPUs; for values below ``-1``, ``(n_cpus + n_jobs + 1)`` are used.

        .. versionchanged:: 0.17
           Upgraded to joblib 0.9.3.

    pre_dispatch : int, or string, optional
        Number of jobs dispatched during parallel execution: ``None``
        (spawn everything immediately), an int (exact total number of
        jobs), or an expression in ``n_jobs`` such as ``'2*n_jobs'``.
        Reduce it to bound memory consumption.

    iid : boolean, default=True
        If True, the data is assumed identically distributed across folds
        and the loss minimized is the total loss per sample rather than
        the mean loss across folds.

    cv : int, cross-validation generator or an iterable, optional
        Cross-validation splitting strategy: ``None`` for the default
        3-fold cross-validation, an integer number of folds, a CV
        generator object, or an iterable of train/test splits.  For
        integer/``None`` inputs, classifiers with binary or multiclass
        ``y`` use :class:`sklearn.model_selection.StratifiedKFold`;
        everything else uses :class:`sklearn.model_selection.KFold`.
        Refer to the :ref:`User Guide <cross_validation>` for details.

    refit : boolean, default=True
        Refit the best estimator on the entire dataset.  If False, it is
        impossible to make predictions with this instance after fitting.

    verbose : integer
        Controls the verbosity: the higher, the more messages.

    error_score : 'raise' (default) or numeric
        Value assigned to the score if an error occurs while fitting.  If
        'raise', the error is raised; with a numeric value a
        FitFailedWarning is raised instead.  The refit step always raises.

    Attributes
    ----------
    grid_scores_ : list of named tuples
        One entry per parameter setting, with attributes ``parameters``
        (a dict), ``mean_validation_score`` (mean over CV folds) and
        ``cv_validation_scores`` (per-fold scores).

    best_estimator_ : estimator
        Estimator chosen by the search, i.e. the one which gave the
        highest score on the left-out data.  Only available if
        ``refit=True``.

    best_score_ : float
        Score of best_estimator on the left-out data.

    best_params_ : dict
        Parameter setting that gave the best results on the hold-out data.

    scorer_ : function
        Scorer function used on the held-out data to pick the best
        parameters for the model.

    Notes
    -----
    The parameters selected maximize the score of the left-out data unless
    an explicit ``scoring`` is passed.  With ``n_jobs`` above one, the data
    is copied for each grid point (not ``n_jobs`` times); set
    ``pre_dispatch`` (a reasonable value is ``2 * n_jobs``) to bound the
    number of copies when memory is tight.

    See Also
    --------
    :class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
    :func:`sklearn.cross_validation.train_test_split`:
        splits data into a development and an evaluation set.
    :func:`sklearn.metrics.make_scorer`:
        makes a scorer from a performance metric or loss function.
    """

    def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
                 n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
                 pre_dispatch='2*n_jobs', error_score='raise'):
        # Shared configuration is handled by BaseSearchCV; only the grid
        # itself is stored (and validated) here.
        super(GridSearchCV, self).__init__(
            estimator=estimator, scoring=scoring, fit_params=fit_params,
            n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
            pre_dispatch=pre_dispatch, error_score=error_score)
        self.param_grid = param_grid
        _check_param_grid(param_grid)

    def fit(self, X, y=None):
        """Run fit with all sets of parameters.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        """
        candidate_settings = ParameterGrid(self.param_grid)
        return self._fit(X, y, candidate_settings)
class RandomizedSearchCV(BaseSearchCV):
    """Randomized search on hyper parameters.

    .. deprecated:: 0.18
        This module will be removed in 0.20.
        Use :class:`sklearn.model_selection.RandomizedSearchCV` instead.

    RandomizedSearchCV implements a "fit" and a "score" method, and also
    exposes "predict", "predict_proba", "decision_function", "transform"
    and "inverse_transform" when the underlying estimator implements them.
    The parameters of the estimator are optimized by cross-validated
    search over parameter settings.

    In contrast to GridSearchCV, not all parameter values are tried out:
    a fixed number of settings (``n_iter``) is sampled from the specified
    distributions.  If every parameter is given as a list, sampling
    without replacement is performed; if at least one parameter is given
    as a distribution, sampling with replacement is used.  Continuous
    distributions are highly recommended for continuous parameters.

    Read more in the :ref:`User Guide <randomized_parameter_search>`.

    Parameters
    ----------
    estimator : estimator object.
        An object of that type is instantiated for each sampled point.  It
        is assumed to implement the scikit-learn estimator interface; it
        must either provide a ``score`` function or ``scoring`` must be
        passed.

    param_distributions : dict
        Dictionary with parameter names (string) as keys and distributions
        or lists of parameters to try.  Distributions must provide a
        ``rvs`` method for sampling (such as those from
        scipy.stats.distributions).  Lists are sampled uniformly.

    n_iter : int, default=10
        Number of parameter settings that are sampled; trades off runtime
        versus quality of the solution.

    scoring : string, callable or None, default=None
        A string (see model evaluation documentation) or a scorer callable
        with signature ``scorer(estimator, X, y)``.  If ``None``, the
        ``score`` method of the estimator is used.

    fit_params : dict, optional
        Parameters to pass to the fit method.

    n_jobs : int, default=1
        Maximum number of estimators fit in parallel.  ``-1`` uses all
        CPUs; for values below ``-1``, ``(n_cpus + n_jobs + 1)`` are used.

    pre_dispatch : int, or string, optional
        Number of jobs dispatched during parallel execution: ``None``
        (spawn everything immediately), an int (exact total number of
        jobs), or an expression in ``n_jobs`` such as ``'2*n_jobs'``.
        Reduce it to bound memory consumption.

    iid : boolean, default=True
        If True, the data is assumed identically distributed across folds
        and the loss minimized is the total loss per sample rather than
        the mean loss across folds.

    cv : int, cross-validation generator or an iterable, optional
        Cross-validation splitting strategy: ``None`` for the default
        3-fold cross-validation, an integer number of folds, a CV
        generator object, or an iterable of train/test splits.  For
        integer/``None`` inputs, classifiers with binary or multiclass
        ``y`` use :class:`sklearn.model_selection.StratifiedKFold`;
        everything else uses :class:`sklearn.model_selection.KFold`.
        Refer to the :ref:`User Guide <cross_validation>` for details.

    refit : boolean, default=True
        Refit the best estimator on the entire dataset.  If False, it is
        impossible to make predictions with this instance after fitting.

    verbose : integer
        Controls the verbosity: the higher, the more messages.

    random_state : int, RandomState instance or None, optional, default=None
        Pseudo random number generator state used for random uniform
        sampling from lists of possible values instead of scipy.stats
        distributions.  An int seeds the generator, a RandomState instance
        is used directly, and ``None`` uses ``np.random``.

    error_score : 'raise' (default) or numeric
        Value assigned to the score if an error occurs while fitting.  If
        'raise', the error is raised; with a numeric value a
        FitFailedWarning is raised instead.  The refit step always raises.

    Attributes
    ----------
    grid_scores_ : list of named tuples
        One entry per sampled setting, with attributes ``parameters``
        (a dict), ``mean_validation_score`` (mean over CV folds) and
        ``cv_validation_scores`` (per-fold scores).

    best_estimator_ : estimator
        Estimator chosen by the search, i.e. the one which gave the
        highest score on the left-out data.  Only available if
        ``refit=True``.

    best_score_ : float
        Score of best_estimator on the left-out data.

    best_params_ : dict
        Parameter setting that gave the best results on the hold-out data.

    Notes
    -----
    The parameters selected maximize the score of the held-out data
    according to the scoring parameter.  With ``n_jobs`` above one, the
    data is copied for each parameter setting (not ``n_jobs`` times); set
    ``pre_dispatch`` (a reasonable value is ``2 * n_jobs``) to bound the
    number of copies when memory is tight.

    See Also
    --------
    :class:`GridSearchCV`:
        does exhaustive search over a grid of parameters.
    :class:`ParameterSampler`:
        a generator over parameter settings, constructed from
        param_distributions.
    """

    def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
                 fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
                 verbose=0, pre_dispatch='2*n_jobs', random_state=None,
                 error_score='raise'):
        # Shared machinery lives in BaseSearchCV; only the sampling-specific
        # settings are stored on this subclass.
        super(RandomizedSearchCV, self).__init__(
            estimator, scoring, fit_params, n_jobs, iid,
            refit, cv, verbose, pre_dispatch, error_score)
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state

    def fit(self, X, y=None):
        """Run fit on the estimator with randomly drawn parameters.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.

        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        """
        sampler = ParameterSampler(self.param_distributions, self.n_iter,
                                   random_state=self.random_state)
        return self._fit(X, y, sampler)
| {
"content_hash": "83d62e14bfe8c8fc4e79cf57fc570c3c",
"timestamp": "",
"source": "github",
"line_count": 1053,
"max_line_length": 87,
"avg_line_length": 38.75403608736942,
"alnum_prop": 0.6095373456185061,
"repo_name": "mikebenfield/scikit-learn",
"id": "2f432362e37e44a1edd4eb36a4ff0bbb8fc04b42",
"size": "40808",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sklearn/grid_search.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "451977"
},
{
"name": "C++",
"bytes": "140261"
},
{
"name": "Makefile",
"bytes": "1512"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "7104005"
},
{
"name": "Shell",
"bytes": "19484"
}
],
"symlink_target": ""
} |
'''
Linear Decoder for RGB images
Author: Akhil P M
Courtesy: UFLDL Stanford, Siddharth Agarwal
mail: akhilpm135@gmail.com
'''
from settings import *
import h5py
class SparseAutoencoder(object):
    """Single-hidden-layer sparse autoencoder.

    All parameters (W1, W2, b1, b2) are kept rolled into one flat vector
    ``self.theta`` so the object plugs directly into scipy.optimize
    routines; the ``limit*`` offsets record where each piece lives.
    """

    def __init__(self, input_size, hidden_size, lambdaa, rho, beta):
        """Store hyper-parameters and randomly initialize the weights.

        input_size  : number of input units
        hidden_size : number of hidden units
        lambdaa     : network weight (L2) regularization factor
        rho         : desired average activation of the hidden units
        beta        : weight of the sparsity penalty term
        """
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.lambdaa = lambdaa
        self.rho = rho
        self.beta = beta

        # Offsets used to unroll theta into weights and biases.
        n_weights = hidden_size * input_size
        self.limit0 = 0
        self.limit1 = n_weights
        self.limit2 = 2 * n_weights
        self.limit3 = 2 * n_weights + hidden_size
        self.limit4 = 2 * n_weights + hidden_size + input_size

        # Uniform (Xavier-style) weight initialization, zero biases.
        rng = np.random.RandomState(np.random.randint(1, 30000))
        bound = np.sqrt(6) / np.sqrt(input_size + hidden_size + 1)
        enc_weights = np.asarray(
            rng.uniform(low=-bound, high=bound, size=(hidden_size, input_size)))
        dec_weights = np.asarray(
            rng.uniform(low=-bound, high=bound, size=(input_size, hidden_size)))
        enc_bias = np.zeros((hidden_size, 1))
        dec_bias = np.zeros((input_size, 1))

        # Roll everything into a single vector for the optimizer.
        self.theta = np.concatenate((enc_weights.flatten(), dec_weights.flatten(),
                                     enc_bias.flatten(), dec_bias.flatten()))
        print('======Autoencoder initialized===========')

    def sparse_autoencoder_cost(self, theta, trainX):
        """Return ``[cost, gradient]`` for one optimization step.

        theta  : rolled parameter vector (W1, W2, b1, b2)
        trainX : data matrix with one example per column
                 (shape: n_features x n_examples)
        """
        # n = number of training examples (columns of trainX).
        n = trainX.shape[1]

        # Unroll theta back into weights and biases.
        W1 = theta[self.limit0:self.limit1].reshape(self.hidden_size, self.input_size)
        W2 = theta[self.limit1:self.limit2].reshape(self.input_size, self.hidden_size)
        b1 = theta[self.limit2:self.limit3].reshape(self.hidden_size, 1)
        b2 = theta[self.limit3:self.limit4].reshape(self.input_size, 1)

        # Forward pass (sigmoid activations in both layers).
        hidden_act = sigmoid(np.dot(W1, trainX) + b1)
        output_act = sigmoid(np.dot(W2, hidden_act) + b2)

        # Average activation of each hidden unit across the batch.
        rho_hat = np.sum(hidden_act, axis=1) / n

        residual = output_act - trainX
        reconstruction_cost = 0.5 * np.sum(np.square(residual)) / n
        weight_decay = 0.5 * self.lambdaa * (np.sum(np.square(W1)) +
                                             np.sum(np.square(W2)))
        sparsity_penalty = self.beta * np.sum(
            self.rho * np.log(self.rho / rho_hat) +
            (1 - self.rho) * np.log((1 - self.rho) / (1 - rho_hat)))
        total_cost = reconstruction_cost + weight_decay + sparsity_penalty

        # Backward pass; the output-layer error here is the raw residual.
        delta3 = residual
        kl_grad = self.beta * (-(self.rho / rho_hat) + ((1 - self.rho) / (1 - rho_hat)))
        delta2 = np.multiply(
            np.dot(np.transpose(W2), delta3) + np.transpose(np.matrix(kl_grad)),
            np.multiply(hidden_act, 1 - hidden_act))

        # Gradients: batch-averaged, with L2 terms on the weights only.
        W1_grad = np.array(np.dot(delta2, np.transpose(trainX)) / n + self.lambdaa * W1)
        W2_grad = np.array(np.dot(delta3, np.transpose(hidden_act)) / n + self.lambdaa * W2)
        b1_grad = np.array(np.sum(delta2, axis=1) / n)
        b2_grad = np.array(np.sum(delta3, axis=1) / n)

        # Roll the gradients into a single vector for the optimizer.
        theta_grad = np.concatenate((W1_grad.flatten(), W2_grad.flatten(),
                                     b1_grad.flatten(), b2_grad.flatten()))
        return [total_cost, theta_grad]
def sigmoid(x):
    """Elementwise logistic function: 1 / (1 + e^-x)."""
    return 1 / (np.exp(-x) + 1)
def rgb2gray(rgb):
    """Convert an RGB image to grayscale using ITU-R BT.601 luma weights.

    rgb : array whose last axis holds (R, G, B[, extra]) channels; only
          the first three channels are used.

    Returns the luma Y = 0.299*R + 0.587*G + 0.114*B.

    Bug fix: the blue weight was 0.144 (a transposition typo); the
    standard BT.601 coefficient is 0.114, and the three weights must sum
    to 1 so that a white pixel maps to full intensity.
    """
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def preprocessDataset(data, num_patches, epsilon):
    """ZCA-whiten the patch matrix.

    data        : (n_features, num_patches) matrix, one patch per column
    num_patches : number of columns in ``data``
    epsilon     : regularizer added to the eigenvalues before rescaling

    Returns ``(whitened, zca_white, mean_patch)`` so the identical
    transform can later be applied to new data.
    """
    t0 = time.time()

    # Center every feature (row) by its mean across patches.
    mean_patch = np.mean(data, axis=1, keepdims=True)
    centered = data - mean_patch

    # Eigen-decompose the covariance via SVD and build the ZCA matrix
    # U * diag(1/sqrt(s + eps)) * U^T.
    sigma = np.dot(centered, np.transpose(centered)) / num_patches
    u, s, _ = np.linalg.svd(sigma, full_matrices=False)
    zca_white = np.dot(np.dot(u, np.diag(1 / np.sqrt(s + epsilon))),
                       np.transpose(u))

    whitened = np.dot(zca_white, centered)
    print('Pre-processing took %f Minutes\n' % ((time.time() - t0) / 60))
    return whitened, zca_white, mean_patch
def load_dataset(num_patches, patch_size):
    """Load ``num_patches`` grayscale images from the HDF5 file 'unlabelled.mat'.

    Picks ``num_patches`` images at random, converts each 96x96 RGB image
    to grayscale and returns a (patch_size*patch_size, num_patches) matrix
    with one flattened image per column, scaled to roughly [0, 1].

    NOTE(review): the live code path uses whole 96x96 images, so it assumes
    ``patch_size == 96`` -- the random-sub-patch sampling variant is kept
    below as commented-out code.  TODO confirm callers always pass 96.
    """
    # Alternative loader for pre-sampled patches (disabled):
    '''
    images = scipy.io.loadmat('stlSampledPatches.mat')
    images = np.array(images['patches'])
    return images
    '''
    start = time.time()
    # 'unlabelled.mat' stores the images as a matrix named 'X' of uint8 values.
    images = h5py.File('unlabelled.mat')
    images = images['X']
    images = np.asarray(images, dtype='uint8')
    # m = number of images (columns before the transpose below).
    m = images.shape[1]
    images = np.transpose(images)  # shape = 100000 * (96*96*3)
    dataset = np.zeros((patch_size * patch_size, num_patches))
    # Fresh RNG per call; the seed itself is drawn from the global np.random.
    seed = np.random.randint(1, 40000)
    rand = np.random.RandomState(seed)
    # Random-sub-patch sampling variant (disabled):
    '''
    image_indices = rand.randint(96-patch_size, size = (num_patches, 2))
    image_number = rand.randint(m, size = num_patches)
    rgbArray = np.zeros((96,96,3), 'uint8')
    for i in xrange(num_patches):
        """"get the patch indices """
        index1 = image_number[i]
        index2 = image_indices[i,0]
        index3 = image_indices[i,1]
        rgbArray[..., 0] = images[index1][0:9216].reshape(96,96)
        rgbArray[..., 1] = images[index1][9216:18432].reshape(96,96)
        rgbArray[..., 2] = images[index1][18432:].reshape(96,96)
        """"extract patch from original image"""
        patch = rgbArray[index2:index2+patch_size, index3:index3+patch_size]
        temp=np.concatenate((patch[..., 0].flatten(), patch[..., 1].flatten(), patch[..., 2].flatten()))
        dataset[:,i] = temp
    '''
    # Live path: sample whole images and convert them to grayscale columns.
    image_number = rand.randint(m, size=num_patches)
    rgbArray = np.zeros((96, 96, 3), 'uint8')
    for i in xrange(num_patches):
        """"get the patch indices """
        index1 = image_number[i]
        # Each flattened image stores the R, G and B planes consecutively
        # (96*96 = 9216 values per plane).
        rgbArray[..., 0] = images[index1][0:9216].reshape(96, 96)
        rgbArray[..., 1] = images[index1][9216:18432].reshape(96, 96)
        rgbArray[..., 2] = images[index1][18432:].reshape(96, 96)
        dataset[:, i] = rgb2gray(rgbArray).flatten()
    # Scale to ~[0, 1]. NOTE(review): 263 slightly exceeds the uint8 max of
    # 255 -- presumably deliberate headroom; confirm before changing.
    dataset = dataset / 263
    print('Loading the dataset took %f Minutes\n' % ((time.time() - start) / 60))
    return dataset
def visualizeW1(opt_W1, input_patch_size, hidden_patch_size):
    """Display the learned encoder weights as a square grid of image tiles.

    Each row of ``opt_W1`` is reshaped into an input-sized patch and drawn
    in grayscale on one subplot of a hidden_patch_size x hidden_patch_size
    grid.  Blocks until the matplotlib window is closed.
    """
    figure, axes = plt.subplots(nrows=hidden_patch_size,
                                ncols=hidden_patch_size)
    for index, axis in enumerate(axes.flat):
        # One row of W1 rendered as an image tile.
        axis.imshow(opt_W1[index, :].reshape(input_patch_size, input_patch_size),
                    cmap=plt.cm.gray, interpolation='nearest')
        axis.set_frame_on(False)
        axis.set_axis_off()
    plt.show()
def execute_sparse_autoencoder():
    """Train the sparse autoencoder on grayscale image data (main driver).

    Loads the dataset, builds a SparseAutoencoder and minimizes its cost
    with L-BFGS.  Returns ``(opt_W1, opt_b1)``: the learned encoder
    weights and biases.
    """
    start = time.time()

    # Hyper-parameters.
    input_patch_size = 96       # side length of the sampled image patches
    hidden_patch_size = 40      # side length of the hidden-unit grid
    rho = 0.1                   # target average hidden activation (sparsity)
    num_patches = 15000         # number of training patches
    lambdaa = 0.1               # weight-decay coefficient
    beta = 0.0001               # weight of the sparsity penalty term
    max_iterations = 400        # L-BFGS iteration budget
    epsilon = 0.1               # ZCA regularizer (whitening currently disabled)
    error = 0.0                 # gradient-check result (check disabled below)

    input_size = input_patch_size * input_patch_size
    hidden_size = hidden_patch_size * hidden_patch_size

    # Load the data; ZCA whitening is currently switched off.
    data_train = load_dataset(num_patches, input_patch_size)
    #data_train, zca_white, mean_patch = preprocessDataset(data_train, num_patches, epsilon)

    encoder = SparseAutoencoder(input_size, hidden_size, lambdaa, rho, beta)

    # Optional gradient check to validate the implementation (disabled):
    #error = scipy.optimize.check_grad(func, gradient, encoder.theta, encoder, data_train)
    #print('error in gradient : %f\n' %(error))

    # Optimize with L-BFGS; the cost callable returns (cost, gradient),
    # hence jac=True.
    opt_solution = scipy.optimize.minimize(
        encoder.sparse_autoencoder_cost, encoder.theta, args=(data_train,),
        method='L-BFGS-B', jac=True,
        options={'maxiter': max_iterations, 'disp': True})
    print('optimization success : %r\n' % (opt_solution.success))

    # Unroll the optimized encoder parameters.
    opt_theta = opt_solution.x
    opt_W1 = opt_theta[encoder.limit0:encoder.limit1].reshape(hidden_size, input_size)
    opt_b1 = opt_theta[encoder.limit2:encoder.limit3].reshape(hidden_size, 1)

    print('execution time(in Minutes):%f\n' % ((time.time() - start) / 60))
    #visualizeW1(np.dot(opt_W1, zca_white), input_patch_size, hidden_patch_size)
    return opt_W1, opt_b1
def extract_feature(W, b, trainX):
    """Compute hidden-layer activations sigmoid(W*x + b) for each column of trainX."""
    pre_activation = np.dot(W, trainX) + b
    return sigmoid(pre_activation)
# Script entry point: train the sparse autoencoder when run directly.
if __name__ == '__main__':
    execute_sparse_autoencoder()
| {
"content_hash": "dd50b098d9d6cdcc9759a788b949b55a",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 98,
"avg_line_length": 33.330882352941174,
"alnum_prop": 0.6806750496360027,
"repo_name": "akhilpm/Masters-Project",
"id": "1dd3950c778c36e548bcaafc3a824001fecc8939",
"size": "9066",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autoencoderDLKM/stl10/encoderGRAY.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3311"
},
{
"name": "Python",
"bytes": "224741"
},
{
"name": "Shell",
"bytes": "133"
},
{
"name": "TeX",
"bytes": "23696"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy as scipy
import lxmls.classifiers.linear_classifier as lc
import sys
from lxmls.distributions.gaussian import *
class MultinomialNaiveBayes(lc.LinearClassifier):
    """Multinomial naive Bayes classifier over document-term counts.

    Estimates class priors and per-class word likelihoods from a
    document-term matrix (optionally with add-alpha smoothing) and packs
    the log-parameters into the linear-classifier weight layout: row 0
    holds the log-priors, the remaining rows the log-likelihoods.
    """

    def __init__(self, xtype="gaussian"):
        lc.LinearClassifier.__init__(self)
        self.trained = False
        self.likelihood = 0
        self.prior = 0
        self.smooth = False        # add-alpha smoothing disabled by default
        self.smooth_param = 1      # alpha used when smoothing is enabled

    def train(self, x, y):
        """Fit priors/likelihoods from x (n_docs x n_words) and labels y.

        y is expected as a column vector of class labels, one per
        document.  Returns a (n_words+1, n_classes) matrix of
        log-parameters.
        """
        # n_docs = no. of documents, n_words = no. of unique words.
        n_docs, n_words = x.shape
        classes = np.unique(y)
        n_classes = classes.shape[0]

        prior = np.zeros(n_classes)
        likelihood = np.zeros((n_words, n_classes))

        # ----------
        # Solution to Exercise 1.1
        for c in range(n_classes):
            # Row indices of the documents labelled with class c
            # (y is a column vector, hence the 2-tuple from nonzero).
            class_docs, _ = np.nonzero(y == classes[c])
            # Prior: fraction of documents carrying this class.
            prior[c] = 1.0 * len(class_docs) / n_docs

            # Per-word occurrence totals within this class.
            class_word_counts = x[class_docs, :].sum(0)
            class_total = class_word_counts.sum()
            if self.smooth:
                # Add-alpha smoothed relative frequencies.
                likelihood[:, c] = ((class_word_counts + self.smooth_param) /
                                    (class_total + self.smooth_param * n_words))
            else:
                # Raw relative frequencies (zeros possible).
                likelihood[:, c] = class_word_counts / class_total
        # End solution to Exercise 1.1
        # ----------

        # Pack log-parameters; nan_to_num maps log(0) = -inf safely.
        params = np.zeros((n_words + 1, n_classes))
        for c in range(n_classes):
            params[0, c] = np.log(prior[c])
            params[1:, c] = np.nan_to_num(np.log(likelihood[:, c]))

        self.likelihood = likelihood
        self.prior = prior
        self.trained = True
        return params
| {
"content_hash": "161144449e1f9e140dda0045ad1f898e",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 132,
"avg_line_length": 41.121212121212125,
"alnum_prop": 0.5980103168754606,
"repo_name": "LxMLS/lxmls-toolkit",
"id": "1b44db3b002f4cf7f172ae3570d0ccbbe878ef9a",
"size": "2714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lxmls/classifiers/multinomial_naive_bayes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "168589"
},
{
"name": "Perl",
"bytes": "19058"
},
{
"name": "Python",
"bytes": "386710"
},
{
"name": "Shell",
"bytes": "865"
}
],
"symlink_target": ""
} |
"""The Jersey Core libraries
"""
from jersey._version import copyright, license, version
| {
"content_hash": "c14b530d0263bd810d9fd6c180927b93",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 55,
"avg_line_length": 22.5,
"alnum_prop": 0.7555555555555555,
"repo_name": "olix0r/tx-jersey",
"id": "4ad1da18502f790cc34ea5c37bbc3f07e1301957",
"size": "90",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "53803"
}
],
"symlink_target": ""
} |
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/prefix-limit/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to the prefix
limit for the AFI-SAFI
"""
__slots__ = (
"_path_helper",
"_extmethods",
"__max_prefixes",
"__prevent_teardown",
"__shutdown_threshold_pct",
"__restart_timer",
)
_yang_name = "config"
_pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        # NOTE: auto-generated by the pyangbind PythonClass plugin; keep the
        # structure in sync with the YANG model rather than hand-editing it.
        self._path_helper = False
        self._extmethods = False
        # max-prefixes: uint32 leaf (range 0..4294967295).
        self.__max_prefixes = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="max-prefixes",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint32",
            is_config=True,
        )
        # prevent-teardown: boolean leaf, defaults to false.
        self.__prevent_teardown = YANGDynClass(
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="prevent-teardown",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=True,
        )
        # shutdown-threshold-pct: oc-types:percentage (uint8 restricted to 0..100).
        self.__shutdown_threshold_pct = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                restriction_dict={"range": ["0..100"]},
            ),
            is_leaf=True,
            yang_name="shutdown-threshold-pct",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-types:percentage",
            is_config=True,
        )
        # restart-timer: decimal64 leaf with two fraction digits.
        self.__restart_timer = YANGDynClass(
            base=RestrictedPrecisionDecimalType(precision=2),
            is_leaf=True,
            yang_name="restart-timer",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="decimal64",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-constructor path: the source object must expose every
            # pyangbind element; only the changed elements are copied over.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Skip elements still at their defaults.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
def _path(self):
    # When this container is attached to a parent, build the path by
    # extending the parent's path with this node's YANG name; otherwise
    # fall back to the absolute schema path of this config container.
    if hasattr(self, "_parent"):
        return self._parent._path() + [self._yang_name]
    else:
        return [
            "network-instances",
            "network-instance",
            "protocols",
            "protocol",
            "bgp",
            "neighbors",
            "neighbor",
            "afi-safis",
            "afi-safi",
            "l3vpn-ipv4-unicast",
            "prefix-limit",
            "config",
        ]
def _get_max_prefixes(self):
    """
    Getter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/max_prefixes (uint32)

    YANG Description: Maximum number of prefixes that will be accepted
    from the neighbour
    """
    # Name-mangled private attribute populated in __init__ / _set_max_prefixes.
    return self.__max_prefixes
def _set_max_prefixes(self, v, load=False):
    """
    Setter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/max_prefixes (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_max_prefixes is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_max_prefixes() directly.

    YANG Description: Maximum number of prefixes that will be accepted
    from the neighbour
    """
    # If the supplied value carries a pyangbind user-type hook, coerce it
    # through that hook before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value so the uint32 range restriction is enforced;
        # YANGDynClass raises TypeError/ValueError on an incompatible value.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="max-prefixes",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint32",
            is_config=True,
        )
    except (TypeError, ValueError):
        # Surface a uniform error document describing the expected YANG type.
        raise ValueError(
            {
                "error-string": """max_prefixes must be of a type compatible with uint32""",
                "defined-type": "uint32",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""",
            }
        )
    self.__max_prefixes = t
    # Invoke the optional post-set hook when the enclosing object defines one.
    if hasattr(self, "_set"):
        self._set()
def _unset_max_prefixes(self):
    # Reset the leaf to a fresh, unconfigured YANGDynClass wrapper with the
    # same uint32 restriction used at construction time.
    self.__max_prefixes = YANGDynClass(
        base=RestrictedClassType(
            base_type=long,
            restriction_dict={"range": ["0..4294967295"]},
            int_size=32,
        ),
        is_leaf=True,
        yang_name="max-prefixes",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="uint32",
        is_config=True,
    )
def _get_prevent_teardown(self):
    """
    Getter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/prevent_teardown (boolean)

    YANG Description: Do not tear down the BGP session when the maximum
    prefix limit is exceeded, but rather only log a
    warning. The default of this leaf is false, such
    that when it is not specified, the session is torn
    down.
    """
    # Name-mangled private attribute; defaults to YANGBool("false").
    return self.__prevent_teardown
def _set_prevent_teardown(self, v, load=False):
    """
    Setter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/prevent_teardown (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_prevent_teardown is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_prevent_teardown() directly.

    YANG Description: Do not tear down the BGP session when the maximum
    prefix limit is exceeded, but rather only log a
    warning. The default of this leaf is false, such
    that when it is not specified, the session is torn
    down.
    """
    # Coerce through the pyangbind user-type hook when present.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap as a YANGBool leaf (default "false"); incompatible values
        # raise TypeError/ValueError inside YANGDynClass.
        t = YANGDynClass(
            v,
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="prevent-teardown",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=True,
        )
    except (TypeError, ValueError):
        # Surface a uniform error document describing the expected YANG type.
        raise ValueError(
            {
                "error-string": """prevent_teardown must be of a type compatible with boolean""",
                "defined-type": "boolean",
                "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
            }
        )
    self.__prevent_teardown = t
    # Invoke the optional post-set hook when the enclosing object defines one.
    if hasattr(self, "_set"):
        self._set()
def _unset_prevent_teardown(self):
    # Reset the leaf to an unconfigured YANGBool wrapper (default "false").
    self.__prevent_teardown = YANGDynClass(
        base=YANGBool,
        default=YANGBool("false"),
        is_leaf=True,
        yang_name="prevent-teardown",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="boolean",
        is_config=True,
    )
def _get_shutdown_threshold_pct(self):
    """
    Getter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/shutdown_threshold_pct (oc-types:percentage)

    YANG Description: Threshold on number of prefixes that can be received
    from a neighbour before generation of warning messages
    or log entries. Expressed as a percentage of
    max-prefixes
    """
    # Name-mangled private attribute; an int restricted to 0..100.
    return self.__shutdown_threshold_pct
def _set_shutdown_threshold_pct(self, v, load=False):
    """
    Setter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/shutdown_threshold_pct (oc-types:percentage)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_shutdown_threshold_pct is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_shutdown_threshold_pct() directly.

    YANG Description: Threshold on number of prefixes that can be received
    from a neighbour before generation of warning messages
    or log entries. Expressed as a percentage of
    max-prefixes
    """
    # Coerce through the pyangbind user-type hook when present.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Nested restriction: an 8-bit int (0..255) further restricted to the
        # percentage range 0..100, per the oc-types:percentage typedef.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int,
                    restriction_dict={"range": ["0..255"]},
                    int_size=8,
                ),
                restriction_dict={"range": ["0..100"]},
            ),
            is_leaf=True,
            yang_name="shutdown-threshold-pct",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-types:percentage",
            is_config=True,
        )
    except (TypeError, ValueError):
        # Surface a uniform error document describing the expected YANG type.
        raise ValueError(
            {
                "error-string": """shutdown_threshold_pct must be of a type compatible with oc-types:percentage""",
                "defined-type": "oc-types:percentage",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=True)""",
            }
        )
    self.__shutdown_threshold_pct = t
    # Invoke the optional post-set hook when the enclosing object defines one.
    if hasattr(self, "_set"):
        self._set()
def _unset_shutdown_threshold_pct(self):
    # Reset the leaf to an unconfigured wrapper with the same nested
    # 0..255 / 0..100 percentage restrictions.
    self.__shutdown_threshold_pct = YANGDynClass(
        base=RestrictedClassType(
            base_type=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
            ),
            restriction_dict={"range": ["0..100"]},
        ),
        is_leaf=True,
        yang_name="shutdown-threshold-pct",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="oc-types:percentage",
        is_config=True,
    )
def _get_restart_timer(self):
    """
    Getter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/restart_timer (decimal64)

    YANG Description: Time interval in seconds after which the BGP session
    is re-established after being torn down due to exceeding
    the max-prefix limit.
    """
    # Name-mangled private attribute; a decimal64 with two digits precision.
    return self.__restart_timer
def _set_restart_timer(self, v, load=False):
    """
    Setter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/restart_timer (decimal64)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_restart_timer is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_restart_timer() directly.

    YANG Description: Time interval in seconds after which the BGP session
    is re-established after being torn down due to exceeding
    the max-prefix limit.
    """
    # Coerce through the pyangbind user-type hook when present.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap as a two-digit-precision decimal64 leaf; incompatible values
        # raise TypeError/ValueError inside YANGDynClass.
        t = YANGDynClass(
            v,
            base=RestrictedPrecisionDecimalType(precision=2),
            is_leaf=True,
            yang_name="restart-timer",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="decimal64",
            is_config=True,
        )
    except (TypeError, ValueError):
        # Surface a uniform error document describing the expected YANG type.
        raise ValueError(
            {
                "error-string": """restart_timer must be of a type compatible with decimal64""",
                "defined-type": "decimal64",
                "generated-type": """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=True)""",
            }
        )
    self.__restart_timer = t
    # Invoke the optional post-set hook when the enclosing object defines one.
    if hasattr(self, "_set"):
        self._set()
def _unset_restart_timer(self):
    # Reset the leaf to an unconfigured decimal64 wrapper (precision=2).
    self.__restart_timer = YANGDynClass(
        base=RestrictedPrecisionDecimalType(precision=2),
        is_leaf=True,
        yang_name="restart-timer",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="decimal64",
        is_config=True,
    )
# Expose each leaf as a plain attribute backed by the generated accessors.
max_prefixes = __builtin__.property(_get_max_prefixes, _set_max_prefixes)
prevent_teardown = __builtin__.property(
    _get_prevent_teardown, _set_prevent_teardown
)
shutdown_threshold_pct = __builtin__.property(
    _get_shutdown_threshold_pct, _set_shutdown_threshold_pct
)
restart_timer = __builtin__.property(_get_restart_timer, _set_restart_timer)

# Ordered registry of this container's child elements, used when
# copy-constructing from another object (see __init__).
_pyangbind_elements = OrderedDict(
    [
        ("max_prefixes", max_prefixes),
        ("prevent_teardown", prevent_teardown),
        ("shutdown_threshold_pct", shutdown_threshold_pct),
        ("restart_timer", restart_timer),
    ]
)
class config(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/neighbors/neighbor/afi-safis/afi-safi/l3vpn-ipv4-unicast/prefix-limit/config. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Configuration parameters relating to the prefix
    limit for the AFI-SAFI
    """

    # The double-underscore names below are subject to Python's private name
    # mangling (stored as _config__<leaf>); __slots__ entries are mangled the
    # same way by the class machinery, so they line up with the assignments
    # in __init__.
    __slots__ = (
        "_path_helper",
        "_extmethods",
        "__max_prefixes",
        "__prevent_teardown",
        "__shutdown_threshold_pct",
        "__restart_timer",
    )

    _yang_name = "config"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Each leaf is wrapped in YANGDynClass so its type and range
        # restrictions are enforced on assignment.
        self.__max_prefixes = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="max-prefixes",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint32",
            is_config=True,
        )
        self.__prevent_teardown = YANGDynClass(
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="prevent-teardown",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=True,
        )
        self.__shutdown_threshold_pct = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                restriction_dict={"range": ["0..100"]},
            ),
            is_leaf=True,
            yang_name="shutdown-threshold-pct",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-types:percentage",
            is_config=True,
        )
        self.__restart_timer = YANGDynClass(
            base=RestrictedPrecisionDecimalType(precision=2),
            is_leaf=True,
            yang_name="restart-timer",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="decimal64",
            is_config=True,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-construction: the single positional argument must expose
            # every element registered in _pyangbind_elements.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                # Only copy elements that were actually changed on the source.
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Extend the parent's path when attached; otherwise return the
        # absolute schema path of this config container.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "neighbors",
                "neighbor",
                "afi-safis",
                "afi-safi",
                "l3vpn-ipv4-unicast",
                "prefix-limit",
                "config",
            ]

    # --- max-prefixes leaf accessors -------------------------------------
    def _get_max_prefixes(self):
        """
        Getter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/max_prefixes (uint32)

        YANG Description: Maximum number of prefixes that will be accepted
        from the neighbour
        """
        return self.__max_prefixes

    def _set_max_prefixes(self, v, load=False):
        """
        Setter method for max_prefixes, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/max_prefixes (uint32)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_max_prefixes is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_max_prefixes() directly.

        YANG Description: Maximum number of prefixes that will be accepted
        from the neighbour
        """
        # Coerce via the pyangbind user-type hook when present, then re-wrap
        # so the uint32 range restriction is enforced.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=long,
                    restriction_dict={"range": ["0..4294967295"]},
                    int_size=32,
                ),
                is_leaf=True,
                yang_name="max-prefixes",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="uint32",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """max_prefixes must be of a type compatible with uint32""",
                    "defined-type": "uint32",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="max-prefixes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""",
                }
            )
        self.__max_prefixes = t
        # Optional post-set hook.
        if hasattr(self, "_set"):
            self._set()

    def _unset_max_prefixes(self):
        # Reset to a fresh, unconfigured wrapper.
        self.__max_prefixes = YANGDynClass(
            base=RestrictedClassType(
                base_type=long,
                restriction_dict={"range": ["0..4294967295"]},
                int_size=32,
            ),
            is_leaf=True,
            yang_name="max-prefixes",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint32",
            is_config=True,
        )

    # --- prevent-teardown leaf accessors ---------------------------------
    def _get_prevent_teardown(self):
        """
        Getter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/prevent_teardown (boolean)

        YANG Description: Do not tear down the BGP session when the maximum
        prefix limit is exceeded, but rather only log a
        warning. The default of this leaf is false, such
        that when it is not specified, the session is torn
        down.
        """
        return self.__prevent_teardown

    def _set_prevent_teardown(self, v, load=False):
        """
        Setter method for prevent_teardown, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/prevent_teardown (boolean)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_prevent_teardown is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_prevent_teardown() directly.

        YANG Description: Do not tear down the BGP session when the maximum
        prefix limit is exceeded, but rather only log a
        warning. The default of this leaf is false, such
        that when it is not specified, the session is torn
        down.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGBool,
                default=YANGBool("false"),
                is_leaf=True,
                yang_name="prevent-teardown",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="boolean",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """prevent_teardown must be of a type compatible with boolean""",
                    "defined-type": "boolean",
                    "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="prevent-teardown", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
                }
            )
        self.__prevent_teardown = t
        # Optional post-set hook.
        if hasattr(self, "_set"):
            self._set()

    def _unset_prevent_teardown(self):
        # Reset to the default YANGBool("false") wrapper.
        self.__prevent_teardown = YANGDynClass(
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="prevent-teardown",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=True,
        )

    # --- shutdown-threshold-pct leaf accessors ---------------------------
    def _get_shutdown_threshold_pct(self):
        """
        Getter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/shutdown_threshold_pct (oc-types:percentage)

        YANG Description: Threshold on number of prefixes that can be received
        from a neighbour before generation of warning messages
        or log entries. Expressed as a percentage of
        max-prefixes
        """
        return self.__shutdown_threshold_pct

    def _set_shutdown_threshold_pct(self, v, load=False):
        """
        Setter method for shutdown_threshold_pct, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/shutdown_threshold_pct (oc-types:percentage)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_shutdown_threshold_pct is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_shutdown_threshold_pct() directly.

        YANG Description: Threshold on number of prefixes that can be received
        from a neighbour before generation of warning messages
        or log entries. Expressed as a percentage of
        max-prefixes
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Nested restriction: 8-bit int (0..255) further limited to the
            # 0..100 percentage range.
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=int,
                        restriction_dict={"range": ["0..255"]},
                        int_size=8,
                    ),
                    restriction_dict={"range": ["0..100"]},
                ),
                is_leaf=True,
                yang_name="shutdown-threshold-pct",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="oc-types:percentage",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """shutdown_threshold_pct must be of a type compatible with oc-types:percentage""",
                    "defined-type": "oc-types:percentage",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..100']}), is_leaf=True, yang_name="shutdown-threshold-pct", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='oc-types:percentage', is_config=True)""",
                }
            )
        self.__shutdown_threshold_pct = t
        # Optional post-set hook.
        if hasattr(self, "_set"):
            self._set()

    def _unset_shutdown_threshold_pct(self):
        # Reset to a fresh, unconfigured percentage wrapper.
        self.__shutdown_threshold_pct = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                restriction_dict={"range": ["0..100"]},
            ),
            is_leaf=True,
            yang_name="shutdown-threshold-pct",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="oc-types:percentage",
            is_config=True,
        )

    # --- restart-timer leaf accessors ------------------------------------
    def _get_restart_timer(self):
        """
        Getter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/restart_timer (decimal64)

        YANG Description: Time interval in seconds after which the BGP session
        is re-established after being torn down due to exceeding
        the max-prefix limit.
        """
        return self.__restart_timer

    def _set_restart_timer(self, v, load=False):
        """
        Setter method for restart_timer, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/restart_timer (decimal64)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_restart_timer is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_restart_timer() directly.

        YANG Description: Time interval in seconds after which the BGP session
        is re-established after being torn down due to exceeding
        the max-prefix limit.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=RestrictedPrecisionDecimalType(precision=2),
                is_leaf=True,
                yang_name="restart-timer",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="decimal64",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """restart_timer must be of a type compatible with decimal64""",
                    "defined-type": "decimal64",
                    "generated-type": """YANGDynClass(base=RestrictedPrecisionDecimalType(precision=2), is_leaf=True, yang_name="restart-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='decimal64', is_config=True)""",
                }
            )
        self.__restart_timer = t
        # Optional post-set hook.
        if hasattr(self, "_set"):
            self._set()

    def _unset_restart_timer(self):
        # Reset to a fresh, unconfigured decimal64 wrapper (precision=2).
        self.__restart_timer = YANGDynClass(
            base=RestrictedPrecisionDecimalType(precision=2),
            is_leaf=True,
            yang_name="restart-timer",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="decimal64",
            is_config=True,
        )

    # Expose each leaf as a plain attribute backed by the generated accessors.
    max_prefixes = __builtin__.property(_get_max_prefixes, _set_max_prefixes)
    prevent_teardown = __builtin__.property(
        _get_prevent_teardown, _set_prevent_teardown
    )
    shutdown_threshold_pct = __builtin__.property(
        _get_shutdown_threshold_pct, _set_shutdown_threshold_pct
    )
    restart_timer = __builtin__.property(_get_restart_timer, _set_restart_timer)

    # Ordered registry of child elements, used by the copy-construction loop
    # in __init__.
    _pyangbind_elements = OrderedDict(
        [
            ("max_prefixes", max_prefixes),
            ("prevent_teardown", prevent_teardown),
            ("shutdown_threshold_pct", shutdown_threshold_pct),
            ("restart_timer", restart_timer),
        ]
    )
| {
"content_hash": "03f5baab8f580bece79ee485f38d9e01",
"timestamp": "",
"source": "github",
"line_count": 909,
"max_line_length": 518,
"avg_line_length": 42.50935093509351,
"alnum_prop": 0.591754871768329,
"repo_name": "napalm-automation/napalm-yang",
"id": "981e263b09e9a20db03c5d27336fdc246e5d78c6",
"size": "38665",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/neighbors/neighbor/afi_safis/afi_safi/l3vpn_ipv4_unicast/prefix_limit/config/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "370237"
},
{
"name": "Jupyter Notebook",
"bytes": "152135"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "105688785"
},
{
"name": "Roff",
"bytes": "1632"
}
],
"symlink_target": ""
} |
from fcoclient.commands.base import Command
class ProductOfferCmd(Command):
    """CLI command group for inspecting product offers."""

    @staticmethod
    def add_subparser(subparsers):
        """Register the ``offer`` sub-command with get and list actions."""
        offer_parser = subparsers.add_parser("offer", help="Inspect product offers")
        actions = offer_parser.add_subparsers()
        Command.create_get_parser(actions, "product offer")
        list_parser = Command.create_list_parser(actions, "product offers")
        list_parser.add_argument("-t", "--type",
                                 help="Only display offers for associated type")
        return offer_parser

    @property
    def resource_client(self):
        """API client endpoint for product offer operations."""
        return self.client.productoffer

    def list(self, args):
        """Print product offers, optionally filtered by associated type."""
        self.logger.info("Listing product offers")
        conditions = {}
        if args.type is not None:
            conditions["productAssociatedType"] = args.type
        offers = self.client.productoffer.list(args.no_items, **conditions)
        for offer in offers:
            line = "{}: {} ({})".format(offer["productAssociatedType"],
                                        offer.name, offer.uuid)
            print(line)
        self.logger.info("Offers listed")
| {
"content_hash": "0b84975a8e7e7399a2c8a489c305e3d2",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 78,
"avg_line_length": 34.6,
"alnum_prop": 0.6059730250481695,
"repo_name": "xlab-si/fcoclient",
"id": "45f54ef46be64b8e215ecde30384cbc812e62638",
"size": "1619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fcoclient/commands/productoffer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "68835"
}
],
"symlink_target": ""
} |
import py
from prolog.interpreter import helper, term, error
from prolog.builtin.register import expose_builtin
# ___________________________________________________________________
# operators
@expose_builtin("current_op", unwrap_spec=["obj", "obj", "obj"],
                handles_continuation=True)
def impl_current_op(engine, heap, precedence, typ, name, continuation):
    # Snapshot the heap so each failed unification attempt can be undone
    # before trying the next operator candidate.
    oldstate = heap.branch()
    for prec, allops in engine.getoperations():
        for form, ops in allops:
            for op in ops:
                try:
                    precedence.unify(term.Number(prec), heap)
                    typ.unify(term.Callable.build(form), heap)
                    name.unify(term.Callable.build(op), heap)
                    # All three arguments unified: succeed through the
                    # continuation, leaving a choice point for backtracking.
                    return continuation.call(engine, choice_point=True)
                except error.UnificationFailed:
                    heap.revert(oldstate)
    heap.discard(oldstate)
    # No operator definition matched the query: fail.
    raise error.UnificationFailed()
@expose_builtin("op", unwrap_spec=["int", "atom", "atom"])
def impl_op(engine, heap, precedence, typ, name):
    """Define, redefine, or (with precedence 0) remove a Prolog operator,
    then rebuild the engine's parser from the updated operator table."""
    from prolog.interpreter import parsing
    if engine.operations is None:
        engine.operations = parsing.make_default_operations()
    operations = engine.operations

    # First pass: drop any existing definition of `name` while recording the
    # operator group list at each precedence level for the insertion step.
    precedence_to_ops = {}
    for prec, allops in operations:
        precedence_to_ops[prec] = allops
        for form, ops in allops:
            try:
                ops.remove(name)
            except ValueError:
                pass

    # A precedence of 0 only removes the operator; otherwise (re)insert it.
    if precedence != 0:
        existing = precedence_to_ops.get(precedence)
        if existing is not None:
            # A group already exists at this precedence: extend the matching
            # form, or add a new (form, names) entry.
            for form, ops in existing:
                if form == typ:
                    ops.append(name)
                    break
            else:
                existing.append((typ, [name]))
        else:
            # Insert a brand-new precedence level, keeping the table ordered
            # by descending precedence.
            for i, (prec, _allops) in enumerate(operations):
                if precedence > prec:
                    operations.insert(i, (precedence, [(typ, [name])]))
                    break
            else:
                operations.append((precedence, [(typ, [name])]))

    # Rebuild the parser so the new operator table takes effect immediately.
    engine.parser = parsing.make_parser_at_runtime(engine.operations)
| {
"content_hash": "ed16e1f75a3c6f39f8b2bdd0a0793aa4",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 71,
"avg_line_length": 37.59322033898305,
"alnum_prop": 0.5446348061316502,
"repo_name": "cosmoharrigan/pyrolog",
"id": "56e690979f0ae1d0165e001871cc55f58c0efc98",
"size": "2218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prolog/builtin/parseraccess.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Prolog",
"bytes": "11037"
},
{
"name": "Python",
"bytes": "588612"
}
],
"symlink_target": ""
} |
__author__ = 'mworden'
"""
@package mi.dataset.parser.nutnr_b_particles
@file mi/dataset/parser/nutnr_b_particles.py
@author Mark Worden
@brief Parser for the nutnr_b_particles dataset driver
This file contains the particles that are applicable
nutnr_b_dcl_conc, nutnr_b_dcl_full and nutnr_b.
"""
from mi.core.log import get_logger
log = get_logger()
from mi.core.common import BaseEnum
from mi.core.instrument.data_particle import \
DataParticle, \
DataParticleKey, \
DataParticleValue
class DataParticleType(BaseEnum):
    # Stream names for every particle the nutnr_b family of parsers can emit,
    # covering the nutnr_b_dcl_conc, nutnr_b_dcl_full and nutnr_b variants in
    # both telemetered and recovered forms.
    NUTNR_B_DCL_CONC_INSTRUMENT_RECOVERED = 'nutnr_b_dcl_conc_instrument_recovered'
    NUTNR_B_DCL_DARK_CONC_INSTRUMENT_RECOVERED = 'nutnr_b_dcl_dark_conc_instrument_recovered'
    NUTNR_B_DCL_CONC_METADATA_RECOVERED = 'nutnr_b_dcl_conc_metadata_recovered'
    NUTNR_B_DCL_CONC_INSTRUMENT = 'nutnr_b_dcl_conc_instrument'
    NUTNR_B_DCL_DARK_CONC_INSTRUMENT = 'nutnr_b_dcl_dark_conc_instrument'
    NUTNR_B_DCL_CONC_METADATA = 'nutnr_b_dcl_conc_metadata'
    NUTNR_B_INSTRUMENT_RECOVERED = 'nutnr_b_instrument_recovered'
    NUTNR_B_DARK_INSTRUMENT_RECOVERED = 'nutnr_b_dark_instrument_recovered'
    NUTNR_B_METADATA_RECOVERED = 'nutnr_b_metadata_recovered'
    NUTNR_B_DCL_FULL_INSTRUMENT = 'nutnr_b_dcl_full_instrument'
    NUTNR_B_DCL_DARK_FULL_INSTRUMENT = 'nutnr_b_dcl_dark_full_instrument'
    NUTNR_B_DCL_FULL_INSTRUMENT_RECOVERED = 'nutnr_b_dcl_full_instrument_recovered'
    NUTNR_B_DCL_DARK_FULL_INSTRUMENT_RECOVERED = 'nutnr_b_dcl_dark_full_instrument_recovered'
    NUTNR_B_DCL_FULL_METADATA = 'nutnr_b_dcl_full_metadata'
    NUTNR_B_DCL_FULL_METADATA_RECOVERED = 'nutnr_b_dcl_full_metadata_recovered'
class NutnrBDataParticleKey(BaseEnum):
    """Particle field names shared by the nutnr_b particle classes.

    The trailing comments give each field's parameter-dictionary (PD)
    identifier.
    """
    STARTUP_TIME = 'startup_time' # PD334
    SPEC_ON_TIME = 'spec_on_time' # PD348
    SPEC_POWERED_TIME = 'spec_powered_time' # PD349
    LAMP_ON_TIME = 'lamp_on_time' # PD350
    LAMP_POWERED_TIME = 'lamp_powered_time' # PD351
    FRAME_HEADER = 'frame_header' # PD310
    FRAME_TYPE = 'frame_type' # PD311
    SERIAL_NUMBER = 'serial_number' # PD312
    DATE_OF_SAMPLE = 'date_of_sample' # PD313
    TIME_OF_SAMPLE = 'time_of_sample' # PD314
    NITRATE_CONCENTRATION = 'nitrate_concentration' # PD315
    AUX_FITTING_1 = 'aux_fitting_1' # PD316
    AUX_FITTING_2 = 'aux_fitting_2' # PD317
    AUX_FITTING_3 = 'aux_fitting_3' # PD318
    RMS_ERROR = 'rms_error' # PD319
    TEMP_INTERIOR = 'temp_interior' # PD320
    TEMP_SPECTROMETER = 'temp_spectrometer' # PD321
    TEMP_LAMP = 'temp_lamp' # PD322
    LAMP_TIME = 'lamp_time' # PD347
    HUMIDITY = 'humidity' # PD324
    VOLTAGE_LAMP = 'voltage_lamp' # PD325
    VOLTAGE_ANALOG = 'voltage_analog' # PD326
    VOLTAGE_MAIN = 'voltage_main' # PD327
    REF_CHANNEL_AVERAGE = 'ref_channel_average' # PD328
    REF_CHANNEL_VARIANCE = 'ref_channel_variance' # PD329
    SEA_WATER_DARK = 'sea_water_dark' # PD330
    SPEC_CHANNEL_AVERAGE = 'spec_channel_average' # PD331
    SPECTRAL_CHANNELS = 'spectral_channels' # PD332 (used in light frame particles)
    DARK_FRAME_SPECTRAL_CHANNELS = 'dark_frame_spectral_channels' # PD3799 (used in dark frame particles)
    DATA_LOG_FILE = 'data_log_file' # PD352
    DCL_CONTROLLER_TIMESTAMP = 'dcl_controller_timestamp' # PD2605
    STARTUP_TIME_STRING = 'startup_time_string' # PD2707
    FIRMWARE_VERSION = 'firmware_version' # PD113
    FIRMWARE_DATE = 'firmware_date' # PD293
class NutnrBMetadataRecoveredDataParticle(DataParticle):
    """
    Class for generating the nutnr b metadata recovered particle.
    """

    _data_particle_type = DataParticleType.NUTNR_B_METADATA_RECOVERED

    def _build_parsed_values(self):
        """
        Build parsed values for Recovered and Telemetered Instrument Data Particle.

        raw_data is a sequence of (field name, value, conversion function)
        triples. Entries whose value is None cannot go through
        _encode_value, so they are emitted directly as a name/None pair.
        """
        return [
            self._encode_value(name, value, function)
            if value is not None
            else {DataParticleKey.VALUE_ID: name, DataParticleKey.VALUE: None}
            for name, value, function in self.raw_data
        ]
class NutnrBInstrumentRecoveredDataParticle(DataParticle):
    """
    Class for generating the nutnr b instrument recovered particle.
    """

    _data_particle_type = DataParticleType.NUTNR_B_INSTRUMENT_RECOVERED

    def _build_parsed_values(self):
        """
        Build parsed values for Recovered and Telemetered Instrument Data Particle.

        Each raw_data entry is a (field name, value, conversion function)
        triple, encoded via the inherited _encode_value helper.
        """
        encoded = []
        for field_name, field_value, encoder in self.raw_data:
            encoded.append(self._encode_value(field_name, field_value, encoder))
        return encoded
class NutnrBDarkInstrumentRecoveredDataParticle(DataParticle):
    """
    Class for generating the nutnr b dark-frame instrument recovered
    particle.
    """

    _data_particle_type = DataParticleType.NUTNR_B_DARK_INSTRUMENT_RECOVERED

    def _build_parsed_values(self):
        """
        Build parsed values for Recovered and Telemetered Instrument Data Particle.

        Each raw_data entry is a (field name, value, conversion function)
        triple, passed straight through to _encode_value.
        """
        return [self._encode_value(*entry) for entry in self.raw_data]
class NutnrBDclInstrumentDataParticle(DataParticle):
    """
    Base class for the nutnr_b_dcl instrument particles.
    """

    def __init__(self, raw_data,
                 port_timestamp=None,
                 internal_timestamp=None,
                 preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP,
                 quality_flag=DataParticleValue.OK,
                 new_sequence=None):
        # Pure pass-through to the DataParticle constructor; defaults
        # mirror the base class conventions.
        super(NutnrBDclInstrumentDataParticle, self).__init__(
            raw_data, port_timestamp, internal_timestamp,
            preferred_timestamp, quality_flag, new_sequence)

    def _build_parsed_values(self):
        """
        Build parsed values for Recovered and Telemetered Instrument Data Particle.

        Each raw_data entry is a (field name, value, conversion function)
        triple, encoded via the inherited _encode_value helper.
        """
        results = []
        for entry in self.raw_data:
            field_name, field_value, encoder = entry
            results.append(self._encode_value(field_name, field_value, encoder))
        return results
class NutnrBDclMetadataDataParticle(DataParticle):
    """
    Base class for the nutnr_b_dcl metadata particles.
    """

    def __init__(self, raw_data,
                 port_timestamp=None,
                 internal_timestamp=None,
                 preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP,
                 quality_flag=DataParticleValue.OK,
                 new_sequence=None):
        # Pure pass-through to the DataParticle constructor; defaults
        # mirror the base class conventions.
        super(NutnrBDclMetadataDataParticle, self).__init__(
            raw_data, port_timestamp, internal_timestamp,
            preferred_timestamp, quality_flag, new_sequence)

    def _build_parsed_values(self):
        """
        Build parsed values for Recovered and Telemetered Metadata Data Particle.

        Each raw_data entry is a (field name, value, conversion function)
        triple, passed straight through to _encode_value.
        """
        return [self._encode_value(*item) for item in self.raw_data]
class NutnrBDclConcRecoveredInstrumentDataParticle(NutnrBDclInstrumentDataParticle):
    """
    Instrument data particle for recovered nutnr_b_dcl_conc data.
    """
    _data_particle_type = DataParticleType.NUTNR_B_DCL_CONC_INSTRUMENT_RECOVERED
class NutnrBDclDarkConcRecoveredInstrumentDataParticle(NutnrBDclInstrumentDataParticle):
    """
    Dark-frame instrument data particle for recovered nutnr_b_dcl_conc data.
    """
    _data_particle_type = DataParticleType.NUTNR_B_DCL_DARK_CONC_INSTRUMENT_RECOVERED
class NutnrBDclConcTelemeteredInstrumentDataParticle(NutnrBDclInstrumentDataParticle):
    """
    Instrument data particle for telemetered nutnr_b_dcl_conc data.
    """
    _data_particle_type = DataParticleType.NUTNR_B_DCL_CONC_INSTRUMENT
class NutnrBDclDarkConcTelemeteredInstrumentDataParticle(NutnrBDclInstrumentDataParticle):
    """
    Dark-frame instrument data particle for telemetered nutnr_b_dcl_conc data.
    """
    _data_particle_type = DataParticleType.NUTNR_B_DCL_DARK_CONC_INSTRUMENT
class NutnrBDclConcRecoveredMetadataDataParticle(NutnrBDclMetadataDataParticle):
    """
    Metadata data particle for recovered nutnr_b_dcl_conc data.
    """
    _data_particle_type = DataParticleType.NUTNR_B_DCL_CONC_METADATA_RECOVERED
class NutnrBDclConcTelemeteredMetadataDataParticle(NutnrBDclMetadataDataParticle):
    """
    Metadata data particle for telemetered nutnr_b_dcl_conc data.
    """
    _data_particle_type = DataParticleType.NUTNR_B_DCL_CONC_METADATA
class NutnrBDclFullRecoveredInstrumentDataParticle(NutnrBDclInstrumentDataParticle):
    """
    Instrument data particle for recovered nutnr_b_dcl_full data.
    """
    _data_particle_type = DataParticleType.NUTNR_B_DCL_FULL_INSTRUMENT_RECOVERED
class NutnrBDclDarkFullRecoveredInstrumentDataParticle(NutnrBDclInstrumentDataParticle):
    """
    Dark-frame instrument data particle for recovered nutnr_b_dcl_full data.
    """
    _data_particle_type = DataParticleType.NUTNR_B_DCL_DARK_FULL_INSTRUMENT_RECOVERED
class NutnrBDclFullTelemeteredInstrumentDataParticle(NutnrBDclInstrumentDataParticle):
    """
    Instrument data particle for telemetered nutnr_b_dcl_full data.
    """
    _data_particle_type = DataParticleType.NUTNR_B_DCL_FULL_INSTRUMENT
class NutnrBDclDarkFullTelemeteredInstrumentDataParticle(NutnrBDclInstrumentDataParticle):
    """
    Dark-frame instrument data particle for telemetered nutnr_b_dcl_full data.
    """
    _data_particle_type = DataParticleType.NUTNR_B_DCL_DARK_FULL_INSTRUMENT
class NutnrBDclFullRecoveredMetadataDataParticle(NutnrBDclMetadataDataParticle):
    """
    Metadata data particle for recovered nutnr_b_dcl_full data.
    """
    _data_particle_type = DataParticleType.NUTNR_B_DCL_FULL_METADATA_RECOVERED
class NutnrBDclFullTelemeteredMetadataDataParticle(NutnrBDclMetadataDataParticle):
"""
Class for generating Metadata Data Particles from Telemetered data.
"""
_data_particle_type = DataParticleType.NUTNR_B_DCL_FULL_METADATA | {
"content_hash": "1848b6104322364f6a41257cc462d43d",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 107,
"avg_line_length": 43.20205479452055,
"alnum_prop": 0.6198176773682125,
"repo_name": "JeffRoy/mi-dataset",
"id": "a29aef8853725eff60892b5eda591ee83e836fa9",
"size": "12615",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "mi/dataset/parser/nutnr_b_particles.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "3610231"
}
],
"symlink_target": ""
} |
import logging
import threading
import serial
import time
logger = logging.getLogger(__name__)
class DeviceManager(object):
    """Fan timebase ticks out to a collection of tick-able devices.

    Each device must expose ``reset()`` and ``tick((beat, fractick))``.
    After ``set_timebase`` is called, a daemon thread polls the timebase
    and relays every new tick to all devices until ``close()`` is called.
    """

    def __init__(self, devices):
        self.devices = devices
        self.last_tick = (0, 0)
        self.run_ticks = True
        self.skipped = 0
        self.init()

    def init(self):
        """Reset every managed device to a known state."""
        for dev in self.devices:
            dev.reset()

    def set_timebase(self, timebase):
        """Attach a timebase and start the daemon thread relaying its ticks."""
        self.timebase = timebase

        def _tick_loop():
            ltick = None
            while self.run_ticks:
                t = self.timebase.tick()
                if t:
                    if t != ltick:
                        self.tick(t)
                        # Record how far the timebase advanced since the
                        # last tick we relayed (0 means we kept up).
                        self.skipped = self.timebase.difference(ltick or (0, 0), t)
                        ltick = t[:]
                time.sleep(0.0001)

        self.tick_thread = threading.Thread(target=_tick_loop)
        self.tick_thread.daemon = True
        self.tick_thread.start()

    def tick(self, tick):
        """Relay a (beat, fractick) tuple to all devices, skipping duplicates."""
        if tick == self.last_tick:
            return None
        beat, fractick = tick
        # A new beat restarts the fractional-tick count at zero.
        if tick[0] != self.last_tick[0]:
            fractick = 0
        tick = (beat, fractick)
        # NOTE(review): SEND_BEATS and FRACTICK_FRAC are not defined in this
        # module; they must be injected as globals by the importer or this
        # raises NameError -- confirm where they are meant to come from.
        if SEND_BEATS and (fractick % FRACTICK_FRAC) == 0:
            for dev in self.devices:
                dev.tick(tick)
        self.last_tick = tick

    def reset(self):
        """Reset every device.

        Fixed: previously iterated ``self.devices`` but called
        ``self.dev.reset()``, which raised AttributeError on first use.
        """
        for dev in self.devices:
            dev.reset()

    def close(self):
        """Stop the tick-relay loop; the daemon thread then exits."""
        self.run_ticks = False
class SingleBespeckleDevice(object):
    """
    Abstraction for sending data to a single Bespeckle-based device
    over a serial port (Python 2 byte-string I/O).
    """
    # Command opcodes for the framed protocol.
    CMD_SYNC = 0x80
    CMD_TICK = 0x88
    # NOTE(review): CMD_RESET and CMD_REBOOT share the value 0x83 -- likely
    # a copy-paste slip; confirm the intended opcode for one of them.
    CMD_RESET = 0x83
    CMD_REBOOT = 0x83
    CMD_MSG = 0x81
    CMD_STOP = 0x82
    CMD_PARAM = 0x85
    def __init__(self, port, baudrate=115200):
        # Opens the serial port immediately; see FakeSingleBespeckleDevice
        # for a port-less stand-in.
        self.ser = serial.Serial(port, baudrate)
        self.addresses = {}
        # Effect ids currently believed to be active on the device.
        self.bespeckle_ids = set()
    def close(self):
        """Release the serial port."""
        self.ser.close()
    def raw_packet(self, data):
        """Write a list of byte values straight to the serial port.

        Python 2 style: each int is converted with chr() and joined into
        a byte string.
        """
        logger.debug("Serial Data: %s", ';'.join(map(lambda x: "{:02x}".format(x), data)))
        #print ("Serial Data: %s", ';'.join(map(lambda x: "{:02x}".format(x), data)))
        self.ser.write("".join([chr(d) for d in data]))
    def flush(self):
        """Block until buffered serial output is written."""
        self.ser.flush()
    def cobs_packet(self, data):
        """Encode *data* so it contains no zero bytes and send it.

        Zeros are replaced (scanning from the tail) by the distance to the
        next replaced byte, COBS-style, and the packet is prefixed with a
        0 delimiter and the final run length -- confirm against the
        device-side decoder.
        """
        #print ("Not encoded: %s", ';'.join(map(lambda x: "{:02x}".format(x), data)))
        rdata = []
        i = 0
        for d in data[::-1]:
            i += 1
            if d == 0:
                rdata.append(i)
                i = 0
            else:
                rdata.append(d)
        self.raw_packet([0, i+1] + rdata[::-1])
    def framed_packet(self, data=None, flags=0x00, addr=0x00):
        """Wrap *data* in a length/checksum frame and send it.

        Payload is padded with zeros to at least 8 bytes; at most 250
        bytes are allowed. Checksum is the low byte of the sum over
        [addr, flags] + payload.
        """
        if data is None or len(data) > 250:
            raise Exception("invalid data")
        data=list(data)
        while len(data) < 8:
            data.append(0)
        crc_frame = [addr, flags] + data
        checksum = sum(crc_frame) & 0xff
        frame = [len(data), checksum] + crc_frame
        self.cobs_packet(frame)
    def _get_next_id(self):
        """Return the lowest unused effect id (0-255)."""
        for i in range(256):
            if i not in self.bespeckle_ids:
                return i
        return 0xff # Just overwrite the last effect. lololol
    #def tick(self, time):
    #    beat, frac = time
    #    self.framed_packet([self.CMD_TICK, frac])
    def tick(self):
        """Send a bare tick command.

        NOTE(review): DeviceManager.tick calls dev.tick(tick) with an
        argument, but this signature takes none (the commented-out
        variant above did) -- confirm which is current.
        """
        self.framed_packet([self.CMD_TICK])
    def sync(self, f=0):
        """Send a sync command with optional flag byte."""
        self.framed_packet([self.CMD_SYNC, f])
    def reset(self):
        """Reset the device and forget all tracked effect ids."""
        self.framed_packet([self.CMD_RESET])
        #TODO: Send Calibration
        #for i, gc in enumerate(CAN_DEVICE_CALIBRATION.get(uid, GLOBAL_CALIBRATION)):
        #    self.canbus.send_to_all([self.canbus.CMD_PARAM, i, int(255.0 * gc) ,0,0, 0,0,0])
        self.bespeckle_ids = set()
    def bespeckle_add_effect(self, bespeckle_class, data=None):
        """Start an effect of *bespeckle_class*; return its assigned id."""
        if data is None:
            data = []
        bespeckle_id = self._get_next_id()
        self.bespeckle_ids.add(bespeckle_id)
        self.framed_packet([bespeckle_class, bespeckle_id] + list(data))
        return bespeckle_id
    def bespeckle_pop_effect(self, bespeckle_id):
        """Stop a tracked effect; returns True if it was known, else None."""
        if bespeckle_id in self.bespeckle_ids:
            self.bespeckle_ids.discard(bespeckle_id)
            self.framed_packet([self.CMD_STOP, bespeckle_id])
            return True
    def bespeckle_msg_effect(self, bespeckle_id, data=None):
        """Send a message payload to an existing effect; echoes the id back."""
        if data is None:
            data = []
        self.framed_packet([self.CMD_MSG, bespeckle_id] + list(data))
        return bespeckle_id
class FakeSingleBespeckleDevice(SingleBespeckleDevice):
    """Drop-in stand-in for SingleBespeckleDevice that opens no serial port.

    Packets are logged instead of written; a short sleep stands in for
    the cost of a real serial write.
    """

    def __init__(self, *args, **kwargs):
        # Deliberately skip SingleBespeckleDevice.__init__ so no serial
        # port is opened; only the bookkeeping state is created.
        self.addresses = {}
        self.bespeckle_ids = set()

    def raw_packet(self, data):
        hex_bytes = ';'.join("{:02x}".format(byte) for byte in data)
        logger.debug("Data: %s", hex_bytes)
        time.sleep(0.001)
| {
"content_hash": "5b5a919c35a2e0e76d350bcaf3818db9",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 93,
"avg_line_length": 29.876543209876544,
"alnum_prop": 0.5427685950413224,
"repo_name": "zbanks/beetle",
"id": "8dcdc41e7b7dd52ee9260c7e24036992492ed994",
"size": "4840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devices.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "67758"
},
{
"name": "Python",
"bytes": "46924"
}
],
"symlink_target": ""
} |
import thread
import time
import random
import sys
import os
participants = [
    # List of registered participants (fill in names before running)
]
def input_thread(L):
    """Block until the user presses Enter, then signal via shared list *L*.

    Runs on a worker thread; appending None to L is the cross-thread
    "stop" flag. Uses Python 2 ``raw_input``.
    """
    raw_input()
    L.append(None)
def the_winer_is():
    """Animate a raffle draw and print the final pick.

    Cycles random names from ``participants`` on screen until the user
    presses Enter (detected via ``input_thread`` on a worker thread).
    Raises IndexError if ``participants`` is empty.

    NOTE(review): "winer" looks like a typo for "winner" (function name
    and message); renaming would break callers, so only flagged here.
    """
    L = []
    # Watch stdin in the background; L becomes non-empty on Enter.
    thread.start_new_thread(input_thread, (L,))
    while 1:
        time.sleep(.005)
        os.system('clear')  # POSIX-only screen clear
        if L:
            print(u"The winer is:\n\n\t{0}\n".format(random.choice(participants)))
            break
        print(random.choice(participants))
if __name__ == '__main__':
    # Run the raffle only when executed as a script, not on import.
    the_winer_is()
| {
"content_hash": "08fb05c9538487a201a9f35b53d184d4",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 82,
"avg_line_length": 14.394736842105264,
"alnum_prop": 0.5740402193784278,
"repo_name": "PyStok/PyStok-1",
"id": "dc669a45ee66b9775bada0106e1c53f3967a7864",
"size": "573",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Skrypt konkursowy/pystok_konkurs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "573"
}
],
"symlink_target": ""
} |
prompt = '> '  # prompt string shown when asking the operator for input
def parse_arguments(arguments):
    """Dispatch a parsed command mapping to the matching console action.

    ``arguments`` is a docopt-style dict; the keys 'init:config',
    'init:db', 'user:create', 'user:password' and 'user:promote' are
    checked in turn, so several commands may run in one call. User
    commands read the target username from ``arguments['<username>']``.

    Refactor: the three user commands shared an identical db/UserQuery
    setup; that is now factored into ``_user_query`` and each command
    lives in its own helper.
    """
    if arguments['init:config']:
        print(arguments)
    if arguments['init:db']:
        from lightningwolf_smp.utils.creator import generate
        generate()
        print("Tables in Database created")
    if arguments['user:create']:
        _create_user(arguments['<username>'])
    if arguments['user:password']:
        _change_password(arguments['<username>'])
    if arguments['user:promote']:
        _promote_user(arguments['<username>'])


def _user_query():
    """Build a UserQuery bound to the application's database session."""
    from lightningwolf_smp.application import db
    from lightningwolf_smp.models.user_query import UserQuery
    return UserQuery(db=db)


def _create_user(username):
    """Interactively create a 'user'-credential account.

    Re-prompts until the username and e-mail are unique (and the e-mail
    valid), then reads the password without echo.
    """
    import getpass
    uq = _user_query()
    valid_username = False
    while not valid_username:
        if not uq.is_unique_user(username):
            print("This username %s is not unique in system. Try again" % username)
            username = raw_input(prompt)
        else:
            valid_username = True
    print("Unique e-mail address")
    valid_email = False
    while not valid_email:
        email = raw_input(prompt)
        if not uq.is_valid_email(email):
            print("This e-mail: %s is not valid. Try again" % email)
        else:
            if not uq.is_unique_email(email):
                print("This e-mail: %s is not unique in system. Try again" % email)
            else:
                valid_email = True
    password = getpass.getpass()
    user_return = uq.create_user(
        username=username,
        email=email,
        password=password,
        credential='user',
        active=True,
        cli=True,
    )
    if user_return is True:
        print("User created")
    else:
        print(user_return)


def _change_password(username):
    """Set a new password (read without echo) for an existing user."""
    import getpass
    uq = _user_query()
    user = uq.get_user_by_username(username=username)
    if user:
        password = getpass.getpass()
        uq.edit_password(user, password)
        print("Password for user: %s changed." % username)
    else:
        print("This username %s not exists in system." % username)


def _promote_user(username):
    """Promote an existing user via UserQuery.promote."""
    uq = _user_query()
    user = uq.get_user_by_username(username=username)
    if user:
        uq.promote(user)
        print("User: %s promoted." % username)
    else:
        print("This username %s not exists in system." % username)
| {
"content_hash": "f3b022b344acd2c3d22ba3bbd1992087",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 87,
"avg_line_length": 34.701298701298704,
"alnum_prop": 0.5669910179640718,
"repo_name": "lightningwolf/lightningwolf-smp",
"id": "ba98fe10ac486d86077e208c82ededf79a84d2bc",
"size": "2710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lightningwolf_smp/utils/console.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1133"
},
{
"name": "Python",
"bytes": "69196"
},
{
"name": "Shell",
"bytes": "5473"
}
],
"symlink_target": ""
} |
import unittest
#from pyvision.analysis.stats import cibinom
#from pyvision.analysis.Table import Table
import pyvision as pv
class ConfusionMatrix(pv.Table):
    """A pv.Table accumulating (truth, prediction) counts with rate columns.

    Cells default to 0; addData increments them. 'Rate', 'Lower', 'Upper'
    and 'Bar' columns are filled in by computeRates.
    """

    def __init__(self, algorithm_name=None, test_name=None):
        pv.Table.__init__(self,default_value=0)
        self.algorithm_name = algorithm_name
        self.test_name = test_name
        # Every class label seen as truth or prediction.
        self.classes = set()
        # Weighted counts of correct / incorrect / all observations.
        self.successes = 0
        self.failures = 0
        self.total = 0
        self.setColumnFormat('Rate',"%0.4f")
        self.setColumnFormat('Lower',"%0.4f")
        self.setColumnFormat('Upper',"%0.4f")
        self.setColumnFormat('Bar',"%-10s")

    def addData(self, truth, prediction, weight=1):
        """
        Add data to a confusion matrix. "truth" is the true/correct and
        "prediction" is the output of the classifier. Typically you would
        build this matrix one "test" at a time. To add multiple test at a
        time you can use the weight to populate the data more quickly.
        """
        self.classes.add(truth)
        self.classes.add(prediction)
        self.accumulateData(truth,prediction,weight)
        if truth == prediction:
            self.successes += weight
        else:
            self.failures += weight
        self.total += weight

    def update_rate(self):
        '''Returns a point estimate of the probability of success'''
        return float(self.successes)/float(self.total)

    def confidenceInterval(self,alpha=0.05):
        '''
        Returns the estimated a confidence interval for the success update_rate by
        modeling the success update_rate as a binomial distribution.
        '''
        return pv.cibinom(self.total,self.successes,alpha=alpha)

    def computeRates(self,alpha=0.05):
        '''
        Populates the distance matrix with more information such as
        recognition rates for each row. Call this only after all of the
        data has been added.

        Raises ZeroDivisionError for any class with no observations, so
        only call it once every class has data.
        '''
        # row_headers/col_headers come from pv.Table -- assumed sorted-in-place
        # lists of labels; TODO confirm against pv.Table.
        self.row_headers.sort()
        self.col_headers.sort()
        # Per-row rate: fraction of this row's total mass on the diagonal
        # (per-truth-class accuracy, assuming rows index truth -- confirm
        # against pv.Table.accumulateData's argument order).
        for row in self.classes:
            successes = 0
            total = 0
            for col in self.classes:
                total += self.element(row,col)
                if row == col:
                    successes += self.element(row,col)
            rate = float(successes)/total
            self.setData(row,'Rate',rate)
            # Text bar chart: one '#' per 10% (rounded).
            self.setData(row,'Bar',"#"*int(10*rate+0.5))
            self.setData(row,'Lower',pv.cibinom(total,successes,alpha)[0])
            self.setData(row,'Upper',pv.cibinom(total,successes,alpha)[1])
        # Per-column rate, written as a formatted string into a 'Total' row.
        for col in self.classes:
            successes = 0
            total = 0
            for row in self.classes:
                total += self.element(row,col)
                if row == col:
                    successes += self.element(row,col)
            rate = float(successes)/total
            self.setData('Total',col,"%0.4f"%rate)
        # Overall success rate and its binomial confidence interval.
        self.setData('Total','Rate',self.update_rate())
        self.setData('Total','Bar',"#"*int(10*self.update_rate()+0.5))
        self.setData('Total','Lower',self.confidenceInterval(alpha)[0])
        self.setData('Total','Upper',self.confidenceInterval(alpha)[1])
class _TestConfusionMatrix(unittest.TestCase):
    """Unit tests for ConfusionMatrix rates and confidence intervals."""

    def setUp(self):
        # Small multi-class example: 13 of 19 predictions are correct.
        color = ConfusionMatrix()
        observations = [
            ('red', 'red'), ('red', 'red'), ('red', 'red'),
            ('blue', 'blue'), ('blue', 'blue'), ('blue', 'blue'), ('blue', 'blue'),
            ('pink', 'pink'), ('pink', 'pink'), ('pink', 'pink'),
            ('pink', 'pink'), ('pink', 'pink'),
            ('pink', 'red'), ('pink', 'red'),
            ('blue', 'red'), ('blue', 'red'),
            ('red', 'blue'),
            ('green', 'green'),
            ('red', 'green'),
        ]
        for truth, prediction in observations:
            color.addData(truth, prediction)
        color.computeRates()
        self.color = color
        # Simulate a face recognition problem with a
        # probe set of 1000 and a gallery set of 1000
        # 0.001 FAR and 0.100 FRR
        sim_face = ConfusionMatrix()
        for truth, prediction, weight in [('accept', 'accept', 900),
                                          ('reject', 'reject', 998001),
                                          ('accept', 'reject', 100),
                                          ('reject', 'accept', 999)]:
            sim_face.addData(truth, prediction, weight)
        sim_face.computeRates()
        self.sim_face = sim_face

    def test_color(self):
        """Success rate and binomial CI for the colour example."""
        self.assertAlmostEquals(self.color.update_rate(), 0.6842, places=4)
        self.assertAlmostEquals(self.color.confidenceInterval()[0], 0.4345, places=4)
        self.assertAlmostEquals(self.color.confidenceInterval()[1], 0.8742, places=4)

    def test_verification(self):
        """Success rate and binomial CI for the simulated verification run."""
        self.assertAlmostEquals(self.sim_face.update_rate(), 0.99890100000000004, places=4)
        self.assertAlmostEquals(self.sim_face.confidenceInterval()[0], 0.99883409247930877, places=4)
        self.assertAlmostEquals(self.sim_face.confidenceInterval()[1], 0.99896499025635421, places=4)
| {
"content_hash": "8ec1f3ca015d78ace0b25059f1915827",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 99,
"avg_line_length": 33.72049689440994,
"alnum_prop": 0.5675078283293424,
"repo_name": "svohara/pyvision",
"id": "93177cd0d37f7a47a4df13b7d52ef44dc99214c2",
"size": "6987",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/pyvision/analysis/classifier/ConfusionMatrix.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1371785"
},
{
"name": "R",
"bytes": "1487"
},
{
"name": "Shell",
"bytes": "166"
}
],
"symlink_target": ""
} |
"""Test the Store interface
Many methods simply proxy to config/metadata/data store implementations, but there is some
cross-coordination and there are some convenience methods implemented at this layer.
"""
import os
import numpy as np
import numpy.testing
from pytest import fixture, raises
from smif.data_layer import Store
from smif.data_layer.data_array import DataArray
from smif.data_layer.memory_interface import (MemoryConfigStore,
MemoryDataStore,
MemoryMetadataStore)
from smif.exception import SmifDataError, SmifDataNotFoundError
from smif.metadata import Spec
@fixture
def store():
    """Store fixture backed entirely by in-memory implementations.

    Memory-backed parts are simpler than mocking each part; every other
    implementation of a part is tested fully by its own module
    (e.g. test_config_store.py).
    """
    config_store = MemoryConfigStore()
    metadata_store = MemoryMetadataStore()
    data_store = MemoryDataStore()
    return Store(
        config_store=config_store,
        metadata_store=metadata_store,
        data_store=data_store,
    )
@fixture
def full_store(store, get_sos_model, get_sector_model, energy_supply_sector_model,
               sample_scenarios, model_run, sample_dimensions):
    """Store pre-populated with dimensions, models, scenarios, a sos model
    and a model run (written in dependency order)."""
    for dim in sample_dimensions:
        store.write_dimension(dim)
    for model in (get_sector_model, energy_supply_sector_model):
        store.write_model(model)
    for scenario in sample_scenarios:
        store.write_scenario(scenario)
    store.write_sos_model(get_sos_model)
    store.write_model_run(model_run)
    return store
class TestStoreConfig():
    """CRUD round-trips and preparation helpers for Store's config facet."""

    def test_model_runs(self, store, minimal_model_run):
        """Write/read/update/delete round-trip for model runs."""
        # write
        store.write_model_run(minimal_model_run)
        # read all
        assert store.read_model_runs() == [minimal_model_run]
        # read one
        assert store.read_model_run(minimal_model_run['name']) == minimal_model_run
        # update
        store.update_model_run(minimal_model_run['name'], minimal_model_run)
        # delete
        store.delete_model_run(minimal_model_run['name'])
        assert store.read_model_runs() == []

    def test_read_model_run_sorted(self, store, minimal_model_run):
        """read_model_runs returns runs sorted by name, not insertion order."""
        y_model_run = {'name': 'y'}
        z_model_run = {'name': 'z'}
        store.write_model_run(minimal_model_run)
        store.write_model_run(z_model_run)
        store.write_model_run(y_model_run)
        expected = [minimal_model_run, y_model_run, z_model_run]
        assert store.read_model_runs() == expected

    def test_sos_models(self, store, get_sos_model, get_sector_model,
                        energy_supply_sector_model, sample_scenarios):
        """Write/read/update/delete round-trip for sos models."""
        # write
        store.write_model(get_sector_model)
        store.write_model(energy_supply_sector_model)
        for scenario in sample_scenarios:
            store.write_scenario(scenario)
        store.write_sos_model(get_sos_model)
        # read all
        assert store.read_sos_models() == [get_sos_model]
        # read one
        assert store.read_sos_model(get_sos_model['name']) == get_sos_model
        # update
        store.update_sos_model(get_sos_model['name'], get_sos_model)
        # delete
        store.delete_sos_model(get_sos_model['name'])
        assert store.read_sos_models() == []

    def test_models(self, store, get_sector_model, sample_dimensions):
        """Write/read/update/delete round-trip for sector models."""
        # setup
        for dim in sample_dimensions:
            store.write_dimension(dim)
        # write
        store.write_model(get_sector_model)
        # read all
        assert store.read_models() == [get_sector_model]
        # read one
        assert store.read_model(get_sector_model['name']) == get_sector_model
        # update
        store.update_model(get_sector_model['name'], get_sector_model)
        # delete
        store.delete_model(get_sector_model['name'])
        assert store.read_models() == []
        # teardown
        for dim in sample_dimensions:
            store.delete_dimension(dim['name'])

    def test_models_skip_coords(self, store, get_sector_model, get_sector_model_no_coords):
        """skip_coords=True returns models with coordinate data stripped."""
        # write
        store.write_model(get_sector_model)
        # read all
        assert store.read_models(skip_coords=True) == [get_sector_model_no_coords]
        # read one
        assert store.read_model(get_sector_model['name'],
                                skip_coords=True) == get_sector_model_no_coords
        # update
        store.update_model(get_sector_model['name'], get_sector_model)
        # delete
        store.delete_model(get_sector_model['name'])
        assert store.read_models() == []

    def test_scenarios(self, store, scenario, sample_dimensions):
        """Write/read/update/delete round-trip for scenarios."""
        # setup
        for dim in sample_dimensions:
            store.write_dimension(dim)
        # write
        store.write_scenario(scenario)
        # read all
        assert store.read_scenarios() == [scenario]
        # read one
        assert store.read_scenario(scenario['name']) == scenario
        # update
        store.update_scenario(scenario['name'], scenario)
        # delete
        store.delete_scenario(scenario['name'])
        assert store.read_scenarios() == []
        # teardown
        for dim in sample_dimensions:
            store.delete_dimension(dim['name'])

    def test_scenarios_skip_coords(self, store, scenario, scenario_no_coords):
        """skip_coords=True returns scenarios with coordinate data stripped."""
        # write
        store.write_scenario(scenario)
        # read all
        assert store.read_scenarios(skip_coords=True) == [scenario_no_coords]
        # read one
        assert store.read_scenario(scenario['name'], skip_coords=True) == scenario_no_coords
        # update
        store.update_scenario(scenario['name'], scenario)
        # delete
        store.delete_scenario(scenario['name'])
        assert store.read_scenarios() == []

    def test_scenario_variants(self, store, scenario):
        """Write/read/update/delete round-trip for variants of one scenario."""
        scenario_name = scenario['name']
        old_variant = scenario['variants'][0]
        # write
        store.write_scenario(scenario)
        new_variant = {
            'name': 'high',
            'description': 'Mortality (High)',
            'data': {
                'mortality': 'mortality_high.csv'
            }
        }
        store.write_scenario_variant(scenario_name, new_variant)
        # read all (variant ordering is not guaranteed)
        both = store.read_scenario_variants(scenario_name)
        assert both == [old_variant, new_variant] or both == [new_variant, old_variant]
        # read one
        assert store.read_scenario_variant(scenario_name, old_variant['name']) == old_variant
        # update
        store.update_scenario_variant(scenario_name, old_variant['name'], old_variant)
        # delete
        store.delete_scenario_variant(scenario_name, old_variant['name'])
        assert store.read_scenario_variants(scenario_name) == [new_variant]

    def test_prepare_scenario(self, store, scenario, scenario_2_variants,
                              scenario_no_variant, sample_dimensions):
        """prepare_scenario expands a single template variant into numbered
        variants and rejects scenarios without exactly one template."""
        for dim in sample_dimensions:
            store.write_dimension(dim)
        # Insert template_scenario dict in underlying
        # MemoryConfigStore
        store.write_scenario(scenario)
        store.write_scenario(scenario_2_variants)
        store.write_scenario(scenario_no_variant)
        list_of_variants = range(1, 4)
        # Must raise exception if scenario defines > 1 variants
        with raises(SmifDataError) as ex:
            store.prepare_scenario(scenario_2_variants['name'], list_of_variants)
        assert "must define one unique template variant" in str(ex.value)
        # Must raise exception if scenario defines 0 variants
        with raises(SmifDataError) as ex:
            store.prepare_scenario(scenario_no_variant['name'], list_of_variants)
        assert "must define one unique template variant" in str(ex.value)
        store.prepare_scenario(scenario['name'], list_of_variants)
        updated_scenario = store.read_scenario(scenario['name'])
        assert len(updated_scenario['variants']) == 3
        assert updated_scenario['variants'][0]['name'] == 'mortality_001'
        new_variant = store.read_scenario_variant(scenario['name'], 'mortality_001')
        # check the fields of each generated variant
        assert new_variant['name'] == 'mortality_001'
        assert new_variant['description'] == 'mortality variant number 001'
        assert new_variant['data']['mortality'] == 'mortality_low001.csv'
        assert updated_scenario['variants'][1]['name'] == 'mortality_002'
        new_variant = store.read_scenario_variant(scenario['name'], 'mortality_002')
        assert new_variant['name'] == 'mortality_002'
        assert new_variant['description'] == 'mortality variant number 002'
        assert new_variant['data']['mortality'] == 'mortality_low002.csv'
        assert updated_scenario['variants'][2]['name'] == 'mortality_003'
        new_variant = store.read_scenario_variant(scenario['name'], 'mortality_003')
        assert new_variant['name'] == 'mortality_003'
        assert new_variant['description'] == 'mortality variant number 003'
        assert new_variant['data']['mortality'] == 'mortality_low003.csv'

    def test_prepare_model_runs(self, store, model_run, sample_scenarios, sample_dimensions):
        """prepare_model_runs creates one run per scenario variant in range."""
        for dim in sample_dimensions:
            store.write_dimension(dim)
        scenario = sample_scenarios[0]
        store.write_model_run(model_run)
        store.write_strategies(model_run['name'], model_run['strategies'])
        store.write_scenario(scenario)
        # Generate 2 model runs for variants Low and High
        store.prepare_model_runs(model_run['name'], scenario['name'], 0, 1)
        list_of_mr = store.read_model_runs()
        assert len(list_of_mr) == 3
        assert list_of_mr[0] == model_run
        assert list_of_mr[1]['name'] == model_run['name'] + '_' + scenario['variants'][0][
            'name']
        assert list_of_mr[2]['name'] == model_run['name'] + '_' + scenario['variants'][1][
            'name']
        store.delete_model_run(list_of_mr[1]['name'])
        store.delete_model_run(list_of_mr[2]['name'])
        # Generate only one model run for variant Low
        store.prepare_model_runs(model_run['name'], scenario['name'], 0, 0)
        list_of_mr = store.read_model_runs()
        assert len(list_of_mr) == 2
        assert list_of_mr[0] == model_run
        assert list_of_mr[1]['name'] == model_run['name'] + '_' + scenario['variants'][0][
            'name']
        # Tidy up batch file
        os.remove('{}.batch'.format(model_run['name']))

    def test_narratives(self, store, get_sos_model):
        """Narratives are read back from the sos model by name."""
        store.write_sos_model(get_sos_model)
        expected = get_sos_model['narratives'][0]
        # read one
        assert store.read_narrative(get_sos_model['name'], expected['name']) == expected

    def test_strategies(self, store, strategies):
        """Write/read round-trip for a model run's strategies."""
        model_run_name = 'test_modelrun'
        # write
        store.write_strategies(model_run_name, strategies)
        # read
        assert store.read_strategies(model_run_name) == strategies
class TestStoreMetadata():
    """CRUD round-trips for Store's metadata facet (units, dimensions)."""

    def test_units(self, store, unit_definitions):
        """Write/read round-trip for unit definitions."""
        # write
        store.write_unit_definitions(unit_definitions)
        # read
        assert store.read_unit_definitions() == unit_definitions

    def test_dimensions(self, store, dimension):
        """Write/read/update/delete round-trip for dimensions."""
        # write
        store.write_dimension(dimension)
        # read all
        assert store.read_dimensions() == [dimension]
        # read one
        assert store.read_dimension(dimension['name']) == dimension
        # update
        store.update_dimension(dimension['name'], dimension)
        # delete
        store.delete_dimension(dimension['name'])
        assert store.read_dimensions() == []
class TestStoreData():
    @fixture(scope='function')
    def setup(self, store, sample_dimensions, scenario,
              sample_scenario_data):
        """Write one sample scenario variant and return (key, DataArray).

        key is a (scenario_name, variant_name, variable) tuple picked
        from the sample_scenario_data fixture.
        """
        for dim in sample_dimensions:
            store.write_dimension(dim)
        store.write_scenario(scenario)
        # pick out single sample
        key = next(iter(sample_scenario_data))
        scenario_name, variant_name, variable = key
        scenario_variant_data = sample_scenario_data[key]
        # write
        store.write_scenario_variant_data(
            scenario_name, variant_name, scenario_variant_data
        )
        return key, scenario_variant_data
    def test_convert_strategies_data(self, empty_store, store, strategies):
        """Strategies (with interventions) survive conversion between stores."""
        src_store = store
        tgt_store = empty_store  # Store with target data format
        model_run_name = 'test_modelrun'
        # write
        src_store.write_strategies(model_run_name, strategies)
        # convert
        src_store.convert_strategies_data(model_run_name, tgt_store)
        # assert
        for strategy in strategies:
            if 'interventions' in strategy:  # only strategies that define interventions
                expected = src_store.read_strategy_interventions(strategy)
                assert expected == tgt_store.read_strategy_interventions(strategy)
def test_scenario_variant_data(self, store, setup):
    """Variant data can be read back one timestep at a time.

    The sample_scenario_data fixture includes a timestep dimension.
    Writing takes the full DataArray (with its complete spec), while
    reading takes a specific timestep, so the data read back has lower
    dimensionality than the data written.
    """
    key, variant_data = setup
    scenario_name, variant_name, variable = key
    assert store.read_scenario_variant_data(
        scenario_name, variant_name, variable, 2015, assert_exists=True)
    # first timestep
    read_2015 = store.read_scenario_variant_data(
        scenario_name, variant_name, variable, 2015
    )
    assert (read_2015.data == variant_data.data[0]).all()
    # second timestep
    read_2016 = store.read_scenario_variant_data(
        scenario_name, variant_name, variable, 2016
    )
    assert (read_2016.data == variant_data.data[1]).all()
def test_convert_scenario_data(self, empty_store, store, sample_dimensions, scenario,
                               sample_scenario_data, model_run):
    """Scenario data is converted between stores for all variants/variables."""
    src, tgt = store, empty_store  # tgt holds the target data format
    model_run['scenarios'] = {'mortality': 'low'}
    model_run['timesteps'] = [2015, 2016]
    # both stores need the model run, dimensions and scenario definitions
    for each in (src, tgt):
        each.write_model_run(model_run)
        for dim in sample_dimensions:
            each.write_dimension(dim)
        each.write_scenario(scenario)
    # write a single sample variant's data to the source store
    key = next(iter(sample_scenario_data))
    scenario_name, variant_name, _variable = key
    src.write_scenario_variant_data(
        scenario_name, variant_name, sample_scenario_data[key]
    )
    src.convert_scenario_data(model_run['name'], tgt)
    # every variable of every variant must match after conversion
    for variant in src.read_scenario_variants(scenario_name):
        for variable in variant['data']:
            expected = src.read_scenario_variant_data(
                scenario_name, variant['name'], variable,
                timesteps=model_run['timesteps'])
            result = tgt.read_scenario_variant_data(
                scenario_name, variant['name'], variable,
                timesteps=model_run['timesteps'])
            assert result == expected
def test_scenario_variant_data_mult_one_year(self, store, setup):
    """A single-element timesteps list returns data for just that year."""
    key, variant_data = setup
    scenario_name, variant_name, variable = key
    result = store.read_scenario_variant_data(
        scenario_name, variant_name, variable, timesteps=[2016])
    assert (result.data == [variant_data.data[1]]).all()
def test_scenario_variant_data_mult_mult_years(self, store, setup):
    """Requesting both timesteps returns all the data written."""
    key, variant_data = setup
    scenario_name, variant_name, variable = key
    result = store.read_scenario_variant_data(
        scenario_name, variant_name, variable, timesteps=[2015, 2016])
    assert (result.data == variant_data.data).all()
def test_narrative_variant_data(self, store, sample_dimensions, get_sos_model,
                                get_sector_model, energy_supply_sector_model,
                                sample_narrative_data):
    """Narrative variant data survives a write/read round trip."""
    # set up dimensions, the sos model and the models it composes
    for dim in sample_dimensions:
        store.write_dimension(dim)
    store.write_sos_model(get_sos_model)
    store.write_model(get_sector_model)
    store.write_model(energy_supply_sector_model)
    # a single known sample from the fixture data
    key = ('energy', 'technology', 'high_tech_dsm', 'smart_meter_savings')
    sos_model_name, narrative_name, variant_name, param_name = key
    data = sample_narrative_data[key]
    store.write_narrative_variant_data(
        sos_model_name, narrative_name, variant_name, data)
    # probe for existence, then read the contents
    assert store.read_narrative_variant_data(
        sos_model_name, narrative_name, variant_name, param_name,
        assert_exists=True)
    actual = store.read_narrative_variant_data(
        sos_model_name, narrative_name, variant_name, param_name)
    assert actual == data
def test_convert_narrative_data(self, empty_store, store, sample_dimensions, get_sos_model,
                                get_sector_model, energy_supply_sector_model,
                                sample_narrative_data, model_run):
    """Narrative data converts between stores for every variant parameter."""
    src, tgt = store, empty_store
    # both stores need dimensions, the sos model and its sector models
    for each in (src, tgt):
        for dim in sample_dimensions:
            each.write_dimension(dim)
        each.write_sos_model(get_sos_model)
        each.write_model(get_sector_model)
        each.write_model(energy_supply_sector_model)
    # write all sample narrative data to the source store
    for narrative in get_sos_model['narratives']:
        for variant in narrative['variants']:
            for param_name in variant['data']:
                key = (get_sos_model['name'], narrative['name'],
                       variant['name'], param_name)
                src.write_narrative_variant_data(
                    get_sos_model['name'], narrative['name'], variant['name'],
                    sample_narrative_data[key])
    src.convert_narrative_data(model_run['sos_model'], tgt)
    # every parameter read from the target must match the source
    for narrative in get_sos_model['narratives']:
        for variant in narrative['variants']:
            for param_name in variant['data']:
                expected = src.read_narrative_variant_data(
                    get_sos_model['name'], narrative['name'], variant['name'],
                    param_name)
                result = tgt.read_narrative_variant_data(
                    get_sos_model['name'], narrative['name'], variant['name'],
                    param_name)
                assert result == expected
def test_model_parameter_default(self, store, get_multidimensional_param,
                                 get_sector_model, sample_dimensions):
    """A model parameter default can be written, probed and read back."""
    param = get_multidimensional_param
    for dim in sample_dimensions:
        store.write_dimension(dim)
    # the parameter's own dimensions must be registered too
    for dim in param.dims:
        store.write_dimension({'name': dim, 'elements': param.dim_elements(dim)})
    get_sector_model['parameters'] = [param.spec.as_dict()]
    store.write_model(get_sector_model)
    store.write_model_parameter_default(
        get_sector_model['name'], param.name, param)
    assert store.read_model_parameter_default(
        get_sector_model['name'], param.name, assert_exists=True)
    actual = store.read_model_parameter_default(get_sector_model['name'], param.name)
    assert actual == param
def test_convert_model_parameter_default_data(self, empty_store, store,
                                              get_multidimensional_param,
                                              get_sector_model, sample_dimensions):
    """Parameter defaults are converted from the source to the target store."""
    src_store = store
    tgt_store = empty_store  # Store with target data format
    param_data = get_multidimensional_param
    # FIX: loop variable renamed so it no longer shadows the `store` fixture
    # argument (the original `for store in [...]` rebound the parameter).
    for each_store in [src_store, tgt_store]:
        for dim in sample_dimensions:
            each_store.write_dimension(dim)
        for dim in param_data.dims:
            each_store.write_dimension(
                {'name': dim, 'elements': param_data.dim_elements(dim)})
        get_sector_model['parameters'] = [param_data.spec.as_dict()]
        each_store.write_model(get_sector_model)
    # write
    src_store.write_model_parameter_default(
        get_sector_model['name'], param_data.name, param_data)
    # convert
    src_store.convert_model_parameter_default_data(get_sector_model['name'], tgt_store)
    expected = src_store.read_model_parameter_default(
        get_sector_model['name'], param_data.name)
    result = tgt_store.read_model_parameter_default(
        get_sector_model['name'], param_data.name)
    assert result == expected
def test_interventions(self, store, sample_dimensions, get_sector_model, interventions):
    """Interventions written for a model read back unchanged."""
    for dim in sample_dimensions:
        store.write_dimension(dim)
    store.write_model(get_sector_model)
    store.write_interventions(get_sector_model['name'], interventions)
    result = store.read_interventions(get_sector_model['name'])
    assert result == interventions
def test_convert_interventions_data(self, empty_store, store, sample_dimensions,
                                    get_sector_model, interventions):
    """Interventions data converts from the source to the target store."""
    src, tgt = store, empty_store
    get_sector_model['interventions'] = ['energy_demand.csv']
    # both stores need the dimensions and the model definition
    for each in (src, tgt):
        for dim in sample_dimensions:
            each.write_dimension(dim)
        each.write_model(get_sector_model)
    src.write_interventions(get_sector_model['name'], interventions)
    src.convert_interventions_data(get_sector_model['name'], tgt)
    assert interventions == tgt.read_interventions(get_sector_model['name'])
def test_read_write_interventions_file(self, store, sample_dimensions,
                                       get_sector_model, interventions):
    """A named interventions file can be written, probed and read back."""
    for dim in sample_dimensions:
        store.write_dimension(dim)
    get_sector_model['interventions'] = ['path']
    store.write_model(get_sector_model)
    store.write_interventions_file(get_sector_model['name'], 'path', interventions)
    # probe for existence, then read the contents
    assert store.read_interventions_file(
        get_sector_model['name'], 'path', assert_exists=True)
    result = store.read_interventions_file(get_sector_model['name'], 'path')
    assert result == interventions
def test_initial_conditions(self, store, sample_dimensions, initial_conditions,
                            get_sos_model, get_sector_model, energy_supply_sector_model,
                            minimal_model_run):
    """Initial conditions round-trip per model and per model run."""
    for dim in sample_dimensions:
        store.write_dimension(dim)
    store.write_sos_model(get_sos_model)
    store.write_model_run(minimal_model_run)
    store.write_model(get_sector_model)
    store.write_model(energy_supply_sector_model)
    store.write_initial_conditions(get_sector_model['name'], initial_conditions)
    # per-model read
    assert store.read_initial_conditions(get_sector_model['name']) == initial_conditions
    # aggregate read across the whole model run
    actual = store.read_all_initial_conditions(minimal_model_run['name'])
    assert actual == initial_conditions
def test_convert_initial_conditions_data(self, empty_store, store, sample_dimensions,
                                         initial_conditions, get_sos_model,
                                         get_sector_model, energy_supply_sector_model,
                                         minimal_model_run):
    """Initial conditions are converted from the source to the target store."""
    src_store = store
    tgt_store = empty_store
    get_sector_model['initial_conditions'] = ['energy_demand.csv']
    # FIX: loop variable renamed so it no longer shadows the `store` fixture
    # argument (the original `for store in [...]` rebound the parameter).
    for each_store in [src_store, tgt_store]:
        for dim in sample_dimensions:
            each_store.write_dimension(dim)
        each_store.write_sos_model(get_sos_model)
        each_store.write_model_run(minimal_model_run)
        each_store.write_model(get_sector_model)
        each_store.write_model(energy_supply_sector_model)
    src_store.write_initial_conditions(get_sector_model['name'], initial_conditions)
    src_store.convert_initial_conditions_data(get_sector_model['name'], tgt_store)
    assert initial_conditions == tgt_store.read_initial_conditions(
        get_sector_model['name'])
def test_read_write_initial_conditions_file(self, store, sample_dimensions,
                                            get_sector_model, initial_conditions):
    """A named initial-conditions file can be written, probed and read back."""
    for dim in sample_dimensions:
        store.write_dimension(dim)
    get_sector_model['initial_conditions'] = ['path']
    store.write_model(get_sector_model)
    store.write_initial_conditions_file(
        get_sector_model['name'], 'path', initial_conditions)
    # probe for existence, then read the contents
    assert store.read_initial_conditions_file(
        get_sector_model['name'], 'path', assert_exists=True)
    result = store.read_initial_conditions_file(get_sector_model['name'], 'path')
    assert result == initial_conditions
def test_state(self, store, state):
    """State survives a write/read round trip."""
    store.write_state(state, 'model_run_name', 0, 0)
    result = store.read_state('model_run_name', 0, 0)
    assert result == state
def test_conversion_coefficients(self, store, conversion_coefficients):
    """Conversion coefficients survive a write/read round trip."""
    store.write_coefficients('source_dim', 'sink_dim', conversion_coefficients)
    read_back = store.read_coefficients('source_dim', 'sink_dim')
    numpy.testing.assert_equal(read_back, conversion_coefficients)
def test_results(self, store, sample_results):
    """Results can be written, read, listed and cleared."""
    store.write_results(sample_results, 'model_run_name', 'model_name', 0)
    spec = sample_results.spec
    # read back and check the availability listing
    assert store.read_results('model_run_name', 'model_name', spec, 0) == sample_results
    assert store.available_results('model_run_name') == [
        (0, None, 'model_name', spec.name)
    ]
    # clearing removes everything
    store.clear_results('model_run_name')
    assert not store.available_results('model_run_name')
def test_no_completed_jobs(self, full_store):
    """A model run with no results reports no completed jobs."""
    assert full_store.completed_jobs('unique_model_run_name') == []
def test_expected_model_outputs(self, full_store):
    """The fixture model run expects a single known (model, output) pair."""
    expected = [('energy_demand', 'gas_demand')]
    assert full_store.expected_model_outputs('unique_model_run_name') == expected
def test_some_completed_jobs(self, full_store, sample_gas_demand_results):
    """Jobs are reported as complete once their results are written."""
    for timestep in (2015, 2020):
        full_store.write_results(
            sample_gas_demand_results, 'unique_model_run_name', 'energy_demand',
            timestep, 0)
    expected = [
        (2015, 0, 'energy_demand'),
        (2020, 0, 'energy_demand'),
    ]
    assert full_store.completed_jobs('unique_model_run_name') == expected
def test_filter_complete_available_results(self, store):
    """Only (timestep, decision, model) with every output available pass."""
    available = [
        (2020, 0, 'test_model', 'output_a'),
        (2020, 0, 'test_model', 'output_b'),
        (2025, 0, 'test_model', 'output_a'),  # output_b missing for 2025
        (2030, 0, 'other_model', 'output_other'),
    ]
    outputs = [
        ('test_model', 'output_a'),
        ('test_model', 'output_b'),
    ]
    actual = store.filter_complete_available_results(available, outputs)
    assert actual == [(2020, 0, 'test_model')]
def test_warm_start(self, store, sample_results):
    """Warm start is None with no results, else the written timestep."""
    assert store.prepare_warm_start('test_model_run') is None
    store.write_results(sample_results, 'test_model_run', 'model_name', 2020)
    assert store.prepare_warm_start('test_model_run') == 2020
def test_canonical_available_results(self, store, sample_results):
    """Canonical availability reports each timestep at decision zero."""
    for timestep, decision in [(2010, 0), (2015, 0), (2010, 1), (2015, 1), (2020, 1)]:
        store.write_results(
            sample_results, 'model_run_name', 'model_name', timestep, decision)
    output_name = sample_results.spec.name
    # 2020 was only written at decision 1 but still appears with decision 0
    expected = {
        (2010, 0, 'model_name', output_name),
        (2015, 0, 'model_name', output_name),
        (2020, 0, 'model_name', output_name),
    }
    assert store.canonical_available_results('model_run_name') == expected
def test_canonical_expected_results(
    self, store, sample_dimensions, get_sos_model, get_sector_model,
    energy_supply_sector_model, model_run
):
    """Expected results cover each model-run timestep at decision zero."""
    for dim in sample_dimensions:
        store.write_dimension(dim)
    store.write_sos_model(get_sos_model)
    store.write_model_run(model_run)
    store.write_model(get_sector_model)
    store.write_model(energy_supply_sector_model)
    expected = {
        (2015, 0, 'energy_demand', 'gas_demand'),
        (2020, 0, 'energy_demand', 'gas_demand'),
        (2025, 0, 'energy_demand', 'gas_demand'),
    }
    assert store.canonical_expected_results(model_run['name']) == expected
def test_canonical_missing_results(
    self, store, sample_dimensions, get_sos_model, get_sector_model,
    energy_supply_sector_model, model_run
):
    """The missing-results set shrinks as results are written."""
    for dim in sample_dimensions:
        store.write_dimension(dim)
    store.write_sos_model(get_sos_model)
    store.write_model_run(model_run)
    store.write_model(get_sector_model)
    store.write_model(energy_supply_sector_model)
    # before any results are written, everything is missing
    missing = {
        (2015, 0, 'energy_demand', 'gas_demand'),
        (2020, 0, 'energy_demand', 'gas_demand'),
        (2025, 0, 'energy_demand', 'gas_demand'),
    }
    assert store.canonical_missing_results(model_run['name']) == missing
    # write one result and check it drops out of the missing set
    fake_data = DataArray(Spec(name='gas_demand', dtype='float'),
                          np.array(1, dtype=float))
    store.write_results(fake_data, model_run['name'], 'energy_demand', 2015, 0)
    missing.discard((2015, 0, 'energy_demand', 'gas_demand'))
    assert store.canonical_missing_results(model_run['name']) == missing
def test_get_results(self):
    """Placeholder: get_results needs a full canonical project fixture.

    See smif issue #304 (https://github.com/nismod/smif/issues/304).
    Todo: mock a store with known results obtainable via get_results(...).
    That requires a model run with a sector model, a sector model with
    valid inputs and outputs, and results with a valid spec; some of this
    functionality exists in fixtures provided in `conftest.py`.
    """
    pass
class TestWrongRaises:
    """Reading narrative variant data with bad names raises errors."""

    def _populate(self, store, sample_dimensions, get_sos_model,
                  get_sector_model, energy_supply_sector_model):
        # shared setup: dimensions, the sos model and its sector models
        for dim in sample_dimensions:
            store.write_dimension(dim)
        store.write_sos_model(get_sos_model)
        store.write_model(get_sector_model)
        store.write_model(energy_supply_sector_model)

    def test_narrative_variant(self, store, sample_dimensions,
                               get_sos_model, get_sector_model,
                               energy_supply_sector_model,
                               sample_narrative_data):
        """An unknown variant name raises with a helpful message."""
        self._populate(store, sample_dimensions, get_sos_model,
                       get_sector_model, energy_supply_sector_model)
        sos_model_name, narrative_name, _variant, param_name = (
            'energy', 'technology', 'high_tech_dsm', 'smart_meter_savings')
        with raises(SmifDataNotFoundError) as ex:
            store.read_narrative_variant_data(
                sos_model_name, narrative_name, 'bla', param_name)
        expected = "Variant name 'bla' does not exist in narrative 'technology'"
        assert expected in str(ex.value)

    def test_narrative_name(self, store, sample_dimensions, get_sos_model,
                            get_sector_model, energy_supply_sector_model,
                            sample_narrative_data):
        """An unknown narrative name raises with a helpful message."""
        self._populate(store, sample_dimensions, get_sos_model,
                       get_sector_model, energy_supply_sector_model)
        sos_model_name, _narrative, variant_name, param_name = (
            'energy', 'technology', 'high_tech_dsm', 'smart_meter_savings')
        with raises(SmifDataNotFoundError) as ex:
            store.read_narrative_variant_data(
                sos_model_name, 'bla', variant_name, param_name)
        expected = "Narrative name 'bla' does not exist in sos_model 'energy'"
        assert expected in str(ex.value)
| {
"content_hash": "f16f75160ce849820ea6bf598af99ced",
"timestamp": "",
"source": "github",
"line_count": 817,
"max_line_length": 95,
"avg_line_length": 42.6328029375765,
"alnum_prop": 0.6062702764778501,
"repo_name": "nismod/smif",
"id": "f47f4daa07bb641c071034241b0bb34bfc97a9bf",
"size": "34831",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/data_layer/test_store.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3104"
},
{
"name": "HTML",
"bytes": "317"
},
{
"name": "JavaScript",
"bytes": "329939"
},
{
"name": "Python",
"bytes": "897935"
},
{
"name": "Shell",
"bytes": "2590"
}
],
"symlink_target": ""
} |
import math
import tensorflow as tf
from random import shuffle
import utils
# Initialization parameters
STD_DEV = 0.1  # std-dev of the truncated normal used to init weights
BIAS_BIAS = 0.1  # constant used to initialize biases
# Network parameters
INPUT_DIMENSION = 28  # input images are 28x28
INPUT_SIZE = 784 # 28x28
OUTPUT_SIZE = 10 # 0-9
CONV_POOL_LAYERS = [(5, 32, 1, 2), (5, 64, 1, 2)] # filter_size, features, conv_stride, pool_ksize
FULLY_CONNECTED_NEURONS = 1024  # width of the fully-connected layer
P_KEEP = 0.5  # dropout keep probability used during training
# Learning hyperparameters
EPOCHS = 50
ETA = 0.0025  # learning rate (Adam)
LAMBDA = 0.0001  # L2 regularization strength
MINI_BATCH_SIZE = 32
def initialize_weights(shape):
    """Create a weight Variable drawn from a truncated normal distribution."""
    initial = tf.truncated_normal(shape, stddev=STD_DEV)
    return tf.Variable(initial)
def initialize_biases(shape):
    """Create a bias Variable filled with the constant BIAS_BIAS."""
    initial = tf.constant(BIAS_BIAS, shape=shape)
    return tf.Variable(initial)
def conv2d(x, W, stride):
    """2-D convolution, SAME padding, equal stride along height and width."""
    strides = [1, stride, stride, 1]
    return tf.nn.conv2d(x, W, strides=strides, padding='SAME')
def max_pool(x, ksize):
    """Max pooling with SAME padding; window and stride are both `ksize`."""
    window = [1, ksize, ksize, 1]
    return tf.nn.max_pool(x, ksize=window, strides=window, padding='SAME')
def cnn(x, keep_prob):
    """Build the convolutional network graph.

    Returns (logits, l2_loss): logits of shape [batch, OUTPUT_SIZE] and the
    accumulated L2 penalty over all weight matrices (used against overfitting).
    """
    current = tf.reshape(x, [-1, INPUT_DIMENSION, INPUT_DIMENSION, 1])
    l2_loss = 0
    height, width = INPUT_DIMENSION, INPUT_DIMENSION
    in_features = 1
    # stack of conv+relu followed by max-pool, as configured in CONV_POOL_LAYERS
    for filter_size, features, conv_stride, pool_ksize in CONV_POOL_LAYERS:
        W_conv = initialize_weights([filter_size, filter_size, in_features, features])
        b_conv = initialize_biases([features])
        l2_loss += tf.nn.l2_loss(W_conv)
        current = tf.nn.relu(conv2d(current, W_conv, conv_stride) + b_conv)
        height = calc_conv_pool_same_output(height, conv_stride)
        width = calc_conv_pool_same_output(width, conv_stride)
        current = max_pool(current, pool_ksize)
        height = calc_conv_pool_same_output(height, pool_ksize)
        width = calc_conv_pool_same_output(width, pool_ksize)
        in_features = features
    # fully-connected layer over the flattened conv/pool output
    flat_size = in_features * height * width
    W_fc = initialize_weights([flat_size, FULLY_CONNECTED_NEURONS])
    b_fc = initialize_biases([FULLY_CONNECTED_NEURONS])
    l2_loss += tf.nn.l2_loss(W_fc)
    flattened = tf.reshape(current, [-1, flat_size])
    fc = tf.nn.relu(tf.matmul(flattened, W_fc) + b_fc)
    # dropout regularization before the readout layer
    dropped = tf.nn.dropout(fc, keep_prob)
    W_out = initialize_weights([FULLY_CONNECTED_NEURONS, OUTPUT_SIZE])
    b_out = initialize_biases([OUTPUT_SIZE])
    l2_loss += tf.nn.l2_loss(W_out)
    logits = tf.matmul(dropped, W_out) + b_out
    return logits, l2_loss
def calc_conv_pool_same_output(inp, stride):
    """Output length along one axis for a SAME-padded conv or pool.

    With SAME padding the output has ceil(inp / stride) elements.

    Args:
        inp: input length along the axis (positive int).
        stride: stride or pooling window along the axis (positive int).

    Returns:
        int: ceil(inp / stride).
    """
    # -(-a // b) is exact integer ceiling division; the original
    # int(math.ceil(float(a) / float(b))) could misround for very large
    # values due to the float conversion.
    return -(-inp // stride)
def format_for_tf(data_x, data_y):
    """Flatten features and one-hot labels; return (features, labels) lists."""
    pairs = [
        (feature.reshape(INPUT_SIZE), utils.vectorize_y(label).reshape(OUTPUT_SIZE))
        for feature, label in zip(data_x, data_y)
    ]
    return map(list, zip(*pairs))
# Declare network input placeholders
x = tf.placeholder(tf.float32, [None, INPUT_SIZE])
y = tf.placeholder(tf.float32, [None, OUTPUT_SIZE])
keep_prob = tf.placeholder(tf.float32)  # dropout keep probability (1.0 at eval)
# Create model
net, l2_loss = cnn(x, keep_prob)
# Set up cost/optimizer: cross-entropy plus L2 weight penalty
# NOTE(review): positional args to softmax_cross_entropy_with_logits is the
# old TF API; newer TF requires keywords (labels=..., logits=...) — confirm
# against the pinned TF version.
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(net, y) + LAMBDA * l2_loss)
optimizer = tf.train.AdamOptimizer(learning_rate=ETA).minimize(cost)
# Create function to measure accuracy: fraction of argmax matches
eval_output = tf.equal(tf.argmax(net, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(eval_output, tf.float32))
init = tf.initialize_all_variables()  # deprecated name for global variable init
# Get labeled data and re-format for TF
train_x, train_y, valid_x, valid_y, test_x, test_y = utils.load_mnist_data()
train_data = [(feature.reshape(INPUT_SIZE), utils.vectorize_y(label).reshape(OUTPUT_SIZE)) for feature, label in
              zip(train_x, train_y)]
valid_x, valid_y = format_for_tf(valid_x, valid_y)
test_x, test_y = format_for_tf(test_x, test_y)
sess = tf.InteractiveSession()
sess.run(init)
for n in range(EPOCHS):
    shuffle(train_data)
    # mini-batch gradient descent over the shuffled training set
    for s in range(0, len(train_data), MINI_BATCH_SIZE):
        mini_batch = train_data[s:s + MINI_BATCH_SIZE]
        batch_x, batch_y = map(list, zip(*mini_batch))
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y, keep_prob: P_KEEP})
    # per-epoch evaluation on the validation set with dropout disabled
    curr_cost, curr_accuracy = sess.run([cost, accuracy], feed_dict={x: valid_x, y: valid_y, keep_prob: 1.0})
    print("Epoch " + str(n + 1) + ": " + str(curr_accuracy) + "; cost=" + str(curr_cost))
# final held-out evaluation on the test set
final_cost, final_accuracy = sess.run([cost, accuracy], feed_dict={x: test_x, y: test_y, keep_prob: 1.0})
print("----------------------------------------")
print("Test accuracy: " + str(final_accuracy) + "; cost=" + str(final_cost))
| {
"content_hash": "434e900b498659392e2df1f9f279eed8",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 112,
"avg_line_length": 33.878571428571426,
"alnum_prop": 0.6540164452877926,
"repo_name": "tonypeng/ml-playground",
"id": "d733a9114a97d289e5d825bd8e00c7783f0f89d8",
"size": "4743",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "digit-recognition/main_cnn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18712"
}
],
"symlink_target": ""
} |
# Demonstration of common str methods: find, replace, strip, case, predicates.
mystr = 'xxxxxasher'
myposition = mystr.find('asher')
print("the position of 'asher' is in %s of %s" % (myposition, mystr))
print('*' * 20)

if 'asher' in mystr:
    mykouer = mystr.replace('asher', 'kouer')
    print('mykouer is %s' % mykouer)
else:
    print('asher is not in %s' % mystr)

# BUG FIX: str.find returns -1 (which is truthy) when the substring is
# missing, so the original `if mystr.find('aasher'):` reported a match even
# though 'aasher' is not in the string. Compare against -1 explicitly.
if mystr.find('aasher') != -1:
    print("have find 'asher' in %s" % mystr)
else:
    print("not found")

# whitespace stripping: both ends, left only, right only
newstr = '\t asher \t \n'
print(newstr)
print(newstr.strip())
print(newstr.lstrip())
print(newstr.rstrip())

# case conversion
upstr = mystr.upper()
print(upstr)
print(upstr.lower())

# character-class predicates
if mystr.isalpha():
    print('isalpha is this %s' % mystr)
else:
    print('isalpha is not this %s' % mystr)

mystr = '133232'
if mystr.isdigit():
    print('digit is this %s' % mystr)
else:
    print('digit is not this %s' % mystr)

import string
strnew = string.ascii_lowercase
print(strnew)
print(len(strnew))
| {
"content_hash": "42e7dcf36c28bb7590b78f2e3ac2b596",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 69,
"avg_line_length": 18.708333333333332,
"alnum_prop": 0.6403118040089086,
"repo_name": "lichengshuang/createvhost",
"id": "82d358046120346f5fa0a857ef1cb4871c434893",
"size": "922",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/others/System/mystr.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "84170"
},
{
"name": "C",
"bytes": "25320"
},
{
"name": "CSS",
"bytes": "1323"
},
{
"name": "HTML",
"bytes": "26691"
},
{
"name": "JavaScript",
"bytes": "205981"
},
{
"name": "Makefile",
"bytes": "529"
},
{
"name": "Python",
"bytes": "915418"
},
{
"name": "Roff",
"bytes": "6734"
},
{
"name": "Shell",
"bytes": "1548839"
},
{
"name": "Vim script",
"bytes": "56257"
}
],
"symlink_target": ""
} |
import uuid
import networkx as nx
from solar import utils
from .traversal import states
from solar.interfaces.db import get_db
# module-level handle to the configured database backend
db = get_db()
def save_graph(graph):
    """Persist a plan graph: its attributes, nodes and edges.

    Note: it may be possible to store part of this information in the
    AsyncResult backend instead.
    """
    uid = graph.graph['uid']
    db.create(uid, graph.graph, db.COLLECTIONS.plan_graph)
    node_collection = db.COLLECTIONS.plan_node.name + ':' + uid
    for node in graph:
        db.create(node, properties=graph.node[node], collection=node_collection)
        db.create_relation_str(uid, node, type_=db.RELATION_TYPES.graph_to_node)
    edge_type = db.RELATION_TYPES.plan_edge.name + ':' + uid
    for source, dest, properties in graph.edges(data=True):
        db.create_relation_str(source, dest, properties, type_=edge_type)
def get_graph(uid):
    """Load a plan graph (attributes, nodes and edges) from the database."""
    dg = nx.MultiDiGraph()
    node_collection = db.COLLECTIONS.plan_node.name + ':' + uid
    edge_type = db.RELATION_TYPES.plan_edge.name + ':' + uid
    dg.graph = db.get(uid, collection=db.COLLECTIONS.plan_graph).properties
    nodes = [(n.uid, n.properties) for n in db.all(collection=node_collection)]
    dg.add_nodes_from(nodes)
    edges = [(rel['source'], rel['dest'], rel['properties'])
             for rel in db.all_relations(type_=edge_type, db_convert=False)]
    dg.add_edges_from(edges)
    return dg


# alias kept for callers that refer to a plan rather than a graph
get_plan = get_graph
def parse_plan(plan_path):
    """Parse a yaml plan definition and return it as a graph."""
    plan = utils.yaml_load(plan_path)
    dg = nx.MultiDiGraph()
    dg.graph['name'] = plan['name']
    for task in plan['tasks']:
        attributes = {
            'status': 'PENDING',
            'errmsg': None,
        }
        attributes.update(task['parameters'])
        dg.add_node(task['uid'], **attributes)
        # 'before' and 'after' express ordering relative to other tasks
        for successor in task.get('before', ()):
            dg.add_edge(task['uid'], successor)
        for predecessor in task.get('after', ()):
            dg.add_edge(predecessor, task['uid'])
    return dg
def create_plan_from_graph(dg, save=True):
    """Assign a unique uid ('<name>:<uuid4>') to the graph; optionally persist."""
    unique_suffix = str(uuid.uuid4())
    dg.graph['uid'] = "{0}:{1}".format(dg.graph['name'], unique_suffix)
    if save:
        save_graph(dg)
    return dg
def show(uid):
    """Return a yaml dump of the plan: uid, name and tasks in topo order."""
    dg = get_graph(uid)
    tasks = []
    for node in nx.topological_sort(dg):
        tasks.append({
            'uid': node,
            'parameters': dg.node[node],
            'before': dg.successors(node),
            'after': dg.predecessors(node),
        })
    result = {}
    result['uid'] = dg.graph['uid']
    result['name'] = dg.graph['name']
    result['tasks'] = tasks
    return utils.yaml_dump(result)
def create_plan(plan_path, save=True):
    """Parse a yaml plan file and register it as a new plan."""
    return create_plan_from_graph(parse_plan(plan_path), save=save)
def update_plan(uid, plan_path):
    """Re-parse the plan; statuses of tasks that survive are preserved."""
    parsed = parse_plan(plan_path)
    existing = get_graph(uid)
    return update_plan_from_graph(parsed, existing).graph['uid']
def update_plan_from_graph(new, old):
    """Carry graph attributes and surviving task statuses over; save result."""
    new.graph = old.graph
    # keep the status of any task that still exists in the new plan
    for task in new:
        if task in old:
            new.node[task]['status'] = old.node[task]['status']
    save_graph(new)
    return new
def reset_by_uid(uid, state_list=None):
    """Load the plan graph for uid and reset matching tasks to PENDING."""
    return reset(get_graph(uid), state_list=state_list)
def reset(graph, state_list=None):
    """Set task statuses to PENDING; with state_list, only those states."""
    for task in graph:
        if state_list is None or graph.node[task]['status'] in state_list:
            graph.node[task]['status'] = states.PENDING.name
    save_graph(graph)
def reset_filtered(uid):
    """Reset only tasks that ended up SKIPPED or NOOP."""
    reset_by_uid(uid, state_list=[states.SKIPPED.name, states.NOOP.name])
def report_topo(uid):
    """Report [task, status, errmsg, start_time, end_time] in topo order."""
    dg = get_graph(uid)
    report = []
    for task in nx.topological_sort(dg):
        data = dg.node[task]
        report.append([
            task,
            data['status'],
            data['errmsg'],
            data.get('start_time'),
            data.get('end_time'),
        ])
    return report
| {
"content_hash": "9dd6ba6af7a29c3604d53432765324e2",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 85,
"avg_line_length": 26.020689655172415,
"alnum_prop": 0.5860058309037901,
"repo_name": "torgartor21/solar",
"id": "f210b86bc6191992c60dede5a6e24be09dda6292",
"size": "4383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solar/solar/orchestration/graph.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Puppet",
"bytes": "82954"
},
{
"name": "Python",
"bytes": "289854"
},
{
"name": "Shell",
"bytes": "1785"
}
],
"symlink_target": ""
} |
from .stnet import *
| {
"content_hash": "3c617b8718702c972a489dca42f72474",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 20,
"avg_line_length": 21,
"alnum_prop": 0.7142857142857143,
"repo_name": "kuke/models",
"id": "db952550a12b34853556fa42bba04c823bc7cbe4",
"size": "21",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "fluid/PaddleCV/video/models/stnet/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "15149"
},
{
"name": "Perl",
"bytes": "2072"
},
{
"name": "Python",
"bytes": "2905007"
},
{
"name": "Shell",
"bytes": "2506531"
}
],
"symlink_target": ""
} |
'''
whooshalchemy flask extension
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Adds whoosh indexing capabilities to SQLAlchemy models for Flask
applications.
:copyright: (c) 2012 by Karl Gyllstrom
:license: BSD (see LICENSE.txt)
'''
from __future__ import absolute_import
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_testing import TestCase
import flask_whooshalchemy as wa
from whoosh.analysis import StemmingAnalyzer, DoubleMetaphoneFilter
import datetime
import os
import tempfile
import shutil
# shared SQLAlchemy instance; bound to the Flask app in Tests.setUp
db = SQLAlchemy()
class BlogishBlob(object):
    """Mixin providing blog-like columns shared by the test models."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.Text)
    content = db.Column(db.String)
    blurb = db.Column(db.Unicode)
    ignored = db.Column(db.Unicode)  # not listed in any __searchable__
    # BUG FIX: pass the callable, not its result. `utcnow()` was evaluated
    # once at class-definition time, freezing the same timestamp into every
    # row; passing `utcnow` makes SQLAlchemy call it per insert.
    created = db.Column(db.DateTime(), default=datetime.datetime.utcnow)

    def __repr__(self):
        return '{0}(title={1})'.format(self.__class__.__name__, self.title)
def _after_flush(app, changes):
    """No-op flush hook: tell SQLAlchemy to continue normal processing."""
    from sqlalchemy.orm import EXT_CONTINUE
    return EXT_CONTINUE
class ObjectA(db.Model, BlogishBlob):
    """Model indexing three columns; `ignored` is deliberately left out."""
    __tablename__ = 'objectA'
    __searchable__ = ['title', 'content', 'blurb']
class ObjectB(db.Model, BlogishBlob):
    """Model with a duplicated entry in __searchable__ (intentional)."""
    __tablename__ = 'objectB'
    __searchable__ = ['title', 'content', 'content'] # dup intentional
class ObjectC(db.Model, BlogishBlob):
    """Model whose __searchable__ names a column that does not exist."""
    __tablename__ = 'objectC'
    __searchable__ = ['title', 'field_that_doesnt_exist']
class ObjectD(db.Model, BlogishBlob):
    """Model with a custom analyzer: stemming plus double metaphone."""
    __tablename__ = 'objectD'
    __searchable__ = ['title']
    __analyzer__ = StemmingAnalyzer() | DoubleMetaphoneFilter()
class Tests(TestCase):
DATABASE_URL = 'sqlite://'
TESTING = True
def create_app(self):
    """Build the Flask app, pointing WHOOSH_BASE at a fresh temp directory."""
    whoosh_base = os.path.join(tempfile.mkdtemp(), 'whoosh')
    app = Flask(__name__)
    app.config['WHOOSH_BASE'] = whoosh_base
    return app
def setUp(self):
    """Bind the SQLAlchemy instance to the app and create all tables."""
    db.init_app(self.app)
    db.create_all()
def tearDown(self):
    """Remove the whoosh index directory (if present) and drop all tables."""
    import errno
    try:
        shutil.rmtree(self.app.config['WHOOSH_BASE'])
    except OSError as e:
        # named constant instead of the magic number 2 (no such file or dir)
        if e.errno != errno.ENOENT:
            raise
    db.drop_all()
def test_all(self):
title1 = u'a slightly long title'
title2 = u'another title'
title3 = u'wow another title'
obj = ObjectA(title=u'title', blurb='this is a blurb')
db.session.add(obj)
db.session.commit()
self.assertEqual(len(list(ObjectA.query.whoosh_search('blurb'))), 1)
db.session.delete(obj)
db.session.commit()
db.session.add(ObjectA(title=title1, content=u'hello world', ignored=u'no match'))
db.session.commit()
self.assertEqual(len(list(ObjectA.query.whoosh_search('what'))), 0)
self.assertEqual(len(list(ObjectA.query.whoosh_search(u'no match'))), 0)
self.assertEqual(len(list(ObjectA.query.whoosh_search(u'title'))), 1)
self.assertEqual(len(list(ObjectA.query.whoosh_search(u'hello'))), 1)
db.session.add(ObjectB(title=u'my title', content=u'hello world'))
db.session.commit()
# make sure does not interfere with ObjectA's results
self.assertEqual(len(list(ObjectA.query.whoosh_search(u'what'))), 0)
self.assertEqual(len(list(ObjectA.query.whoosh_search(u'title'))), 1)
self.assertEqual(len(list(ObjectA.query.whoosh_search(u'hello'))), 1)
self.assertEqual(len(list(ObjectB.query.whoosh_search(u'what'))), 0)
self.assertEqual(len(list(ObjectB.query.whoosh_search(u'title'))), 1)
self.assertEqual(len(list(ObjectB.query.whoosh_search(u'hello'))), 1)
obj2 = ObjectA(title=title2, content=u'a different message')
db.session.add(obj2)
db.session.commit()
self.assertEqual(len(list(ObjectA.query.whoosh_search(u'what'))), 0)
l = list(ObjectA.query.whoosh_search(u'title'))
self.assertEqual(len(l), 2)
# ranking should always be as follows, since title2 should have a higher relevance score
self.assertEqual(l[0].title, title2)
self.assertEqual(l[1].title, title1)
self.assertEqual(len(list(ObjectA.query.whoosh_search(u'hello'))), 1)
self.assertEqual(len(list(ObjectA.query.whoosh_search(u'message'))), 1)
self.assertEqual(len(list(ObjectB.query.whoosh_search(u'what'))), 0)
self.assertEqual(len(list(ObjectB.query.whoosh_search(u'title'))), 1)
self.assertEqual(len(list(ObjectB.query.whoosh_search(u'hello'))), 1)
self.assertEqual(len(list(ObjectB.query.whoosh_search(u'message'))), 0)
db.session.add(ObjectA(title=title3, content=u'a different message'))
db.session.commit()
l = list(ObjectA.query.whoosh_search(u'title'))
self.assertEqual(len(l), 3)
self.assertEqual(l[0].title, title2)
self.assertEqual(l[1].title, title3)
self.assertEqual(l[2].title, title1)
db.session.delete(obj2)
db.session.commit()
l = list(ObjectA.query.whoosh_search(u'title'))
self.assertEqual(len(l), 2)
self.assertEqual(l[0].title, title3)
self.assertEqual(l[1].title, title1)
two_days_ago = datetime.date.today() - datetime.timedelta(2)
title4 = u'a title that is significantly longer than the others'
db.session.add(ObjectA(title=title4, created=two_days_ago))
db.session.commit()
one_day_ago = datetime.date.today() - datetime.timedelta(1)
recent = list(ObjectA.query.whoosh_search(u'title')
.filter(ObjectA.created >= one_day_ago))
self.assertEqual(len(recent), 2)
self.assertEqual(l[0].title, title3)
self.assertEqual(l[1].title, title1)
three_days_ago = datetime.date.today() - datetime.timedelta(3)
l = list(ObjectA.query.whoosh_search(u'title')
.filter(ObjectA.created >= three_days_ago))
self.assertEqual(len(l), 3)
self.assertEqual(l[0].title, title3)
self.assertEqual(l[1].title, title1)
self.assertEqual(l[2].title, title4)
title5 = u'title with title as frequent title word'
db.session.add(ObjectA(title=title5))
db.session.commit()
l = list(ObjectA.query.whoosh_search(u'title'))
self.assertEqual(len(l), 4)
self.assertEqual(l[0].title, title5)
self.assertEqual(l[1].title, title3)
self.assertEqual(l[2].title, title1)
self.assertEqual(l[3].title, title4)
# test limit
l = list(ObjectA.query.whoosh_search(u'title', limit=2))
self.assertEqual(len(l), 2)
self.assertEqual(l[0].title, title5)
self.assertEqual(l[1].title, title3)
# XXX should replace this with a new function, but I can't figure out
# how to do this cleanly with flask sqlalchemy and testing
db.drop_all()
db.create_all()
title1 = u'my title'
db.session.add(ObjectA(title=title1, content=u'hello world'))
db.session.commit()
l = list(ObjectA.query.whoosh_search(u'title'))
self.assertEqual(len(l), 1)
l = list(ObjectA.query.whoosh_search(u'hello'))
self.assertEqual(len(l), 1)
l = list(ObjectA.query.whoosh_search(u'title', fields=('title',)))
self.assertEqual(len(l), 1)
l = list(ObjectA.query.whoosh_search(u'hello', fields=('title',)))
self.assertEqual(len(l), 0)
l = list(ObjectA.query.whoosh_search(u'title', fields=('content',)))
self.assertEqual(len(l), 0)
l = list(ObjectA.query.whoosh_search(u'hello', fields=('content',)))
self.assertEqual(len(l), 1)
l = list(ObjectA.query.whoosh_search(u'hello dude', fields=('content',), or_=True))
self.assertEqual(len(l), 1)
l = list(ObjectA.query.whoosh_search(u'hello dude', fields=('content',), or_=False))
self.assertEqual(len(l), 0)
# new function: test chaining
db.drop_all()
db.create_all()
db.session.add(ObjectA(title=u'title one', content=u'a poem'))
db.session.add(ObjectA(title=u'title two', content=u'about testing'))
db.session.add(ObjectA(title=u'title three', content=u'is delightfully tested'))
db.session.add(ObjectA(title=u'four', content=u'tests'))
db.session.commit()
self.assertEqual(len(list(ObjectA.query.whoosh_search(u'title'))), 3)
self.assertEqual(len(list(ObjectA.query.whoosh_search(u'test'))), 3)
# chained query, operates as AND
self.assertEqual(len(list(ObjectA.query.whoosh_search(u'title').whoosh_search(u'test'))),
2)
def test_invalid_attribute(self):
db.session.add(ObjectC(title=u'my title', content=u'hello world'))
self.assertRaises(AttributeError, db.session.commit)
def test_default_analyzer(self):
db.session.add(ObjectA(title=u'jumping', content=u''))
db.session.commit()
assert ['jumping'] == [obj.title for obj in ObjectA.query.whoosh_search(u'jump')]
def test_custom_analyzer(self):
from whoosh.analysis import SimpleAnalyzer
self.app.config['WHOOSH_ANALYZER'] = SimpleAnalyzer()
db.init_app(self.app)
db.create_all()
db.session.add(ObjectA(title=u'jumping', content=u''))
db.session.commit()
assert not list(ObjectA.query.whoosh_search(u'jump'))
assert ['jumping'] == [obj.title for obj in ObjectA.query.whoosh_search(u'jumping')]
db.session.add(ObjectD(title=u'Travelling', content=u'Stemming'))
db.session.add(ObjectD(title=u'travel', content=u'Unstemmed and normal'))
db.session.add(ObjectD(title=u'trevel', content=u'Mispelt'))
db.session.commit()
# When mispelt on either the indexed side or the query side, they should all return 3 due to the DoubleMetaphoneFilter
self.assertEqual(len(list(ObjectD.query.whoosh_search('travelling'))), 3)
self.assertEquals(len(list(ObjectD.query.whoosh_search('trovel'))), 3)
if __name__ == '__main__':
    # Allow running this test module directly with the stdlib runner.
    import unittest
    unittest.main()
| {
"content_hash": "246c33ca189e3dd1777ce4e77f8200e7",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 126,
"avg_line_length": 34.97931034482759,
"alnum_prop": 0.6332807570977917,
"repo_name": "gyllstromk/Flask-WhooshAlchemy",
"id": "95bb11d866e8928e31676ab1eb6194affb102d66",
"size": "10144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_all.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "20622"
}
],
"symlink_target": ""
} |
"""
Load a CSV raster file to RasterAggregatedLayer object and related NumericRasterAggregateData.
NOTE: Input CSV expected to be AGGREGATED.
"""
import os
import csv
import gzip
import datetime
from django.core.management.base import BaseCommand, CommandError
from django.contrib.gis.geos import Point
from django.utils import timezone
from ...models import RasterAggregatedLayer, NumericRasterAggregateData
WGS84_SRID = 4326  # standard lon/lat (WGS84)
SPHERICAL_MERCATOR_SRID = 3857  # google maps projection
COMMIT_COUNT = 50000  # rows buffered per bulk_create() call
def load_raster_csv(filepath, layer_name, csv_encoding, pixel_size, csv_srid, indexes, lon_idx, lat_idx, datetime_idx, datetime_format_str, opacity, no_datetime=False, no_headers=False, aggregation_method="mean"):
    """
    Load an AGGREGATED CSV raster file, creating one RasterAggregatedLayer per
    value column listed in *indexes* and bulk-inserting one
    NumericRasterAggregateData row per (pixel, value column).

    - filepath: CSV path; ".gz" files are opened transparently with gzip.
    - layer_name: if given, applied to every created layer; otherwise a name
      is derived from the filename and the column header.
    - indexes: column indexes of the numeric values to load.
    - lon_idx/lat_idx/datetime_idx: column indexes of coordinates/timestamp.
    - no_datetime: when True the datetime column is ignored and load time used.
    - no_headers: when True the first CSV line is treated as data.

    Returns (layers, count): the created layers and the number of pixels saved.
    """
    # Transparently support gzip-compressed CSVs.
    open_func = open
    if filepath.lower().endswith(".gz"):
        open_func = gzip.open
    with open_func(filepath, "rt", encoding=csv_encoding) as in_f:
        reader = csv.reader(in_f)
        headers = None
        if not no_headers:
            headers = next(reader)  # remove headers
        # prepare KPI raster layers (one per requested value column)
        index_layers = {}
        for data_idx in indexes:
            if not headers:
                kpi_name = "Unknown (no-headers)"
            else:
                kpi_name = headers[data_idx]
            # BUGFIX: derive the name per column without rebinding layer_name.
            # Previously the derived name of the FIRST column was assigned to
            # layer_name and silently reused for every subsequent layer.
            name = layer_name or "{} ({})".format(os.path.split(filepath)[-1], kpi_name)
            layer = RasterAggregatedLayer(name=name,
                                          filepath=filepath,
                                          data_model="NumericRasterAggregateData",
                                          opacity=opacity,
                                          aggregation_method=aggregation_method,
                                          pixel_size_meters=pixel_size,
                                          minimum_samples=1,  # sample number is not known for pre-aggregated items.
                                          )
            layer.save()
            index_layers[data_idx] = layer
        count = 0
        pixels = []
        # BUGFIX: only require the datetime column when we actually parse it;
        # with no_datetime=True rows without a timestamp were being skipped.
        required_indexes = [lon_idx, lat_idx]
        if not no_datetime:
            required_indexes.append(datetime_idx)
        # Hoisted out of the row loop: the default timezone does not change.
        current_timezone = timezone.get_default_timezone()
        for row in reader:
            if row and all(row[idx] for idx in required_indexes):
                if no_datetime:
                    datetime_value = timezone.now()
                else:
                    naive_datetime_value = datetime.datetime.strptime(row[datetime_idx], datetime_format_str)
                    datetime_value = timezone.make_aware(naive_datetime_value, current_timezone)
                lon = float(row[lon_idx])
                lat = float(row[lat_idx])
                p = Point(lon, lat, srid=csv_srid)
                for value_idx in indexes:
                    if row[value_idx]:
                        # currently only supporting numeric values!
                        value = float(row[value_idx])
                        data = NumericRasterAggregateData(layer=index_layers[value_idx],
                                                          location=p,
                                                          dt=datetime_value,
                                                          mean=value,
                                                          samples=1)
                        pixels.append(data)
                        if len(pixels) >= COMMIT_COUNT:
                            NumericRasterAggregateData.objects.bulk_create(pixels)
                            count += len(pixels)
                            pixels = []
        # Flush the final partial batch.
        if pixels:
            NumericRasterAggregateData.objects.bulk_create(pixels)
            count += len(pixels)
    return index_layers.values(), count
class Command(BaseCommand):
    """Management command wrapper around load_raster_csv (see module docstring)."""

    help = __doc__

    def add_arguments(self, parser):
        parser.add_argument("-f", "--filepath",
                            required=True,
                            default=None,
                            help="CSV Raster File to load")
        parser.add_argument("-e", "--encoding",
                            default="utf8",
                            help="Encoding of the CSV file [DEFAULT='utf8']")
        parser.add_argument("-p", "--pixel-size",
                            type=int,
                            default=5,
                            help="CSV Raster Pixel Size (meters)")
        parser.add_argument("-c", "--csv-srid",
                            dest="csv_srid",
                            type=int,
                            default=WGS84_SRID,
                            help="Input CSV Lon/Lat SRID. (DEFAULT=4326 [WGS84])")
        parser.add_argument("-i", "--indexes",
                            type=int,
                            default=[3, ],
                            nargs="+",
                            help="Column indexes for the 'value(s)' to be loaded [DEFAULT=(3,)]")
        # BUGFIX: the help strings below previously stated DEFAULT=1 for
        # lon-idx and DEFAULT=2 for lat-idx, contradicting the actual defaults.
        parser.add_argument("--lon-idx",
                            dest="lon_idx",
                            default=2,
                            type=int,
                            help="Column Index (0 start) of 'longitude' in decimal degrees [DEFAULT=2]")
        parser.add_argument("--lat-idx",
                            dest="lat_idx",
                            default=1,
                            type=int,
                            help="Column Index (0 start) of 'latitude' in decimal degrees [DEFAULT=1]")
        parser.add_argument("-n", "--name",
                            default=None,
                            type=str,
                            help="If given this name will be applied to resulting RasterAggregatedLayer [DEFAULT=None]")
        parser.add_argument("-o", "--opacity",
                            default=0.75,
                            type=float,
                            help="Layer Suggested Opacity [DEFAULT={}]".format(0.75))
        parser.add_argument("-d", "--datetime-idx",
                            default=0,
                            type=int,
                            help="Column index of datetime [DEFAULT=0]")
        parser.add_argument("--datetime-format-str",
                            default="%H:%M:%S.%f %d-%m-%Y",
                            help="Datetime format string to use [DEFAULT='%%H:%%M:%%S.%%f %%d-%%m-%%Y']")
        parser.add_argument("--no-datetime",
                            default=False,
                            action="store_true",
                            help="If given datetime column will not be necessary, and load time will be used.")
        parser.add_argument("--no-headers",
                            default=False,
                            action="store_true",
                            help="If given the first line will be *included* as data")

    def handle(self, *args, **options):
        """Load the CSV, then build a legend and map layer for each result."""
        result_layers, count = load_raster_csv(options["filepath"],
                                               options["name"],
                                               options["encoding"],
                                               options["pixel_size"],
                                               options["csv_srid"],
                                               options["indexes"],
                                               options["lon_idx"],
                                               options["lat_idx"],
                                               options["datetime_idx"],
                                               options["datetime_format_str"],
                                               options["opacity"],
                                               options["no_datetime"],
                                               options["no_headers"])
        self.stdout.write("Created ({}) pixels in the following RasterAggregatedLayer(s): ".format(count))
        for raster_layer in result_layers:
            # auto create legend
            legend = raster_layer.auto_create_legend(more_is_better=True)
            raster_layer.legend = legend
            raster_layer.save()
            # create map layer (for viewing)
            raster_layer.create_map_layer()
            self.stdout.write("[{}] {}".format(raster_layer.id, raster_layer.name))
| {
"content_hash": "181ca0298b0c596c81fd94b25442f99f",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 213,
"avg_line_length": 48.532544378698226,
"alnum_prop": 0.4567178736893441,
"repo_name": "monkut/deso",
"id": "41ba413af48716b5d61e0bc7f52e65d21beddf16",
"size": "8202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deso/deso/layers/raster/management/commands/load_raster_csv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "27379"
},
{
"name": "CSS",
"bytes": "16381"
},
{
"name": "HTML",
"bytes": "9485"
},
{
"name": "JavaScript",
"bytes": "230986"
},
{
"name": "Python",
"bytes": "114756"
}
],
"symlink_target": ""
} |
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# NOTE(review): django.conf.urls.defaults / patterns() is the Django <= 1.5
# URLconf style; bare (regex, include) tuples below are the old equivalent of
# url(...). Keep as-is while the project targets that Django version.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'payway_demo.views.home', name='home'),
    # url(r'^payway_demo/', include('payway_demo.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Admin site (enabled):
    url(r'^admin/', include(admin.site.urls)),
    # Application routes:
    (r'^accounts/', include('payway.accounts.urls')),
    (r'^webmoney/', include('payway.webmoney.urls')),
    (r'^qiwi/', include('payway.qiwi.urls')),
    (r'^orders/', include('payway.orders.urls')),
    (r'^merchants/', include('payway.merchants.urls')),
    (r'^i18n/', include('django.conf.urls.i18n')),
)
| {
"content_hash": "7f3309d27913389e03d7f9c94006cb65",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 71,
"avg_line_length": 38.47826086956522,
"alnum_prop": 0.6621468926553672,
"repo_name": "RANUX/django-payway",
"id": "44821d972c8cfb737decf27198f4b3e9d3260697",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "payway_demo/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "99640"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, with_statement
import errno
import glob
import os
import re
import shutil
import signal
import stat
import subprocess
import sys
import time
import tempfile
import warnings
from collections import namedtuple
from datetime import datetime
from distutils.version import LooseVersion #pylint: disable=import-error, no-name-in-module
import yaml
from six import iteritems, print_, string_types
from ccmlib import common, extension
from ccmlib.cli_session import CliSession
from ccmlib.repository import setup
from six.moves import xrange
class Status():
    """String constants for the lifecycle states tracked on a node."""

    UNINITIALIZED = "UNINITIALIZED"
    UP = "UP"
    DOWN = "DOWN"
    DECOMMISSIONED = "DECOMMISSIONED"
class NodeError(Exception):
    """Raised when a node-level operation fails.

    The optional *process* attribute holds the subprocess associated
    with the failure, when there is one.
    """

    def __init__(self, msg, process=None):
        super(NodeError, self).__init__(msg)
        self.process = process
class TimeoutError(Exception):
    """Raised when a watched condition does not occur within the allotted time.

    NOTE(review): this shadows the Python 3 builtin TimeoutError; kept for
    backward compatibility with existing callers.
    """

    def __init__(self, data):
        super(TimeoutError, self).__init__(str(data))
class ToolError(Exception):
    """Raised when an external tool exits with a non-zero status.

    Keeps the command, exit status and any captured stdout/stderr, and
    folds them all into the exception message.
    """

    def __init__(self, command, exit_status, stdout=None, stderr=None):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr

        parts = ["Subprocess {} exited with non-zero status; exit status: {}".format(command, exit_status)]
        if stdout:
            parts.append("; \nstdout: ")
            parts.append(stdout)
        if stderr:
            parts.append("; \nstderr: ")
            parts.append(stderr)
        super(ToolError, self).__init__("".join(parts))
# Groups: 1 = cf, 2 = tmp or none, 3 = suffix (Compacted or Data.db)
_sstable_regexp = re.compile('((?P<keyspace>[^\s-]+)-(?P<cf>[^\s-]+)-)?(?P<tmp>tmp(link)?-)?(?P<version>[^\s-]+)-(?P<number>\d+)-(?P<format>([a-z]+)-)?(?P<suffix>[a-zA-Z]+)\.[a-zA-Z0-9]+$')
class Node(object):
"""
Provides interactions to a Cassandra node.
"""
def __init__(self, name, cluster, auto_bootstrap, thrift_interface, storage_interface, jmx_port, remote_debug_port, initial_token, save=True, binary_interface=None, byteman_port='0', environment_variables=None, byteman_startup_script=None):
    """
    Create a new Node.
    - name: the name for that node
    - cluster: the cluster this node is part of
    - auto_bootstrap: whether or not this node should be set for auto-bootstrap
    - thrift_interface: the (host, port) tuple for thrift
    - storage_interface: the (host, port) tuple for internal cluster communication
    - jmx_port: the port for JMX to bind to
    - remote_debug_port: the port for remote debugging
    - initial_token: the token for this node. If None, use Cassandra token auto-assignment
    - save: copy all data useful for this node to the right position. Leaving this true
      is almost always the right choice.
    - binary_interface: the (host, port) tuple for the native protocol (may be None)
    - byteman_port / byteman_startup_script: byteman agent settings
    - environment_variables: extra env vars merged into the launch environment
    """
    self.name = name
    self.cluster = cluster
    self.status = Status.UNINITIALIZED
    self.auto_bootstrap = auto_bootstrap
    # Interfaces are normalized via common.normalize_interface; 'binary' may be None.
    self.network_interfaces = {'thrift': common.normalize_interface(thrift_interface),
                               'storage': common.normalize_interface(storage_interface),
                               'binary': common.normalize_interface(binary_interface)}
    self.jmx_port = jmx_port
    self.remote_debug_port = remote_debug_port
    self.byteman_port = byteman_port
    self.byteman_startup_script = byteman_startup_script
    self.initial_token = initial_token
    self.pid = None  # filled in once the Cassandra process is started
    self.data_center = None
    self.workloads = []
    self._dse_config_options = {}
    self.__config_options = {}
    self.__install_dir = None  # None means "fall back to the cluster's install dir"
    self.__global_log_level = None
    self.__classes_log_level = {}
    self.__environment_variables = environment_variables or {}
    self.__conf_updated = False
    if save:
        # Materialize config files and launch scripts under this node's directory.
        self.import_config_files()
        self.import_bin_files()
        if common.is_win():
            self.__clean_bat()
@staticmethod
def load(path, name, cluster):
    """
    Load a node from the path on disk to the config files, the node name and the
    cluster the node is part of.
    """
    node_path = os.path.join(path, name)
    filename = os.path.join(node_path, 'node.conf')
    with open(filename, 'r') as f:
        # NOTE(review): yaml.load without an explicit Loader; acceptable for
        # trusted local config files, but yaml.safe_load would be stricter.
        data = yaml.load(f)
    try:
        itf = data['interfaces']
        # Optional fields fall back to defaults when absent from node.conf.
        initial_token = None
        if 'initial_token' in data:
            initial_token = data['initial_token']
        remote_debug_port = 2000
        if 'remote_debug_port' in data:
            remote_debug_port = data['remote_debug_port']
        binary_interface = None
        if 'binary' in itf and itf['binary'] is not None:
            binary_interface = tuple(itf['binary'])
        thrift_interface = None
        if 'thrift' in itf and itf['thrift'] is not None:
            thrift_interface = tuple(itf['thrift'])
        # save=False: the on-disk files already exist, don't re-import them.
        node = cluster.create_node(data['name'], data['auto_bootstrap'], thrift_interface, tuple(itf['storage']), data['jmx_port'], remote_debug_port, initial_token, save=False, binary_interface=binary_interface, byteman_port=data['byteman_port'])
        node.status = data['status']
        if 'pid' in data:
            node.pid = int(data['pid'])
        if 'install_dir' in data:
            node.__install_dir = data['install_dir']
        if 'config_options' in data:
            node.__config_options = data['config_options']
        if 'dse_config_options' in data:
            node._dse_config_options = data['dse_config_options']
        if 'environment_variables' in data:
            node.__environment_variables = data['environment_variables']
        if 'data_center' in data:
            node.data_center = data['data_center']
        if 'workloads' in data:
            node.workloads = data['workloads']
        return node
    except KeyError as k:
        raise common.LoadError("Error Loading " + filename + ", missing property: " + str(k))
def get_path(self):
    """Return this node's top-level directory (where config/data is stored)."""
    return os.path.join(self.cluster.get_path(), self.name)
def get_bin_dir(self):
    """Return the directory holding this node's copies of the Cassandra scripts."""
    return os.path.join(self.get_path(), 'bin')
def get_tool(self, toolname):
    """Return the full path of the named tool under the install dir's bin/."""
    return common.join_bin(self.get_install_dir(), 'bin', toolname)
def get_tool_args(self, toolname):
    """Return the named tool's path wrapped in a list, ready for subprocess use."""
    return [common.join_bin(self.get_install_dir(), 'bin', toolname)]
def get_env(self):
    """Build the process environment used to launch Cassandra for this node."""
    # Only regenerate the config the first time after it has been invalidated.
    needs_conf_update = not self.__conf_updated
    if needs_conf_update:
        self.__conf_updated = True
    env = common.make_cassandra_env(self.get_install_dir(), self.get_path(), needs_conf_update)
    # Per-node overrides win over the generated environment.
    env.update(self.__environment_variables)
    return env
def get_install_cassandra_root(self):
    """Return the root of the Cassandra installation used by this node."""
    return self.get_install_dir()
def get_node_cassandra_root(self):
    """Return the node-local root directory (same as get_path())."""
    return self.get_path()
def get_conf_dir(self):
    """Return the directory where this node's Cassandra config files live."""
    return os.path.join(self.get_path(), 'conf')
def address(self):
    """Return the IP this node uses for internal (storage) communication."""
    storage_itf = self.network_interfaces['storage']
    return storage_itf[0]
def get_install_dir(self):
    """
    Return the cassandra source directory used by this node, falling back
    to the cluster-wide install dir when none was set on the node.
    """
    if self.__install_dir is None:
        return self.cluster.get_install_dir()
    common.validate_install_dir(self.__install_dir)
    return self.__install_dir
def node_setup(self, version, verbose):
    """
    Fetch/prepare the requested Cassandra version via repository.setup()
    and return its install directory.
    """
    # BUGFIX(idiom): the original unpacked into "dir", shadowing the builtin,
    # and kept an unused second variable.
    install_dir, _ = setup(version, verbose=verbose)
    return install_dir
def set_install_dir(self, install_dir=None, version=None, verbose=False):
    """
    Set the path to the cassandra source directory for use by this node.
    Either pass an explicit install_dir, or a version to be fetched via
    node_setup(). Returns self for chaining.
    """
    if version is None:
        self.__install_dir = install_dir
        if install_dir is not None:
            common.validate_install_dir(install_dir)
    else:
        self.__install_dir = self.node_setup(version, verbose=verbose)
    # Thrift was removed in Cassandra 4.0, so drop that interface there.
    # BUGFIX: compare against a number -- get_base_cassandra_version() returns
    # a float, and float >= '4' raises TypeError on Python 3.
    if self.get_base_cassandra_version() >= 4:
        self.network_interfaces['thrift'] = None
    self.import_config_files()
    self.import_bin_files()
    self.__conf_updated = False
    return self
def set_workloads(self, workloads):
    """Workloads are a DSE-only concept; a plain Cassandra node rejects them."""
    raise common.ArgumentError("Cannot set workloads on a cassandra node")
def get_cassandra_version(self):
    """
    Return the Cassandra version detected from this node's install dir,
    falling back to the cluster's version when detection fails.
    """
    try:
        return common.get_version_from_build(self.get_install_dir())
    except common.CCMError:
        return self.cluster.cassandra_version()
def get_base_cassandra_version(self):
    """Return the major.minor part of the Cassandra version as a float (e.g. 3.11)."""
    version = self.get_cassandra_version()
    major_minor = version.vstring.split('.')[:2]
    return float('.'.join(major_minor))
def set_configuration_options(self, values=None):
    """
    Set Cassandra configuration options.
    ex:
        node.set_configuration_options(values={
            'hinted_handoff_enabled' : True,
            'concurrent_writes' : 64,
        })
    Options are merged into any previously set ones, and the node's
    config files are regenerated.
    """
    # BUGFIX: the original checked hasattr(self, '_config_options'), but the
    # double-underscore attribute is name-mangled to '_Node__config_options',
    # so the check was always False and every call wiped previously set
    # options before merging.
    if getattr(self, '_Node__config_options', None) is None:
        self.__config_options = {}
    if values is not None:
        self.__config_options = common.merge_configuration(self.__config_options, values)
    self.import_config_files()
def set_environment_variable(self, key, value):
    """Record an environment variable for the node's process and refresh config files."""
    self.__environment_variables[key] = value
    self.import_config_files()
def set_batch_commitlog(self, enabled=False):
    """
    Convenience switch between batch and periodic commitlog modes
    (batch mode requires setting 2 options and unsetting one).
    """
    if enabled:
        sync_mode, batch_window = "batch", 5
    else:
        sync_mode, batch_window = "periodic", 10000
    self.set_configuration_options({
        "commitlog_sync": sync_mode,
        "commitlog_sync_batch_window_in_ms": batch_window,
        "commitlog_sync_period_in_ms": None,
    })
def set_dse_configuration_options(self, values=None):
    # DSE-specific configuration: intentionally a no-op on a plain
    # Cassandra node (kept so callers can treat both node kinds uniformly).
    pass
def show(self, only_status=False, show_cluster=True):
    """
    Print infos on this node configuration.
    - only_status: print only the "name: STATUS" line
    - show_cluster: include the cluster name in the details
    """
    self.__update_status()
    # Pad continuation lines to line up under "name: ".
    # BUGFIX(idiom): string multiplication instead of joining a generated list.
    indent = ' ' * (len(self.name) + 2)
    print_("{}: {}".format(self.name, self.__get_status_string()))
    if not only_status:
        if show_cluster:
            print_("{}{}={}".format(indent, 'cluster', self.cluster.name))
        print_("{}{}={}".format(indent, 'auto_bootstrap', self.auto_bootstrap))
        if self.network_interfaces['thrift'] is not None:
            print_("{}{}={}".format(indent, 'thrift', self.network_interfaces['thrift']))
        if self.network_interfaces['binary'] is not None:
            print_("{}{}={}".format(indent, 'binary', self.network_interfaces['binary']))
        print_("{}{}={}".format(indent, 'storage', self.network_interfaces['storage']))
        print_("{}{}={}".format(indent, 'jmx_port', self.jmx_port))
        print_("{}{}={}".format(indent, 'remote_debug_port', self.remote_debug_port))
        print_("{}{}={}".format(indent, 'byteman_port', self.byteman_port))
        print_("{}{}={}".format(indent, 'initial_token', self.initial_token))
        if self.pid:
            print_("{}{}={}".format(indent, 'pid', self.pid))
def is_running(self):
    """Return true if the node is running (UP or DECOMMISSIONED-but-running)."""
    self.__update_status()
    return self.status in (Status.UP, Status.DECOMMISSIONED)
def is_live(self):
    """Return true if the node is live (running and not decommissioned)."""
    self.__update_status()
    return self.status == Status.UP
def logfilename(self):
    """Return the path to this node's current Cassandra system.log."""
    return os.path.join(self.get_path(), 'logs', 'system.log')
def debuglogfilename(self):
    """Return the path to this node's debug.log."""
    return os.path.join(self.get_path(), 'logs', 'debug.log')
def gclogfilename(self):
    """Return the path to this node's current GC log."""
    return os.path.join(self.get_path(), 'logs', 'gc.log.0.current')
def compactionlogfilename(self):
    """Return the path to this node's compaction.log."""
    return os.path.join(self.get_path(), 'logs', 'compaction.log')
def envfilename(self):
    """Return the path to this node's cassandra-env file (Windows variant if applicable)."""
    env_name = common.CASSANDRA_WIN_ENV if common.is_win() else common.CASSANDRA_ENV
    return os.path.join(self.get_conf_dir(), env_name)
def grep_log(self, expr, filename='system.log', from_mark=None):
    """
    Return a list of (line, match) pairs for every line of the given
    Cassandra log that matches the regular expression *expr*, optionally
    starting from a previously recorded mark.
    """
    pattern = re.compile(expr)
    results = []
    with open(os.path.join(self.get_path(), 'logs', filename)) as log:
        if from_mark:
            log.seek(from_mark)
        for line in log:
            match = pattern.search(line)
            if match:
                results.append((line, match))
    return results
def grep_log_for_errors(self, filename='system.log'):
    """
    Returns a list of errors with stack traces in the Cassandra log of
    this node, starting after the position recorded by
    mark_log_for_errors() (or from the beginning when no mark exists).
    """
    # BUGFIX: the filename argument was previously dropped when delegating,
    # so a non-default log file was silently ignored.
    return self.grep_log_for_errors_from(filename=filename, seek_start=getattr(self, 'error_mark', 0))
def grep_log_for_errors_from(self, filename='system.log', seek_start=0):
    # Return errors (with stack traces) found in the given log file,
    # starting at byte offset seek_start. Parsing is delegated to the
    # module-level _grep_log_for_errors helper (defined elsewhere in this file).
    with open(os.path.join(self.get_path(), 'logs', filename)) as f:
        f.seek(seek_start)
        return _grep_log_for_errors(f.read())
def mark_log_for_errors(self, filename='system.log'):
    """
    Record the current log position so that subsequent
    node.grep_log_for_errors() calls ignore errors before this point.
    """
    self.error_mark = self.mark_log(filename)
def mark_log(self, filename='system.log'):
    """
    Return a "mark" (byte offset) at the current end of this node's log,
    for use with the from_mark parameter of the watch_log_for_* methods.
    Returns 0 when the log file does not exist yet.
    """
    log_file = os.path.join(self.get_path(), 'logs', filename)
    if not os.path.exists(log_file):
        return 0
    with open(log_file) as log:
        log.seek(0, os.SEEK_END)
        return log.tell()
def print_process_output(self, name, proc, verbose=False):
    # Print the captured stderr of *proc*, prefixed with the node name.
    # NOTE(review): the 'verbose' parameter is currently unused.
    # If stderr_file exists on the process, we opted to
    # store stderr in a separate temporary file, consume that.
    if hasattr(proc, 'stderr_file') and proc.stderr_file is not None:
        proc.stderr_file.seek(0)
        stderr = proc.stderr_file.read()
    else:
        try:
            stderr = proc.communicate()[1]
        except ValueError:
            # communicate() raises ValueError when the streams were already
            # consumed/closed; treat that as "no stderr".
            stderr = ''
    if len(stderr) > 1:
        # > 1 (not > 0) presumably to skip output that is just a newline -- TODO confirm
        print_("[{} ERROR] {}".format(name, stderr.strip()))
# This will return when exprs are found or it timeouts
def watch_log_for(self, exprs, from_mark=None, timeout=600, process=None, verbose=False, filename='system.log'):
    """
    Watch the log until one or more (regular) expression are found.
    This methods when all the expressions have been found or the method
    timeouts (a TimeoutError is then raised). On successful completion,
    a list of pair (line matched, match object) is returned.

    - exprs: a single pattern string, or a list of them; when a single
      string is passed, a single (line, match) pair is returned instead
      of a list.
    - from_mark: byte offset (from mark_log()) to start watching from.
    - timeout: seconds (of idle polling) before raising TimeoutError.
    - process: optional subprocess to monitor; if it dies with a non-zero
      return code its output is printed and RuntimeError is raised.
    """
    elapsed = 0
    tofind = [exprs] if isinstance(exprs, string_types) else exprs
    tofind = [re.compile(e) for e in tofind]
    matchings = []
    reads = ""  # everything read so far, used for the timeout diagnostic
    if len(tofind) == 0:
        return None

    log_file = os.path.join(self.get_path(), 'logs', filename)
    output_read = False
    # Wait for the log file to appear, watching the process for early death.
    while not os.path.exists(log_file):
        time.sleep(.5)
        if process and not output_read:
            process.poll()
            if process.returncode is not None:
                self.print_process_output(self.name, process, verbose)
                output_read = True
                if process.returncode != 0:
                    raise RuntimeError()  # Shouldn't reuse RuntimeError but I'm lazy

    with open(log_file) as f:
        if from_mark:
            f.seek(from_mark)

        while True:
            # First, if we have a process to check, then check it.
            # Skip on Windows - stdout/stderr is cassandra.bat
            if not common.is_win() and not output_read:
                if process:
                    process.poll()
                    if process.returncode is not None:
                        self.print_process_output(self.name, process, verbose)
                        output_read = True
                        if process.returncode != 0:
                            raise RuntimeError()  # Shouldn't reuse RuntimeError but I'm lazy

            line = f.readline()
            if line:
                reads = reads + line
                for e in tofind:
                    m = e.search(line)
                    if m:
                        matchings.append((line, m))
                        tofind.remove(e)
                        if len(tofind) == 0:
                            # Single-string callers get a single pair back.
                            return matchings[0] if isinstance(exprs, string_types) else matchings
            else:
                # yep, it's ugly
                time.sleep(1)
                elapsed = elapsed + 1
                if elapsed > timeout:
                    raise TimeoutError(time.strftime("%d %b %Y %H:%M:%S", time.gmtime()) + " [" + self.name + "] Missing: " + str([e.pattern for e in tofind]) + ":\n" + reads[:50] + ".....\nSee {} for remainder".format(filename))

                # A cleanly-exited process (or, on Windows, a stopped node)
                # means no more log output is coming.
                if process:
                    if common.is_win():
                        if not self.is_running():
                            return None
                    else:
                        process.poll()
                        if process.returncode == 0:
                            return None
def watch_log_for_death(self, nodes, from_mark=None, timeout=600, filename='system.log'):
    """
    Watch the log of this node until it detects that the provided other
    nodes are marked dead. This method returns nothing but throw a
    TimeoutError if all the requested node have not been found to be
    marked dead before timeout sec.
    A mark as returned by mark_log() can be used as the from_mark
    parameter to start watching the log from a given position. Otherwise
    the log is watched from the beginning.
    """
    tofind = nodes if isinstance(nodes, list) else [nodes]
    # NOTE(review): "[dead|DOWN]" is a regex character class (matches a single
    # character from that set), not an alternation; it still matches both
    # "... is now dead" and "... is now DOWN" lines, just loosely -- confirm
    # before tightening to "(dead|DOWN)".
    tofind = ["%s is now [dead|DOWN]" % node.address() for node in tofind]
    self.watch_log_for(tofind, from_mark=from_mark, timeout=timeout, filename=filename)
def watch_log_for_alive(self, nodes, from_mark=None, timeout=120, filename='system.log'):
    """
    Watch the log of this node until the provided other node(s) are marked
    UP. Works like watch_log_for_death.
    """
    node_list = nodes if isinstance(nodes, list) else [nodes]
    exprs = ["%s.* now UP" % node.address() for node in node_list]
    self.watch_log_for(exprs, from_mark=from_mark, timeout=timeout, filename=filename)
def wait_for_binary_interface(self, **kwargs):
    """
    Waits for the Binary CQL interface to be listening. If > 1.2 will check
    log for 'Starting listening for CQL clients' before checking for the
    interface to be listening.
    Emits a warning if not listening after 30 seconds.
    kwargs are forwarded to watch_log_for (e.g. from_mark, timeout, process).
    """
    if self.cluster.version() >= '1.2':
        self.watch_log_for("Starting listening for CQL clients", **kwargs)
    binary_itf = self.network_interfaces['binary']
    if not common.check_socket_listening(binary_itf, timeout=30):
        warnings.warn("Binary interface %s:%s is not listening after 30 seconds, node may have failed to start."
                      % (binary_itf[0], binary_itf[1]))
def wait_for_thrift_interface(self, **kwargs):
    """
    Waits for the Thrift interface to be listening; no-op on
    Cassandra >= 4 (thrift was removed).
    Emits a warning if not listening after 30 seconds.
    """
    if self.cluster.version() >= '4':
        return
    self.watch_log_for("Listening for thrift clients...", **kwargs)
    thrift_itf = self.network_interfaces['thrift']
    if not common.check_socket_listening(thrift_itf, timeout=30):
        warnings.warn("Thrift interface {}:{} is not listening after 30 seconds, node may have failed to start.".format(thrift_itf[0], thrift_itf[1]))
def get_launch_bin(self):
    """
    Refresh this node's copy of the 'cassandra' launch script from the
    install dir and return the node-local path.
    """
    source_bin = common.join_bin(self.get_install_dir(), 'bin', 'cassandra')
    # Copy back the cassandra scripts since profiling may have modified it the previous time
    shutil.copy(source_bin, self.get_bin_dir())
    return common.join_bin(self.get_path(), 'bin', 'cassandra')
def add_custom_launch_arguments(self, args):
    # Hook for subclasses to append extra launch arguments; intentionally
    # a no-op for a plain Cassandra node.
    pass
def start(self,
          join_ring=True,
          no_wait=False,
          verbose=False,
          update_pid=True,
          wait_other_notice=True,
          replace_token=None,
          replace_address=None,
          jvm_args=None,
          wait_for_binary_proto=False,
          profile_options=None,
          use_jna=False,
          quiet_start=False,
          allow_root=False,
          set_migration_task=True):
    """
    Start the node. Options includes:
      - join_ring: if false, start the node with -Dcassandra.join_ring=False
      - no_wait: by default, this method returns when the node is started and listening to clients.
        If no_wait=True, the method returns sooner.
      - wait_other_notice: if truthy, this method returns only when all other live node of the cluster
        have marked this node UP. if an integer, sets the timeout for how long to wait
      - replace_token: start the node with the -Dcassandra.replace_token option.
      - replace_address: start the node with the -Dcassandra.replace_address option.
      - wait_for_binary_proto: if truthy, wait for the CQL interface; an integer sets the timeout.

    Returns the subprocess.Popen handle of the launched server.
    Raises NodeError if the node is already running or fails to start.
    """
    if jvm_args is None:
        jvm_args = []
    # Give schema migrations more slack on larger clusters (CASSANDRA-10731).
    if set_migration_task and self.cluster.cassandra_version() >= '3.0.1':
        jvm_args += ['-Dcassandra.migration_task_wait_in_seconds={}'.format(len(self.cluster.nodes) * 2)]
    # Validate Windows env
    if common.is_modern_windows_install(self.cluster.version()) and not common.is_ps_unrestricted():
        raise NodeError("PS Execution Policy must be unrestricted when running C* 2.1+")
    if not common.is_win() and quiet_start:
        common.warning("Tried to set Windows quiet start behavior, but we're not running on Windows.")
    if self.is_running():
        raise NodeError("{} is already running".format(self.name))
    # When replacing an address the old node may still hold the sockets,
    # so the availability check is skipped in that case.
    for itf in list(self.network_interfaces.values()):
        if itf is not None and replace_address is None:
            common.assert_socket_available(itf)
    if wait_other_notice:
        marks = [(node, node.mark_log()) for node in list(self.cluster.nodes.values()) if node.is_live()]
    self.mark = self.mark_log()
    launch_bin = self.get_launch_bin()
    # If Windows, change entries in .bat file to split conf from binaries
    if common.is_win():
        self.__clean_bat()
    if profile_options is not None:
        config = common.get_config()
        if 'yourkit_agent' not in config:
            raise NodeError("Cannot enable profile. You need to set 'yourkit_agent' to the path of your agent in a ~/.ccm/config")
        cmd = '-agentpath:{}'.format(config['yourkit_agent'])
        if 'options' in profile_options:
            cmd = cmd + '=' + profile_options['options']
        print_(cmd)
        # Yes, it's fragile as shit
        pattern = r'cassandra_parms="-Dlog4j.configuration=log4j-server.properties -Dlog4j.defaultInitOverride=true'
        common.replace_in_file(launch_bin, pattern, ' ' + pattern + ' ' + cmd + '"')
    # Make sure the copied launch script is executable.
    os.chmod(launch_bin, os.stat(launch_bin).st_mode | stat.S_IEXEC)
    env = self.get_env()
    extension.append_to_server_env(self, env)
    if common.is_win():
        self._clean_win_jmx()
    pidfile = os.path.join(self.get_path(), 'cassandra.pid')
    args = [launch_bin]
    self.add_custom_launch_arguments(args)
    args = args + ['-p', pidfile, '-Dcassandra.join_ring=%s' % str(join_ring)]
    args.append('-Dcassandra.logdir=%s' % os.path.join(self.get_path(), 'logs'))
    if replace_token is not None:
        args.append('-Dcassandra.replace_token=%s' % str(replace_token))
    if replace_address is not None:
        args.append('-Dcassandra.replace_address=%s' % str(replace_address))
    if use_jna is False:
        args.append('-Dcassandra.boot_without_jna=true')
    if allow_root:
        args.append('-R')
    env['JVM_EXTRA_OPTS'] = env.get('JVM_EXTRA_OPTS', "") + " " + " ".join(jvm_args)
    # In case we are restarting a node
    # we risk reading the old cassandra.pid file
    self._delete_old_pid()
    process = None
    FNULL = open(os.devnull, 'w')
    stdout_sink = subprocess.PIPE if verbose else FNULL
    # write stderr to a temporary file to prevent overwhelming pipe (> 65K data).
    stderr = tempfile.SpooledTemporaryFile(max_size=0xFFFF)
    if common.is_win():
        # clean up any old dirty_pid files from prior runs
        if (os.path.isfile(self.get_path() + "/dirty_pid.tmp")):
            os.remove(self.get_path() + "/dirty_pid.tmp")
        if quiet_start and self.cluster.version() >= '2.2.4':
            args.append('-q')
        process = subprocess.Popen(args, cwd=self.get_bin_dir(), env=env, stdout=stdout_sink, stderr=stderr)
    else:
        process = subprocess.Popen(args, env=env, stdout=stdout_sink, stderr=stderr)
    # Keep the SpooledTemporaryFile reachable so callers can read stderr later.
    process.stderr_file = stderr
    # Our modified batch file writes a dirty output with more than just the pid - clean it to get in parity
    # with *nix operation here.
    if verbose:
        stdout, stderr = process.communicate()
        print_(stdout)
        print_(stderr)
    if common.is_win():
        self.__clean_win_pid()
        self._update_pid(process)
        print_("Started: {0} with pid: {1}".format(self.name, self.pid), file=sys.stderr, flush=True)
    elif update_pid:
        self._update_pid(process)
        if not self.is_running():
            raise NodeError("Error starting node %s" % self.name, process)
    # If wait_other_notice is a bool, we don't want to treat it as a
    # timeout. Other intlike types, though, we want to use.
    if common.is_intlike(wait_other_notice) and not isinstance(wait_other_notice, bool):
        for node, mark in marks:
            node.watch_log_for_alive(self, from_mark=mark, timeout=wait_other_notice)
    elif wait_other_notice:
        for node, mark in marks:
            node.watch_log_for_alive(self, from_mark=mark)
    # If wait_for_binary_proto is a bool, we don't want to treat it as a
    # timeout. Other intlike types, though, we want to use.
    if common.is_intlike(wait_for_binary_proto) and not isinstance(wait_for_binary_proto, bool):
        self.wait_for_binary_interface(from_mark=self.mark, timeout=wait_for_binary_proto)
    elif wait_for_binary_proto:
        self.wait_for_binary_interface(from_mark=self.mark)
    return process
def stop(self, wait=True, wait_other_notice=False, signal_event=signal.SIGTERM, **kwargs):
    """
    Stop the node.
      - wait: if True (the default), wait for the Cassandra process to be
        really dead. Otherwise return after having sent the kill signal.
      - wait_other_notice: return only when the other live nodes of the
        cluster have marked this node has dead.
      - signal_event: Signal event to send to Cassandra; default is to
        let Cassandra clean up and shut down properly (SIGTERM [15])
      - Optional:
          + gently: Let Cassandra clean up and shut down properly; unless
            false perform a 'kill -9' which shuts down faster.

    Returns True if the node was running (and is now stopped), False otherwise.
    Raises NodeError if the process is still alive after ~2 minutes of waiting.
    """
    if self.is_running():
        if wait_other_notice:
            # Snapshot log positions of the other live nodes before killing,
            # so we only match "dead" entries produced after this point.
            marks = [(node, node.mark_log()) for node in list(self.cluster.nodes.values()) if node.is_live() and node is not self]
        if common.is_win():
            # Just taskkill the instance, don't bother trying to shut it down gracefully.
            # Node recovery should prevent data loss from hard shutdown.
            # We have recurring issues with nodes not stopping / releasing files in the CI
            # environment so it makes more sense just to murder it hard since there's
            # really little downside.
            # We want the node to flush its data before shutdown as some tests rely on small writes being present.
            # The default Periodic sync at 10 ms may not have flushed data yet, causing tests to fail.
            # This is not a hard requirement, however, so we swallow any exceptions this may throw and kill anyway.
            if signal_event is signal.SIGTERM:
                try:
                    self.flush()
                except:
                    common.warning("Failed to flush node: {0} on shutdown.".format(self.name))
                    pass
            os.system("taskkill /F /PID " + str(self.pid))
            if self._find_pid_on_windows():
                common.warning("Failed to terminate node: {0} with pid: {1}".format(self.name, self.pid))
        else:
            # Determine if the signal event should be updated to keep API compatibility
            if 'gently' in kwargs and kwargs['gently'] is False:
                signal_event = signal.SIGKILL
            os.kill(self.pid, signal_event)
        if wait_other_notice:
            for node, mark in marks:
                node.watch_log_for_death(self, from_mark=mark)
        else:
            time.sleep(.1)
        still_running = self.is_running()
        if still_running and wait:
            wait_time_sec = 1
            for i in xrange(0, 7):
                # we'll double the wait time each try and cassandra should
                # not take more than 1 minute to shutdown
                time.sleep(wait_time_sec)
                if not self.is_running():
                    return True
                wait_time_sec = wait_time_sec * 2
            raise NodeError("Problem stopping node %s" % self.name)
        else:
            return True
    else:
        return False
def wait_for_compactions(self, timeout=120):
    """
    Wait for all compactions to finish on this node.

    Polls `nodetool compactionstats` once a second until it reports zero
    pending tasks; raises TimeoutError after *timeout* seconds.
    """
    done = re.compile("pending tasks: 0")
    deadline = time.time() + timeout
    while time.time() < deadline:
        stdout, stderr, rc = self.nodetool("compactionstats")
        if done.search(stdout):
            return
        time.sleep(1)
    raise TimeoutError("{} [{}] Compactions did not finish in {} seconds".format(time.strftime("%d %b %Y %H:%M:%S", time.gmtime()), self.name, timeout))
def nodetool_process(self, cmd):
    """
    Spawn `nodetool` against this node's JMX port with the whitespace-split
    arguments of *cmd*, and return the subprocess.Popen handle.
    """
    env = self.get_env()
    nodetool = self.get_tool('nodetool')
    args = [nodetool, '-h', 'localhost', '-p', str(self.jmx_port)]
    # NOTE(review): cmd.split() means arguments containing spaces cannot be
    # passed through quoted.
    args += cmd.split()
    return subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
def nodetool(self, cmd):
    """
    Run a nodetool command to completion and return the result of
    handle_external_tool_process (presumably (stdout, stderr, rc) — see
    run_cqlsh/run_sstablelevelreset callers treating it as a tuple).
    """
    p = self.nodetool_process(cmd)
    # Fix: the original appended cmd.split() as a nested list element,
    # producing a garbled command description in failure reporting.
    return handle_external_tool_process(p, ['nodetool', '-h', 'localhost', '-p', str(self.jmx_port)] + cmd.split())
def dsetool(self, cmd):
    """DSE-only tool; plain Cassandra nodes always reject it."""
    raise common.ArgumentError('Cassandra nodes do not support dsetool')
def dse(self, dse_options=None):
    """DSE-only entry point; plain Cassandra nodes always reject it."""
    raise common.ArgumentError('Cassandra nodes do not support dse')
def hadoop(self, hadoop_options=None):
    """DSE-only workload; plain Cassandra nodes always reject it."""
    raise common.ArgumentError('Cassandra nodes do not support hadoop')
def hive(self, hive_options=None):
    """DSE-only workload; plain Cassandra nodes always reject it."""
    raise common.ArgumentError('Cassandra nodes do not support hive')
def pig(self, pig_options=None):
    """DSE-only workload; plain Cassandra nodes always reject it."""
    raise common.ArgumentError('Cassandra nodes do not support pig')
def sqoop(self, sqoop_options=None):
    """DSE-only workload; plain Cassandra nodes always reject it."""
    raise common.ArgumentError('Cassandra nodes do not support sqoop')
def bulkload_process(self, options):
    """
    Spawn `sstableloader` pointed at this node and return the Popen handle.
    *options* is a list of extra command-line arguments.
    """
    loader_bin = common.join_bin(self.get_path(), 'bin', 'sstableloader')
    env = self.get_env()
    extension.append_to_client_env(self, env)
    # CASSANDRA-8358 switched from thrift to binary port
    host, port = self.network_interfaces['thrift'] if self.get_cassandra_version() < '2.2' else self.network_interfaces['binary']
    args = ['-d', host, '-p', str(port)]
    return subprocess.Popen([loader_bin] + args + options, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
def bulkload(self, options):
    """Run sstableloader to completion with *options*; returns its result."""
    process = self.bulkload_process(options=options)
    return handle_external_tool_process(process, ['sstable bulkload'] + options)
def scrub_process(self, options):
    """Spawn `sstablescrub` with *options* and return the Popen handle."""
    scrub_bin = self.get_tool('sstablescrub')
    env = self.get_env()
    return subprocess.Popen([scrub_bin] + options, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
def scrub(self, options):
    """Run sstablescrub to completion with *options*; returns its result."""
    process = self.scrub_process(options=options)
    return handle_external_tool_process(process, ['sstablescrub'] + options)
def verify_process(self, options):
    """Spawn `sstableverify` with *options* and return the Popen handle."""
    verify_bin = self.get_tool('sstableverify')
    env = self.get_env()
    return subprocess.Popen([verify_bin] + options, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
def verify(self, options):
    """Run sstableverify to completion with *options*; returns its result."""
    process = self.verify_process(options=options)
    return handle_external_tool_process(process, ['sstableverify'] + options)
def run_cli_process(self, cmds=None, cli_options=None):
    """
    Launch `cassandra-cli` against this node, feeding the semicolon-separated
    statements of *cmds* (if any) on stdin followed by `quit;`.
    Returns the subprocess.Popen handle.
    """
    if cli_options is None:
        cli_options = []
    cli = self.get_tool('cassandra-cli')
    env = self.get_env()
    host = self.network_interfaces['thrift'][0]
    port = self.network_interfaces['thrift'][1]
    args = ['-h', host, '-p', str(port), '--jmxport', str(self.jmx_port)] + cli_options
    sys.stdout.flush()
    # Fix: universal_newlines=True so stdin accepts str on Python 3 — the
    # writes below otherwise raise TypeError on a bytes pipe.  This also
    # makes the call consistent with run_cqlsh_process.
    p = subprocess.Popen([cli] + args, env=env, stdin=subprocess.PIPE, stderr=subprocess.PIPE,
                         stdout=subprocess.PIPE, universal_newlines=True)
    if cmds is not None:
        for cmd in cmds.split(';'):
            p.stdin.write(cmd + ';\n')
        p.stdin.write("quit;\n")
    return p
def run_cli(self, cmds=None, cli_options=None):
    """Run cassandra-cli to completion; returns handle_external_tool_process's result."""
    p = self.run_cli_process(cmds=cmds, cli_options=cli_options)
    # Fix: the original passed [cli_options], nesting the options list (or
    # None) as a single element of the reported command line; flatten it.
    return handle_external_tool_process(p, ['cassandra-cli'] + (cli_options or []))
def run_cqlsh_process(self, cmds=None, cqlsh_options=None):
    """
    Launch cqlsh against this node.

    With *cmds* set, spawns cqlsh with piped stdio, writes each
    semicolon-separated statement followed by `quit;`, and returns the
    Popen handle.  With cmds=None an interactive cqlsh is exec'd (on *nix
    this call never returns because os.execve replaces the process; on
    Windows a new console window is opened and None is returned).
    """
    if cqlsh_options is None:
        cqlsh_options = []
    cqlsh = self.get_tool('cqlsh')
    env = self.get_env()
    extension.append_to_client_env(self, env)
    # cqlsh switched from thrift to the native protocol in 2.1.
    if self.get_base_cassandra_version() >= 2.1:
        host, port = self.network_interfaces['binary']
    else:
        host, port = self.network_interfaces['thrift']
    args = []
    args += cqlsh_options
    extension.append_to_cqlsh_args(self, env, args)
    args += [host, str(port)]
    sys.stdout.flush()
    if cmds is None:
        if common.is_win():
            subprocess.Popen([cqlsh] + args, env=env, creationflags=subprocess.CREATE_NEW_CONSOLE)
        else:
            os.execve(cqlsh, [common.platform_binary('cqlsh')] + args, env)
    else:
        p = subprocess.Popen([cqlsh] + args, env=env, stdin=subprocess.PIPE, stderr=subprocess.PIPE,
                             stdout=subprocess.PIPE, universal_newlines=True)
        # NOTE(review): this inner check is redundant — cmds is known to be
        # non-None in this branch.
        if cmds is not None:
            for cmd in cmds.split(';'):
                cmd = cmd.strip()
                if cmd:
                    p.stdin.write(cmd + ';\n')
            p.stdin.write("quit;\n")
        return p
def run_cqlsh(self, cmds=None, cqlsh_options=None):
    """Run cqlsh to completion with *cmds*; returns its result."""
    process = self.run_cqlsh_process(cmds, cqlsh_options)
    return handle_external_tool_process(process, ['cqlsh', cmds, cqlsh_options])
def cli(self):
    """
    Launch an interactive cassandra-cli against this node and wrap the
    process in a CliSession.
    """
    cdir = self.get_install_dir()
    cli = common.join_bin(cdir, 'bin', 'cassandra-cli')
    env = self.get_env()
    host = self.network_interfaces['thrift'][0]
    port = self.network_interfaces['thrift'][1]
    args = ['-h', host, '-p', str(port), '--jmxport', str(self.jmx_port)]
    return CliSession(subprocess.Popen([cli] + args, env=env, stdin=subprocess.PIPE, stderr=subprocess.PIPE, stdout=subprocess.PIPE))
def set_log_level(self, new_level, class_name=None):
    """
    Record the desired log level (per-class if *class_name* is given,
    global otherwise) and rewrite the node's logger config.
    Returns self for chaining.
    Raises common.ArgumentError for an unknown level.
    """
    known_level = ['TRACE', 'DEBUG', 'INFO', 'WARN', 'ERROR', 'OFF']
    if new_level not in known_level:
        raise common.ArgumentError("Unknown log level %s (use one of %s)" % (new_level, " ".join(known_level)))
    if class_name:
        self.__classes_log_level[class_name] = new_level
    else:
        self.__global_log_level = new_level
    # loggers changed > 2.1
    if self.get_base_cassandra_version() < 2.1:
        self._update_log4j()
    else:
        self.__update_logback()
    return self
#
# Update log4j config: copy new log4j-server.properties into
# ~/.ccm/name-of-cluster/nodeX/conf/log4j-server.properties
#
def update_log4j(self, new_log4j_config):
    """Install *new_log4j_config* as this node's conf/log4j-server.properties."""
    target = os.path.join(self.get_conf_dir(), 'log4j-server.properties')
    common.copy_file(new_log4j_config, target)
#
# Update logback config: copy new logback.xml into
# ~/.ccm/name-of-cluster/nodeX/conf/logback.xml
#
def update_logback(self, new_logback_config):
    """Install *new_logback_config* as this node's conf/logback.xml."""
    target = os.path.join(self.get_conf_dir(), 'logback.xml')
    common.copy_file(new_logback_config, target)
def update_startup_byteman_script(self, byteman_startup_script):
    """
    Update the byteman startup script, i.e., rule injected before the node starts.

    :param byteman_startup_script: the relative path to the script
    :raise common.LoadError: if the node does not have byteman installed
    """
    if self.byteman_port == '0':
        raise common.LoadError('Byteman is not installed')
    self.byteman_startup_script = byteman_startup_script
    # Re-import config files so the new script is picked up at next start.
    self.import_config_files()
def clear(self, clear_all=False, only_data=False):
    """
    Remove this node's on-disk state.

    - clear_all: also remove saved_caches and logs.
    - only_data: keep directory skeletons and the system keyspace; only the
      table directories of non-system keyspaces are recreated empty.
    """
    data_dirs = ['data{0}'.format(x) for x in xrange(0, self.cluster.data_dir_count)]
    data_dirs.append("commitlogs")
    if clear_all:
        data_dirs.extend(['saved_caches', 'logs'])
    for d in data_dirs:
        full_dir = os.path.join(self.get_path(), d)
        if only_data and d != "commitlogs":
            for dir in os.listdir(full_dir):
                keyspace_dir = os.path.join(full_dir, dir)
                # Preserve the system keyspace so the node can still boot.
                if os.path.isdir(keyspace_dir) and dir != "system":
                    for f in os.listdir(keyspace_dir):
                        table_dir = os.path.join(keyspace_dir, f)
                        shutil.rmtree(table_dir)
                        os.mkdir(table_dir)
        else:
            common.rmdirs(full_dir)
            os.mkdir(full_dir)
    # Needed for any subdirs stored underneath a data directory.
    # Common for hints post CASSANDRA-6230
    for dir in self._get_directories():
        if not os.path.exists(dir):
            os.mkdir(dir)
def run_sstable2json(self, out_file=None, keyspace=None, datafiles=None, column_families=None, keys=None, enumerate_keys=False):
    """
    Run sstable2json over each matching sstable, writing the tool's stdout
    to *out_file* (default: sys.stdout).
    """
    if out_file is None:
        out_file = sys.stdout
    sstable2json = self._find_cmd('sstable2json')
    env = self.get_env()
    sstablefiles = self.__gather_sstables(datafiles, keyspace, column_families)
    print_(sstablefiles)
    for sstablefile in sstablefiles:
        print_("-- {0} -----".format(os.path.basename(sstablefile)))
        args = [sstable2json, sstablefile]
        if enumerate_keys:
            args = args + ["-e"]
        if keys is not None:
            for key in keys:
                args = args + ["-k", key]
        subprocess.call(args, env=env, stdout=out_file)
        print_("")
def run_json2sstable(self, in_file, ks, cf, keyspace=None, datafiles=None, column_families=None, enumerate_keys=False):
    """
    Run json2sstable, loading *in_file* (an open file object) into each
    matching sstable of keyspace *ks* / column family *cf*.

    NOTE(review): *enumerate_keys* is accepted but never used here;
    *keyspace*/*column_families* only filter which sstable files are
    targeted, while *ks*/*cf* are what json2sstable writes into.
    """
    json2sstable = self._find_cmd('json2sstable')
    env = self.get_env()
    sstablefiles = self.__gather_sstables(datafiles, keyspace, column_families)
    for sstablefile in sstablefiles:
        in_file_name = os.path.abspath(in_file.name)
        args = [json2sstable, "-s", "-K", ks, "-c", cf, in_file_name, sstablefile]
        subprocess.call(args, env=env)
def run_sstablesplit_process(self, datafiles=None, size=None, keyspace=None, column_families=None,
                             no_snapshot=False, debug=False):
    """
    Spawn one `sstablesplit` process per matching sstable and return the
    list of Popen handles (the caller is expected to reap them).
    """
    sstablesplit = self._find_cmd('sstablesplit')
    env = self.get_env()
    sstablefiles = self.__gather_sstables(datafiles, keyspace, column_families)
    processes = []

    # One helper invocation per sstable; appends the handle to `processes`.
    def do_split(f):
        print_("-- {0}-----".format(os.path.basename(f)))
        cmd = [sstablesplit]
        if size is not None:
            cmd += ['-s', str(size)]
        if no_snapshot:
            cmd.append('--no-snapshot')
        if debug:
            cmd.append('--debug')
        cmd.append(f)
        p = subprocess.Popen(cmd, cwd=os.path.join(self.get_install_dir(), 'bin'),
                             env=env, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        processes.append(p)

    for sstablefile in sstablefiles:
        do_split(sstablefile)
    return processes
def run_sstablesplit(self, datafiles=None, size=None, keyspace=None, column_families=None,
                     no_snapshot=False, debug=False):
    """Run sstablesplit over the matching sstables; returns one result per process."""
    procs = self.run_sstablesplit_process(datafiles, size, keyspace, column_families, no_snapshot, debug)
    return [handle_external_tool_process(proc, "sstablesplit") for proc in procs]
def run_sstablemetadata_process(self, datafiles=None, keyspace=None, column_families=None):
    """
    Spawn a single `sstablemetadata` invocation covering all matching
    sstables and return the Popen handle.
    """
    cdir = self.get_install_dir()
    sstablemetadata = common.join_bin(cdir, os.path.join('tools', 'bin'), 'sstablemetadata')
    env = self.get_env()
    sstablefiles = self.__gather_sstables(datafiles=datafiles, keyspace=keyspace, columnfamilies=column_families)
    cmd = [sstablemetadata]
    cmd.extend(sstablefiles)
    return subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
def run_sstablemetadata(self, datafiles=None, keyspace=None, column_families=None):
    """Run sstablemetadata to completion over the matching sstables."""
    process = self.run_sstablemetadata_process(datafiles, keyspace, column_families)
    description = "sstablemetadata on keyspace: {}, column_family: {}".format(keyspace, column_families)
    return handle_external_tool_process(process, description)
def run_sstabledump_process(self, datafiles=None, keyspace=None, column_families=None, keys=None, enumerate_keys=False, command=False):
    """
    Spawn `sstabledump` per matching sstable.

    With command=True each dump is awaited and printed immediately and the
    returned list is empty; otherwise the live Popen handles are returned.
    """
    sstabledump = self._find_cmd('sstabledump')
    env = self.get_env()
    sstablefiles = self.__gather_sstables(datafiles=datafiles, keyspace=keyspace, columnfamilies=column_families)
    processes = []

    # Dump a single sstable; either print synchronously (command=True) or
    # collect the process handle for the caller.
    def do_dump(sstable):
        if command:
            print_("-- {0} -----".format(os.path.basename(sstable)))
        cmd = [sstabledump, sstable]
        if enumerate_keys:
            cmd.append('-e')
        if keys is not None:
            for key in keys:
                cmd = cmd + ["-k", key]
        p = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
        if command:
            out, err, rc = handle_external_tool_process(p, "sstabledump")
            print_(out)
            print_('\n')
        else:
            processes.append(p)

    for sstable in sstablefiles:
        do_dump(sstable)
    return processes
def run_sstabledump(self, datafiles=None, keyspace=None, column_families=None, keys=None, enumerate_keys=False, command=False):
    """Run sstabledump over the matching sstables; returns one result per spawned process."""
    procs = self.run_sstabledump_process(datafiles, keyspace, column_families, keys, enumerate_keys, command)
    return [handle_external_tool_process(proc, "sstabledump") for proc in procs]
def run_sstableexpiredblockers_process(self, keyspace=None, column_family=None):
    """Spawn `sstableexpiredblockers` for the keyspace/table and return the Popen handle."""
    cdir = self.get_install_dir()
    sstableexpiredblockers = common.join_bin(cdir, os.path.join('tools', 'bin'), 'sstableexpiredblockers')
    env = self.get_env()
    cmd = [sstableexpiredblockers, keyspace, column_family]
    return subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
def run_sstableexpiredblockers(self, keyspace=None, column_family=None):
    """Run sstableexpiredblockers to completion; (None, None, None) if no process spawned."""
    process = self.run_sstableexpiredblockers_process(keyspace=keyspace, column_family=column_family)
    if process is None:
        return None, None, None
    return handle_external_tool_process(process, ['sstableexpiredblockers', keyspace, column_family])
def run_sstableupgrade_process(self, keyspace=None, column_family=None):
    """Spawn `sstableupgrade` for the keyspace/table and return the Popen handle."""
    # Removed the unused `cdir = self.get_install_dir()` local: the tool is
    # resolved through get_tool(), like the other *_process helpers.
    sstableupgrade = self.get_tool('sstableupgrade')
    env = self.get_env()
    cmd = [sstableupgrade, keyspace, column_family]
    return subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
def run_sstableupgrade(self, keyspace=None, column_family=None):
    """Run sstableupgrade to completion; (None, None, None) if no process spawned."""
    process = self.run_sstableupgrade_process(keyspace, column_family)
    if process is None:
        return None, None, None
    return handle_external_tool_process(process, "sstableupgrade on {} : {}".format(keyspace, column_family))
def get_sstablespath(self, datafiles=None, keyspace=None, tables=None, **kwargs):
    """
    Return the list of sstable paths matching the given filters.
    Extra keyword arguments are accepted and ignored for call-site
    compatibility.  (Fixed the `kawrgs` typo — the catch-all name is
    invisible to callers, so this is interface-safe.)
    """
    sstablefiles = self.__gather_sstables(datafiles=datafiles, keyspace=keyspace, columnfamilies=tables)
    return sstablefiles
def run_sstablerepairedset_process(self, set_repaired=True, datafiles=None, keyspace=None, column_families=None):
    """
    Spawn `sstablerepairedset` per matching sstable, marking each as
    repaired (set_repaired=True) or unrepaired, and return the Popen handles.
    """
    cdir = self.get_install_dir()
    sstablerepairedset = common.join_bin(cdir, os.path.join('tools', 'bin'), 'sstablerepairedset')
    env = self.get_env()
    sstablefiles = self.__gather_sstables(datafiles, keyspace, column_families)
    processes = []
    for sstable in sstablefiles:
        if set_repaired:
            cmd = [sstablerepairedset, "--really-set", "--is-repaired", sstable]
        else:
            cmd = [sstablerepairedset, "--really-set", "--is-unrepaired", sstable]
        p = subprocess.Popen(cmd, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        processes.append(p)
    return processes
def run_sstablerepairedset(self, set_repaired=True, datafiles=None, keyspace=None, column_families=None):
    """Run sstablerepairedset over the matching sstables; one result per process."""
    procs = self.run_sstablerepairedset_process(set_repaired=set_repaired, datafiles=datafiles, keyspace=keyspace, column_families=column_families)
    description = "sstablerepairedset on {} : {}".format(keyspace, column_families)
    return [handle_external_tool_process(proc, description) for proc in procs]
def run_sstablelevelreset_process(self, keyspace, cf):
    """Spawn `sstablelevelreset --really-reset` for the keyspace/table."""
    cdir = self.get_install_dir()
    sstablelevelreset = common.join_bin(cdir, os.path.join('tools', 'bin'), 'sstablelevelreset')
    env = self.get_env()
    cmd = [sstablelevelreset, "--really-reset", keyspace, cf]
    return subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
def run_sstablelevelreset(self, keyspace, cf):
    """Run sstablelevelreset to completion for the keyspace/table."""
    process = self.run_sstablelevelreset_process(keyspace, cf)
    return handle_external_tool_process(process, "sstablelevelreset on {} : {}".format(keyspace, cf))
def run_sstableofflinerelevel_process(self, keyspace, cf, dry_run=False):
    """Spawn `sstableofflinerelevel` (optionally --dry-run) for the keyspace/table."""
    cdir = self.get_install_dir()
    sstableofflinerelevel = common.join_bin(cdir, os.path.join('tools', 'bin'), 'sstableofflinerelevel')
    env = self.get_env()
    if dry_run:
        cmd = [sstableofflinerelevel, "--dry-run", keyspace, cf]
    else:
        cmd = [sstableofflinerelevel, keyspace, cf]
    return subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
def run_sstableofflinerelevel(self, keyspace, cf, dry_run=False):
    """Run sstableofflinerelevel to completion for the keyspace/table."""
    p = self.run_sstableofflinerelevel_process(keyspace, cf, dry_run=dry_run)
    # Fixed the tool name typo ("sstableoflinerelevel") in the reported
    # command description.
    return handle_external_tool_process(p, "sstableofflinerelevel on {} : {}".format(keyspace, cf))
def run_sstableverify_process(self, keyspace, cf, options=None):
    """Spawn `sstableverify` for the keyspace/table and return the Popen handle."""
    cdir = self.get_install_dir()
    sstableverify = common.join_bin(cdir, 'bin', 'sstableverify')
    env = self.get_env()
    cmd = [sstableverify, keyspace, cf]
    # Extra options are spliced in right after the binary, before the
    # keyspace/table positional arguments.
    if options is not None:
        cmd[1:1] = options
    return subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, env=env)
def run_sstableverify(self, keyspace, cf, options=None):
    """Run sstableverify to completion for the keyspace/table."""
    process = self.run_sstableverify_process(keyspace, cf, options=options)
    description = "sstableverify on {} : {} with options: {}".format(keyspace, cf, options)
    return handle_external_tool_process(process, description)
def _find_cmd(self, cmd):
    """
    Locates command under cassandra root and fixes permissions if needed.
    Returns the resolved path whether or not it exists on disk.
    """
    cdir = self.get_install_cassandra_root()
    # Offline tools moved to tools/bin in 2.1.
    if self.get_base_cassandra_version() >= 2.1:
        fcmd = common.join_bin(cdir, os.path.join('tools', 'bin'), cmd)
    else:
        fcmd = common.join_bin(cdir, 'bin', cmd)
    try:
        if os.path.exists(fcmd):
            os.chmod(fcmd, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
    # Fix: was a bare `except:`, which also swallowed KeyboardInterrupt /
    # SystemExit; chmod failure is best-effort, so warn and continue.
    except Exception:
        common.warning("Couldn't change permissions to use {0}.".format(cmd))
        common.warning("If it didn't work, you will have to do so manually.")
    return fcmd
def has_cmd(self, cmd):
    """
    Indicates if specified command can be found under cassandra root
    """
    resolved = self._find_cmd(cmd)
    return os.path.exists(resolved)
def list_keyspaces(self):
    """Return the keyspace directory names under data0, excluding 'system'."""
    names = os.listdir(os.path.join(self.get_path(), 'data0'))
    names.remove('system')
    return names
def get_sstables_per_data_directory(self, keyspace, column_family):
    """
    Return one list per data directory of live -Data.db sstable paths for
    the given keyspace / column family.

    Raises common.ArgumentError if the keyspace directory is missing in
    any data directory.
    """
    keyspace_dirs = [os.path.join(self.get_path(), "data{0}".format(x), keyspace) for x in xrange(0, self.cluster.data_dir_count)]
    cf_glob = '*'
    if column_family:
        # account for changes in data dir layout from CASSANDRA-5202
        if self.get_base_cassandra_version() < 2.1:
            cf_glob = column_family
        else:
            cf_glob = column_family + '-*'
    for keyspace_dir in keyspace_dirs:
        if not os.path.exists(keyspace_dir):
            raise common.ArgumentError("Unknown keyspace {0}".format(keyspace))
    # data directory layout is changed from 1.1
    if self.get_base_cassandra_version() < 1.1:
        files = [glob.glob(os.path.join(keyspace_dir, "{0}*-Data.db".format(column_family))) for keyspace_dir in keyspace_dirs]
    elif self.get_base_cassandra_version() < 2.2:
        files = [glob.glob(os.path.join(keyspace_dir, cf_glob, "%s-%s*-Data.db" % (keyspace, column_family))) for keyspace_dir in keyspace_dirs]
    else:
        files = [glob.glob(os.path.join(keyspace_dir, cf_glob, "*-Data.db")) for keyspace_dir in keyspace_dirs]
    # Fix: drop sstables that have a -Compacted marker.  The original code
    # called files.remove(f) — i.e. tried to remove a file path from the
    # outer list-of-lists while iterating it, which raises ValueError the
    # moment a Compacted marker actually exists.  Filter each directory's
    # list instead.
    return [[f for f in per_dir if not os.path.exists(f.replace('Data.db', 'Compacted'))]
            for per_dir in files]
def get_sstables(self, keyspace, column_family):
    """Return all matching sstable paths across data directories, flattened."""
    per_directory = self.get_sstables_per_data_directory(keyspace, column_family)
    return [path for directory in per_directory for path in directory]
def get_sstables_via_sstableutil(self, keyspace, table, sstabletype='all', oplogs=False, cleanup=False, match='-Data.db'):
    """
    List sstable files for keyspace/table using the `sstableutil` tool,
    returning the sorted subset of output lines containing *match*.
    Raises Exception (after logging full output) on a non-zero exit.
    """
    env = common.make_cassandra_env(self.get_install_cassandra_root(), self.get_node_cassandra_root())
    tool_bin = self.get_tool('sstableutil')
    args = [tool_bin, '--type', sstabletype]
    if oplogs:
        args.extend(['--oplog'])
    if cleanup:
        args.extend(['--cleanup'])
    args.extend([keyspace, table])
    p = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    if p.returncode != 0:
        common.error("""Error invoking sstableutil; returned {code}; args={args}; env={env}
stdout:
{stdout}
stderr:
{stderr}
""".format(code=p.returncode, args=args, env=env, stdout=stdout, stderr=stderr))
        raise Exception("Error invoking sstableutil; returned {code}".format(code=p.returncode))
    # NOTE(review): stdout is bytes here (no universal_newlines), so `match`
    # matching presumably relies on Python 2 str semantics — verify on Py3.
    return sorted(filter(lambda s: match in s, stdout.splitlines()))
def stress_process(self, stress_options=None, whitelist=False):
    """
    Spawn cassandra-stress against this node and return the Popen handle
    (or None if interrupted while spawning).
    """
    if stress_options is None:
        stress_options = []
    else:
        # Copy so the caller's list is not mutated by the appends below.
        stress_options = stress_options[:]
    stress = common.get_stress_bin(self.get_install_dir())
    # Old stress used -d <host>; newer stress uses -node [whitelist] <host>.
    if self.cluster.cassandra_version() <= '2.1':
        stress_options.append('-d')
        stress_options.append(self.address())
    else:
        stress_options.append('-node')
        if whitelist:
            stress_options.append("whitelist")
        stress_options.append(self.address())
        # specify used jmx port if not already set
        if not [opt for opt in stress_options if opt.startswith('jmx=')]:
            stress_options.extend(['-port', 'jmx=' + self.jmx_port])
    args = [stress] + stress_options
    try:
        p = subprocess.Popen(args, cwd=common.parse_path(stress),
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return p
    except KeyboardInterrupt:
        pass
def stress(self, stress_options=None, whitelist=False):
    """Run cassandra-stress to completion; returns its result, or None if interrupted."""
    # Fix: normalize None here as well — stress_process defaults its own
    # copy to [], but the local name stayed None, so the
    # `['stress'] + stress_options` below raised TypeError.
    if stress_options is None:
        stress_options = []
    p = self.stress_process(stress_options=stress_options, whitelist=whitelist)
    try:
        return handle_external_tool_process(p, ['stress'] + stress_options)
    except KeyboardInterrupt:
        pass
def shuffle(self, cmd):
    """
    Run `cassandra-shuffle` (vnode shuffle utility) against this node's
    JMX port with the given subcommand.
    """
    cdir = self.get_install_dir()
    shuffle = common.join_bin(cdir, 'bin', 'cassandra-shuffle')
    host = self.address()
    args = [shuffle, '-h', host, '-p', str(self.jmx_port)] + [cmd]
    try:
        subprocess.call(args)
    except KeyboardInterrupt:
        pass
def data_size(self, live_data=None):
    """Uses `nodetool info` to get the size of a node's data in KB."""
    if live_data is not None:
        warnings.warn("The 'live_data' keyword argument is deprecated.",
                      DeprecationWarning)
    info_output = self.nodetool('info')[0]
    return _get_load_from_info_output(info_output)
def flush(self, options=None):
    """Run `nodetool flush` with optional extra arguments."""
    extra = options if options is not None else []
    self.nodetool(' '.join(["flush"] + extra))
def compact(self, options=None):
    """Run `nodetool compact` with optional extra arguments."""
    extra = options if options is not None else []
    self.nodetool(' '.join(["compact"] + extra))
def drain(self, block_on_log=False):
    """Run `nodetool drain`, optionally blocking until DRAINED shows in the log."""
    log_mark = self.mark_log()
    self.nodetool("drain")
    if block_on_log:
        self.watch_log_for("DRAINED", from_mark=log_mark)
def repair(self, options=None):
    """Run `nodetool repair` with optional extra arguments; returns nodetool's result."""
    extra = options if options is not None else []
    return self.nodetool(' '.join(["repair"] + extra))
def move(self, new_token):
    """Run `nodetool move` to relocate this node to *new_token*."""
    self.nodetool("move " + str(new_token))
def cleanup(self, options=None):
    """Run `nodetool cleanup` with optional extra arguments."""
    extra = options if options is not None else []
    self.nodetool(' '.join(["cleanup"] + extra))
def decommission(self, force=False):
    """Decommission this node (optionally --force) and persist the new status."""
    cmd = 'decommission --force' if force else 'decommission'
    self.nodetool(cmd)
    self.status = Status.DECOMMISSIONED
    self._update_config()
def removeToken(self, token):
    """Run `nodetool removeToken` for the given token.  (camelCase name kept
    for backward compatibility with existing callers.)"""
    self.nodetool("removeToken " + str(token))
def import_config_files(self):
    """
    Regenerate this node's configuration: node.conf, copied conf files,
    cassandra.yaml, logger config and the env file.
    """
    self._update_config()
    self.copy_config_files()
    self.__update_yaml()
    # loggers changed > 2.1
    if self.get_base_cassandra_version() < 2.1:
        self._update_log4j()
    else:
        self.__update_logback()
    self.__update_envfile()
def import_dse_config_files(self):
    """DSE-only operation; plain Cassandra nodes always reject it."""
    raise common.ArgumentError('Cannot import DSE configuration files on a Cassandra node')
def copy_config_files(self):
    """Copy every regular file from the install's conf/ into this node's conf dir."""
    source_dir = os.path.join(self.get_install_dir(), 'conf')
    for entry in os.listdir(source_dir):
        path = os.path.join(source_dir, entry)
        if os.path.isfile(path):
            shutil.copy(path, self.get_conf_dir())
def import_bin_files(self):
    """Copy launch scripts from the install's bin/ into this node and mark them executable."""
    source_dir = os.path.join(self.get_install_dir(), 'bin')
    for entry in os.listdir(source_dir):
        path = os.path.join(source_dir, entry)
        if os.path.isfile(path):
            shutil.copy(path, self.get_bin_dir())
            common.add_exec_permission(source_dir, entry)
def __clean_bat(self):
    # While the Windows specific changes to the batch files to get them to run are
    # fairly extensive and thus pretty brittle, all the changes are very unique to
    # the needs of ccm and shouldn't be pushed into the main repo.

    # Change the nodes to separate jmx ports
    bin_dir = os.path.join(self.get_path(), 'bin')
    jmx_port_pattern = "-Dcom.sun.management.jmxremote.port="
    bat_file = os.path.join(bin_dir, "cassandra.bat")
    common.replace_in_file(bat_file, jmx_port_pattern, " " + jmx_port_pattern + self.jmx_port + "^")

    # Split binaries from conf
    home_pattern = "if NOT DEFINED CASSANDRA_HOME set CASSANDRA_HOME=%CD%"
    common.replace_in_file(bat_file, home_pattern, "set CASSANDRA_HOME=" + self.get_install_dir())
    classpath_pattern = "set CLASSPATH=\\\"%CASSANDRA_HOME%\\\\conf\\\""
    common.replace_in_file(bat_file, classpath_pattern, "set CCM_DIR=\"" + self.get_path() + "\"\nset CLASSPATH=\"%CCM_DIR%\\conf\"")

    # escape the double quotes in name of the lib files in the classpath
    jar_file_pattern = "do call :append \"%%i\""
    for_statement = "for %%i in (\"%CASSANDRA_HOME%\lib\*.jar\")"
    common.replace_in_file(bat_file, jar_file_pattern, for_statement + " do call :append \\\"%%i\\\"")

    # escape double quotes in java agent path
    class_dir_pattern = "-javaagent:"
    common.replace_in_file(bat_file, class_dir_pattern, " -javaagent:\\\"%CASSANDRA_HOME%\\lib\\jamm-0.2.5.jar\\\"^")

    # escape the double quotes in name of the class directories
    class_dir_pattern = "set CASSANDRA_CLASSPATH="
    main_classes = "\\\"%CASSANDRA_HOME%\\build\\classes\\main\\\";"
    thrift_classes = "\\\"%CASSANDRA_HOME%\\build\\classes\\thrift\\\""
    common.replace_in_file(bat_file, class_dir_pattern, "set CASSANDRA_CLASSPATH=%CLASSPATH%;" +
                           main_classes + thrift_classes)

    # background the server process and grab the pid
    run_text = "\\\"%JAVA_HOME%\\bin\\java\\\" %JAVA_OPTS% %CASSANDRA_PARAMS% -cp %CASSANDRA_CLASSPATH% \\\"%CASSANDRA_MAIN%\\\""
    run_pattern = ".*-cp.*"
    common.replace_in_file(bat_file, run_pattern, "wmic process call create \"" + run_text + "\" > \"" +
                           self.get_path() + "/dirty_pid.tmp\"\n")

    # On Windows, remove the VerifyPorts check from cassandra.ps1
    if self.cluster.version() >= '2.1':
        common.replace_in_file(os.path.join(self.get_path(), 'bin', 'cassandra.ps1'), ' VerifyPortsAreAvailable', '')

    # Specifically call the .ps1 file in our node's folder
    common.replace_in_file(bat_file, 'powershell /file .*', 'powershell /file "' + os.path.join(self.get_path(), 'bin', 'cassandra.ps1" %*'))
def _save(self):
    """Flush every piece of this node's configuration to disk.

    Rewrites cassandra.yaml, the logging configuration (log4j before
    2.1, logback from 2.1 onwards), the environment file and ccm's own
    node.conf metadata.
    """
    self.__update_yaml()
    # Cassandra switched its logging framework from log4j to logback in 2.1.
    if self.get_base_cassandra_version() >= 2.1:
        self.__update_logback()
    else:
        self._update_log4j()
    self.__update_envfile()
    self._update_config()
def _update_config(self):
    """Serialize this node's ccm metadata into <node_path>/node.conf.

    Creates the node directory tree on first use.  Optional attributes
    are written only when they carry a meaningful value so the YAML
    stays minimal.
    """
    node_dir = self.get_path()
    if not os.path.exists(node_dir):
        os.mkdir(node_dir)
        for directory in self._get_directories():
            os.mkdir(directory)

    values = {
        'name': self.name,
        'status': self.status,
        'auto_bootstrap': self.auto_bootstrap,
        'interfaces': self.network_interfaces,
        'jmx_port': self.jmx_port,
        'config_options': self.__config_options,
        'dse_config_options': self._dse_config_options,
        'environment_variables': self.__environment_variables,
    }
    # Optional entries -- persisted only when set.
    if self.pid:
        values['pid'] = self.pid
    if self.initial_token:
        values['initial_token'] = self.initial_token
    if self.__install_dir is not None:
        values['install_dir'] = self.__install_dir
    if self.remote_debug_port:
        values['remote_debug_port'] = self.remote_debug_port
    if self.byteman_port:
        values['byteman_port'] = self.byteman_port
    if self.data_center:
        values['data_center'] = self.data_center
    if self.workloads is not None:
        values['workloads'] = self.workloads

    with open(os.path.join(node_dir, 'node.conf'), 'w') as conf:
        yaml.safe_dump(values, conf)
def __update_yaml(self):
    """Regenerate cassandra.yaml for this node.

    Loads the stock cassandra.yaml from the node's conf dir, overlays
    cluster/node specific settings (addresses, ports, directories,
    seeds, partitioner and user-supplied config options) and writes it
    back in place.
    """
    conf_file = os.path.join(self.get_conf_dir(), common.CASSANDRA_CONF)
    # Read the file once; the raw text is kept around so we can probe for
    # option names (e.g. 'hints_directory') that only exist in some versions.
    # Previously the file was opened and read twice.
    with open(conf_file, 'r') as f:
        yaml_text = f.read()
    # safe_load instead of bare load(): calling yaml.load without an explicit
    # Loader is deprecated and can construct arbitrary Python objects.
    data = yaml.safe_load(yaml_text)

    data['cluster_name'] = self.cluster.name
    data['auto_bootstrap'] = self.auto_bootstrap
    data['initial_token'] = self.initial_token
    if not self.cluster.use_vnodes and self.get_base_cassandra_version() >= 1.2:
        data['num_tokens'] = 1
    if 'seeds' in data:
        # cassandra 0.7
        data['seeds'] = self.cluster.get_seeds()
    else:
        # cassandra 0.8
        data['seed_provider'][0]['parameters'][0]['seeds'] = ','.join(self.cluster.get_seeds())
    data['listen_address'], data['storage_port'] = self.network_interfaces['storage']
    if self.network_interfaces['thrift'] is not None and self.get_base_cassandra_version() < 4:
        data['rpc_address'], data['rpc_port'] = self.network_interfaces['thrift']
    if self.network_interfaces['binary'] is not None and self.get_base_cassandra_version() >= 1.2:
        # NOTE: rpc_address is (re)used as the native transport address here.
        data['rpc_address'], data['native_transport_port'] = self.network_interfaces['binary']

    data['data_file_directories'] = [os.path.join(self.get_path(), 'data{0}'.format(x)) for x in xrange(0, self.cluster.data_dir_count)]
    data['commitlog_directory'] = os.path.join(self.get_path(), 'commitlogs')
    data['saved_caches_directory'] = os.path.join(self.get_path(), 'saved_caches')
    if self.get_cassandra_version() > '3.0' and 'hints_directory' in yaml_text:
        data['hints_directory'] = os.path.join(self.get_path(), 'hints')
    if self.get_cassandra_version() >= '3.8':
        data['cdc_raw_directory'] = os.path.join(self.get_path(), 'cdc_raw')
    if self.cluster.partitioner:
        data['partitioner'] = self.cluster.partitioner

    # Combined cluster and node configuration, node options taking precedence.
    full_options = common.merge_configuration(
        self.cluster._config_options,
        self.__config_options, delete_empty=False)
    # Merge options with original yaml data.
    data = common.merge_configuration(data, full_options)

    with open(conf_file, 'w') as f:
        yaml.safe_dump(data, f, default_flow_style=False)
def _update_log4j(self):
    """Point log4j (pre-2.1) at this node's log file and apply log levels."""
    conf_file = os.path.join(self.get_conf_dir(), common.LOG4J_CONF)
    log_file = os.path.join(self.get_path(), 'logs', 'system.log')
    # log4j isn't partial to Windows \. I can't imagine why not.
    if common.is_win():
        log_file = re.sub("\\\\", "/", log_file)
    append_pattern = 'log4j.appender.R.File='
    common.replace_in_file(conf_file, append_pattern, append_pattern + log_file)
    # Global log level, when one was requested.
    if self.__global_log_level is not None:
        root_pattern = 'log4j.rootLogger='
        common.replace_in_file(conf_file, root_pattern, root_pattern + self.__global_log_level + ',stdout,R')
    # Per-class log levels.
    for class_name in self.__classes_log_level:
        logger_line = 'log4j.logger' + '.' + class_name + '='
        common.replace_or_add_into_file_tail(conf_file, logger_line, logger_line + self.__classes_log_level[class_name])
def __update_logback(self):
    """Apply the configured log levels to both logback files (server and tools)."""
    for conf_name in (common.LOGBACK_CONF, common.LOGBACK_TOOLS_CONF):
        self.__update_logback_loglevel(os.path.join(self.get_conf_dir(), conf_name))
def __update_logback_loglevel(self, conf_file):
    """Rewrite the log-level settings inside one logback configuration file.

    Only acts on levels the user overrode (global and per-class); the
    rest of the file is left untouched.
    """
    # Setting the right log level - 2.2.2 introduced new debug log
    if self.get_cassandra_version() >= '2.2.2' and self.__global_log_level:
        if self.__global_log_level in ['DEBUG', 'TRACE']:
            root_log_level = self.__global_log_level
            cassandra_log_level = self.__global_log_level
        elif self.__global_log_level == 'INFO':
            root_log_level = self.__global_log_level
            cassandra_log_level = 'DEBUG'
        elif self.__global_log_level in ['WARN', 'ERROR']:
            root_log_level = 'INFO'
            cassandra_log_level = 'DEBUG'
            # WARN/ERROR additionally tighten the appender's level filter.
            system_log_filter_pattern = '<level>.*</level>'
            common.replace_in_file(conf_file, system_log_filter_pattern, ' <level>' + self.__global_log_level + '</level>')
        elif self.__global_log_level == 'OFF':
            root_log_level = self.__global_log_level
            cassandra_log_level = self.__global_log_level
        # NOTE(review): cassandra_log_level is unbound when __global_log_level
        # is none of the handled values -- confirm callers validate the level.
        cassandra_append_pattern = '<logger name="org.apache.cassandra" level=".*"/>'
        common.replace_in_file(conf_file, cassandra_append_pattern, ' <logger name="org.apache.cassandra" level="' + cassandra_log_level + '"/>')
    else:
        root_log_level = self.__global_log_level
    # Replace the global (root) log level.
    if self.__global_log_level is not None:
        root_append_pattern = '<root level=".*">'
        common.replace_in_file(conf_file, root_append_pattern, '<root level="' + root_log_level + '">')
    # Class specific log levels
    for class_name in self.__classes_log_level:
        logger_pattern = '\t<logger name="'
        full_logger_pattern = logger_pattern + class_name + '" level=".*"/>'
        common.replace_or_add_into_file_tail(conf_file, full_logger_pattern, logger_pattern + class_name + '" level="' + self.__classes_log_level[class_name] + '"/>')
def __update_envfile(self):
    """Patch the env file (cassandra-env.sh / cassandra-env.ps1) and jvm.options.

    Applies the JMX port, remote-debug agent, byteman agent, stack size,
    GC log path and IPv6 JVM flags appropriate for this node/version.
    """
    agentlib_setting = '-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address={}'.format(str(self.remote_debug_port))
    remote_debug_options = agentlib_setting
    # The cassandra-env.ps1 file has been introduced in 2.1
    if common.is_modern_windows_install(self.get_base_cassandra_version()):
        conf_file = os.path.join(self.get_conf_dir(), common.CASSANDRA_WIN_ENV)
        jvm_file = os.path.join(self.get_conf_dir(), common.JVM_OPTS)
        jmx_port_pattern = '^\s+\$JMX_PORT='
        jmx_port_setting = ' $JMX_PORT="' + self.jmx_port + '"'
        if self.get_cassandra_version() < '3.2':
            # Before 3.2 the debug flag goes into the env file, not jvm.options
            # (see the remote-debug handling below).
            remote_debug_options = ' $env:JVM_OPTS="$env:JVM_OPTS {}"'.format(agentlib_setting)
    else:
        conf_file = os.path.join(self.get_conf_dir(), common.CASSANDRA_ENV)
        jvm_file = os.path.join(self.get_conf_dir(), common.JVM_OPTS)
        jmx_port_pattern = 'JMX_PORT='
        jmx_port_setting = 'JMX_PORT="' + self.jmx_port + '"'
        if self.get_cassandra_version() < '3.2':
            remote_debug_options = 'JVM_OPTS="$JVM_OPTS {}"'.format(agentlib_setting)
    common.replace_in_file(conf_file, jmx_port_pattern, jmx_port_setting)
    # Split binaries from conf on modern Windows installs: point the conf
    # variables at ccm's per-node directory instead of the install tree.
    if common.is_modern_windows_install(common.get_version_from_build(node_path=self.get_path())):
        dst = os.path.join(self.get_conf_dir(), common.CASSANDRA_WIN_ENV)
        replacements = [
            ('env:CASSANDRA_HOME =', ' $env:CASSANDRA_HOME="%s"' % self.get_install_dir()),
            ('env:CASSANDRA_CONF =', ' $env:CCM_DIR="' + self.get_path() + '\\conf"\n $env:CASSANDRA_CONF="$env:CCM_DIR"'),
            ('cp = ".*?env:CASSANDRA_HOME.conf', ' $cp = """$env:CASSANDRA_CONF"""')
        ]
        common.replaces_in_file(dst, replacements)
    if self.remote_debug_port != '0':
        remote_debug_port_pattern = '((-Xrunjdwp:)|(-agentlib:jdwp=))transport=dt_socket,server=y,suspend=n,address='
        if self.get_cassandra_version() < '3.2':
            common.replace_in_file(conf_file, remote_debug_port_pattern, remote_debug_options)
        else:
            common.replace_in_file(jvm_file, remote_debug_port_pattern, remote_debug_options)
    if self.byteman_port != '0':
        # Attach the byteman agent jar found in the install's build tree.
        byteman_jar = glob.glob(os.path.join(self.get_install_dir(), 'build', 'lib', 'jars', 'byteman-[0-9]*.jar'))[0]
        agent_string = "-javaagent:{}=listener:true,boot:{},port:{}".format(byteman_jar, byteman_jar, str(self.byteman_port))
        if self.byteman_startup_script is not None:
            agent_string = agent_string + ",script:{}".format(self.byteman_startup_script)
        if common.is_modern_windows_install(self.get_base_cassandra_version()):
            with open(conf_file, "r+") as conf_rewrite:
                conf_lines = conf_rewrite.readlines()
                # Remove trailing brace, will be replaced
                conf_lines = conf_lines[:-1]
                conf_lines.append(" $env:JVM_OPTS=\"$env:JVM_OPTS {}\"\n}}\n".format(agent_string))
                conf_rewrite.seek(0)
                conf_rewrite.truncate()
                conf_rewrite.writelines(conf_lines)
        else:
            common.replaces_or_add_into_file_tail(conf_file, [('.*byteman.*', "JVM_OPTS=\"$JVM_OPTS {}\"".format(agent_string))], add_config_close=False)
    if self.get_cassandra_version() < '2.0.1':
        common.replace_in_file(conf_file, "-Xss", ' JVM_OPTS="$JVM_OPTS -Xss228k"')
    # gc.log was turned on by default in 2.2.5/3.0.3/3.3
    if self.get_cassandra_version() >= '2.2.5':
        gc_log_pattern = "-Xloggc"
        gc_log_path = os.path.join(self.get_path(), 'logs', 'gc.log')
        if common.is_win():
            gc_log_setting = ' $env:JVM_OPTS="$env:JVM_OPTS -Xloggc:{}"'.format(gc_log_path)
        else:
            gc_log_setting = 'JVM_OPTS="$JVM_OPTS -Xloggc:{}"'.format(gc_log_path)
        common.replace_in_file(conf_file, gc_log_pattern, gc_log_setting)
    # If any configured interface is IPv6, flip the JVM to prefer IPv6.
    for itf in list(self.network_interfaces.values()):
        if itf is not None and common.interface_is_ipv6(itf):
            if self.get_cassandra_version() < '3.2':
                if common.is_win():
                    common.replace_in_file(conf_file,
                                           '-Djava.net.preferIPv4Stack=true',
                                           '\t$env:JVM_OPTS="$env:JVM_OPTS -Djava.net.preferIPv4Stack=false -Djava.net.preferIPv6Addresses=true"')
                else:
                    common.replace_in_file(conf_file,
                                           '-Djava.net.preferIPv4Stack=true',
                                           'JVM_OPTS="$JVM_OPTS -Djava.net.preferIPv4Stack=false -Djava.net.preferIPv6Addresses=true"')
                break
            else:
                common.replace_in_file(jvm_file, '-Djava.net.preferIPv4Stack=true', '')
                break
def __update_status(self):
    """Refresh self.status by probing the node's process.

    A node whose pid has vanished moves UP/DECOMMISSIONED -> DOWN; a
    live pid moves DOWN/UNINITIALIZED -> UP.  node.conf is rewritten
    whenever the status actually changed.
    """
    if self.pid is None:
        if self.status in (Status.UP, Status.DECOMMISSIONED):
            self.status = Status.DOWN
        return

    old_status = self.status
    # os.kill on windows doesn't allow us to ping a process
    if common.is_win():
        self.__update_status_win()
    else:
        try:
            os.kill(self.pid, 0)
        except OSError as err:
            # ESRCH: process gone; EPERM: exists but we may not signal it.
            # Both are treated as "not running" here.
            if err.errno in (errno.ESRCH, errno.EPERM):
                if self.status in (Status.UP, Status.DECOMMISSIONED):
                    self.status = Status.DOWN
            else:
                # some other error
                raise err
        else:
            if self.status in (Status.DOWN, Status.UNINITIALIZED):
                self.status = Status.UP

    if old_status != self.status:
        if old_status == Status.UP and self.status == Status.DOWN:
            self.pid = None
        self._update_config()
def __update_status_win(self):
    """Windows status probe: rely on pid lookup instead of os.kill."""
    if not self._find_pid_on_windows():
        self.status = Status.DOWN
    elif self.status in (Status.DOWN, Status.UNINITIALIZED):
        self.status = Status.UP
def _find_pid_on_windows(self):
    """Return True when self.pid corresponds to a live Windows process.

    Prefers psutil; falls back to scanning `tasklist` output when psutil
    is not installed.
    """
    try:
        import psutil
        return psutil.pid_exists(self.pid)
    except ImportError:
        common.warning("psutil not installed. Pid tracking functionality will suffer. See README for details.")
        proc = subprocess.Popen('tasklist /fi "PID eq ' + str(self.pid) + '"',
                                shell=True, stdout=subprocess.PIPE)
        found = False
        # tasklist prints an 'Image Name' header row only when a match exists.
        for line in proc.stdout:
            if re.match("Image", str(line)):
                found = True
        return found
def _get_directories(self):
    """Return every directory ccm manages for this node (absolute paths)."""
    root = self.get_path()
    fixed = [os.path.join(root, name)
             for name in ['commitlogs', 'saved_caches', 'logs', 'conf', 'bin', 'hints']]
    data = [os.path.join(root, 'data{0}'.format(i))
            for i in xrange(0, self.cluster.data_dir_count)]
    return fixed + data
def __get_status_string(self):
    """Human-readable status; flags never-started nodes explicitly."""
    if self.status != Status.UNINITIALIZED:
        return self.status
    return "{} ({})".format(Status.DOWN, "Not initialized")
def __clean_win_pid(self):
    """Wait for the Windows launcher to produce a usable cassandra.pid.

    On 2.1+ the .bat writes cassandra.pid itself; on older versions a
    wmic dump lands in dirty_pid.tmp and the ProcessId must be parsed
    out and copied into cassandra.pid.
    """
    start = common.now_ms()
    if self.get_base_cassandra_version() >= 2.1:
        # Spin for up to 15s waiting for .bat to write the pid file
        pidfile = self.get_path() + "/cassandra.pid"
        while (not os.path.isfile(pidfile)):
            now = common.now_ms()
            if (now - start > 15000):
                raise Exception('Timed out waiting for pid file.')
            else:
                time.sleep(.001)
        # Spin for up to 10s waiting for .bat to fill the pid file
        start = common.now_ms()
        while (os.stat(pidfile).st_size == 0):
            now = common.now_ms()
            if (now - start > 10000):
                raise Exception('Timed out waiting for pid file to be filled.')
            else:
                time.sleep(.001)
    else:
        try:
            # Spin for 500ms waiting for .bat to write the dirty_pid file
            while (not os.path.isfile(self.get_path() + "/dirty_pid.tmp")):
                now = common.now_ms()
                if (now - start > 500):
                    raise Exception('Timed out waiting for dirty_pid file.')
                else:
                    time.sleep(.001)
            with open(self.get_path() + "/dirty_pid.tmp", 'r') as f:
                found = False
                process_regex = re.compile('ProcessId')
                readStart = common.now_ms()
                readEnd = common.now_ms()
                # Poll the wmic output for up to 500ms looking for a
                # 'ProcessId = NNN;' line.
                while (found is False and readEnd - readStart < 500):
                    line = f.read()
                    if (line):
                        m = process_regex.search(line)
                        if (m):
                            found = True
                            # 'ProcessId = NNN;'  ->  NNN
                            linesub = line.split('=')
                            pidchunk = linesub[1].split(';')
                            win_pid = pidchunk[0].lstrip()
                            with open(self.get_path() + "/cassandra.pid", 'w') as pidfile:
                                found = True
                                pidfile.write(win_pid)
                    else:
                        time.sleep(.001)
                    readEnd = common.now_ms()
                if not found:
                    raise Exception('Node: %s Failed to find pid in ' +
                                    self.get_path() +
                                    '/dirty_pid.tmp. Manually kill it and check logs - ccm will be out of sync.')
        except Exception as e:
            common.error("Problem starting " + self.name + " (" + str(e) + ")")
            raise Exception('Error while parsing <node>/dirty_pid.tmp in path: ' + self.get_path())
def _delete_old_pid(self):
    """Remove a stale cassandra.pid left behind by a previous run."""
    stale = os.path.join(self.get_path(), 'cassandra.pid')
    if os.path.isfile(stale):
        os.remove(stale)
def _update_pid(self, process):
    """Wait for the freshly started node to write cassandra.pid, then load it.

    Gives the process up to 30 seconds.  On modern Windows installs the
    pid file is UTF-16 encoded.  Raises NodeError when the file cannot
    be read.
    """
    pidfile = os.path.join(self.get_path(), 'cassandra.pid')
    deadline = time.time() + 30.0
    while not (os.path.isfile(pidfile) and os.stat(pidfile).st_size > 0):
        if time.time() > deadline:
            common.error("Timed out waiting for pidfile to be filled (current time is {})".format(datetime.now()))
            break
        time.sleep(0.1)
    try:
        with open(pidfile, 'rb') as f:
            raw = f.readline().strip()
            if common.is_modern_windows_install(self.get_base_cassandra_version()):
                raw = raw.decode('utf-16').strip()
            self.pid = int(raw)
    except IOError as e:
        raise NodeError('Problem starting node %s due to %s' % (self.name, e), process)
    self.__update_status()
def __gather_sstables(self, datafiles=None, keyspace=None, columnfamilies=None):
    """Resolve the set of sstable data files selected by the arguments.

    No keyspace: every sstable of every keyspace.  Keyspace only (or
    keyspace + columnfamilies): all matching sstables.  Explicit
    datafiles: each file is validated against the single requested
    column family across all data directories.
    """
    files = []
    if keyspace is None:
        for k in self.list_keyspaces():
            files = files + self.get_sstables(k, "")
    elif datafiles is None:
        if columnfamilies is None:
            files = files + self.get_sstables(keyspace, "")
        else:
            for cf in columnfamilies:
                files = files + self.get_sstables(keyspace, cf)
    else:
        if not columnfamilies or len(columnfamilies) > 1:
            raise common.ArgumentError("Exactly one column family must be specified with datafiles")
        for x in xrange(0, self.cluster.data_dir_count):
            cf_dir = os.path.join(os.path.realpath(self.get_path()), 'data{0}'.format(x), keyspace, columnfamilies[0])
            sstables = set()
            for datafile in datafiles:
                # Relative paths are interpreted against the current directory.
                if not os.path.isabs(datafile):
                    datafile = os.path.join(os.getcwd(), datafile)
                if not datafile.startswith(cf_dir + '-') and not datafile.startswith(cf_dir + os.sep):
                    raise NodeError("File doesn't appear to belong to the specified keyspace and column familily: " + datafile)
                sstable = _sstable_regexp.match(os.path.basename(datafile))
                if not sstable:
                    raise NodeError("File doesn't seem to be a valid sstable filename: " + datafile)
                sstable = sstable.groupdict()
                # Skip temporary sstables and de-duplicate by generation number.
                if not sstable['tmp'] and sstable['number'] not in sstables:
                    if not os.path.exists(datafile):
                        raise IOError("File doesn't exist: " + datafile)
                    sstables.add(sstable['number'])
                    files.append(datafile)
    return files
def _clean_win_jmx(self):
    """Fix the JMX port and CASSANDRA_PARAMS quoting in this node's env .ps1."""
    if self.get_base_cassandra_version() >= 2.1:
        sh_file = os.path.join(common.CASSANDRA_CONF_DIR, common.CASSANDRA_WIN_ENV)
        dst = os.path.join(self.get_path(), sh_file)
        common.replace_in_file(dst, "^\s+\$JMX_PORT=", " $JMX_PORT=\"" + self.jmx_port + "\"")
        # properly use single and double quotes to count for single quotes in the CASSANDRA_CONF path
        common.replace_in_file(
            dst,
            'CASSANDRA_PARAMS=', ' $env:CASSANDRA_PARAMS=\'-Dcassandra' +  # -Dcassandra
            ' -Dlogback.configurationFile=/"\' + "$env:CASSANDRA_CONF" + \'/logback.xml"\'' +  # -Dlogback.configurationFile=/"$env:CASSANDRA_CONF/logback.xml"
            ' + \' -Dcassandra.config=file:"\' + "///$env:CASSANDRA_CONF" + \'/cassandra.yaml"\'')  # -Dcassandra.config=file:"///$env:CASSANDRA_CONF/cassandra.yaml"
def get_conf_option(self, option):
    """Return the value of *option* from this node's cassandra.yaml.

    Args:
        option: top-level cassandra.yaml key to look up.

    Returns:
        The parsed YAML value, or None when the option is absent.
    """
    conf_file = os.path.join(self.get_conf_dir(), common.CASSANDRA_CONF)
    with open(conf_file, 'r') as f:
        # safe_load: the config is plain YAML and yaml.load() without an
        # explicit Loader is deprecated / can construct arbitrary objects.
        data = yaml.safe_load(f)
    return data.get(option)
def pause(self):
    """Suspend the node's process (psutil where available, else SIGSTOP)."""
    try:
        import psutil
        psutil.Process(self.pid).suspend()
    except ImportError:
        if common.is_win():
            common.warning("psutil not installed. Pause functionality will not work properly on Windows.")
        else:
            os.kill(self.pid, signal.SIGSTOP)
def resume(self):
    """Resume a paused node (psutil where available, else SIGCONT)."""
    try:
        import psutil
        psutil.Process(self.pid).resume()
    except ImportError:
        if common.is_win():
            common.warning("psutil not installed. Resume functionality will not work properly on Windows.")
        else:
            os.kill(self.pid, signal.SIGCONT)
def jstack_process(self, opts=None):
    """Spawn jstack against this node's JVM and return the Popen handle."""
    extra = [] if opts is None else opts
    jstack_bin = os.path.abspath(os.path.join(os.environ['JAVA_HOME'], 'bin', 'jstack'))
    cmd = [jstack_bin, '-J-d64'] + extra + [str(self.pid)]
    return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def jstack(self, opts=None):
    """Run jstack against this node and wait for it to finish.

    Args:
        opts: optional list of extra jstack command-line flags.

    Returns:
        The (stdout, stderr, rc) namedtuple from handle_external_tool_process.
    """
    # Normalize here: previously opts stayed None in this scope, so calling
    # jstack() with no arguments raised TypeError on ['jstack'] + None.
    opts = [] if opts is None else opts
    p = self.jstack_process(opts=opts)
    return handle_external_tool_process(p, ['jstack'] + opts)
def byteman_submit_process(self, opts):
    """Build and spawn the byteman-submit CLI against this node's agent."""
    install_dir = self.get_install_dir()
    submit_jar = glob.glob(os.path.join(install_dir, 'build', 'lib', 'jars', 'byteman-submit-[0-9]*.jar'))[0]
    cmd = [
        os.path.join(os.environ['JAVA_HOME'], 'bin', 'java'),
        '-cp',
        submit_jar,
        'org.jboss.byteman.agent.submit.Submit',
        '-p',
        self.byteman_port,
    ]
    return subprocess.Popen(cmd + opts)
def byteman_submit(self, opts):
    """Run byteman-submit with *opts* and return its result tuple."""
    proc = self.byteman_submit_process(opts=opts)
    return handle_external_tool_process(proc, ['byteman_submit'] + opts)
def data_directories(self):
    """Absolute paths of every dataN directory for this node."""
    root = self.get_path()
    return [os.path.join(root, 'data{0}'.format(i)) for i in xrange(0, self.cluster.data_dir_count)]
def get_sstable_data_files_process(self, ks, table):
    """Spawn `sstableutil --type final <ks> <table>`; returns the Popen handle."""
    cmd = [self.get_tool('sstableutil'), '--type', 'final', ks, table]
    return subprocess.Popen(cmd, env=self.get_env(),
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
def get_sstable_data_files(self, ks, table):
    """Return the sorted list of final -Data.db files for ks.table.

    Uses sstableutil so in-flight (temporary) sstables are excluded.
    """
    proc = self.get_sstable_data_files_process(ks=ks, table=table)
    out, _, _ = handle_external_tool_process(proc, ["sstableutil", '--type', 'final', ks, table])
    return sorted(name for name in out.splitlines() if name.endswith('-Data.db'))
def _get_load_from_info_output(info):
load_lines = [s for s in info.split('\n')
if s.startswith('Load')]
if not len(load_lines) == 1:
msg = ('Expected output from `nodetool info` to contain exactly 1 '
'line starting with "Load". Found:\n') + info
raise RuntimeError(msg)
load_line = load_lines[0].split()
# Don't have access to C* version here, so we need to support both prefix styles
# See CASSANDRA-9692 on Apache JIRA
unit_multipliers = {'KiB': 1,
'KB': 1,
'MiB': 1024,
'MB': 1024,
'GiB': 1024 * 1024,
'GB': 1024 * 1024,
'TiB': 1024 * 1024 * 1024,
'TB': 1024 * 1024 * 1024}
load_num, load_units = load_line[2], load_line[3]
try:
load_mult = unit_multipliers[load_units]
except KeyError:
expected = ', '.join(list(unit_multipliers))
msg = ('Expected `nodetool info` to report load in one of the '
'following units:\n'
' {expected}\n'
'Found:\n'
' {found}').format(expected=expected, found=load_units)
raise RuntimeError(msg)
return float(load_num) * load_mult
def _grep_log_for_errors(log):
except_re = re.compile(r'[Ee]xception|AssertionError')
log_cat_re = re.compile(r'(INFO|DEBUG|WARN|ERROR)')
def log_line_category(line):
match = log_cat_re.search(line)
return match.group(0) if match else None
matches = []
loglines = log.splitlines()
for line_num, line in enumerate(loglines):
found_exception = False
line_category = log_line_category(line)
if line_category == 'ERROR':
matches.append([line])
found_exception = True
elif line_category == 'WARN':
match = except_re.search(line)
if match is not None:
matches.append([line])
found_exception = True
if found_exception:
for next_line_num in range(line_num + 1, len(loglines)):
next_line = loglines[next_line_num]
# if a log line can't be identified, assume continuation of an ERROR/WARN exception
if log_line_category(next_line) is None:
matches[-1].append(next_line)
else:
break
return matches
def handle_external_tool_process(process, cmd_args):
    """Wait for *process* and return a (stdout, stderr, rc) namedtuple.

    Raises ToolError (with *cmd_args* for context) when the tool exits
    with a non-zero return code.
    """
    stdout, stderr = process.communicate()
    returncode = process.returncode
    if returncode != 0:
        raise ToolError(cmd_args, returncode, stdout, stderr)
    result_type = namedtuple('Subprocess_Return', 'stdout stderr rc')
    return result_type(stdout=stdout, stderr=stderr, rc=returncode)
| {
"content_hash": "1ba651bf7f0cdfe08e531b80ba08431a",
"timestamp": "",
"source": "github",
"line_count": 2080,
"max_line_length": 251,
"avg_line_length": 43.55336538461538,
"alnum_prop": 0.5746045412899736,
"repo_name": "tolbertam/ccm",
"id": "ba065e5ce7516ce27dbffa9a89919b59f34ab901",
"size": "90602",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ccmlib/node.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "310596"
},
{
"name": "Shell",
"bytes": "2634"
}
],
"symlink_target": ""
} |
import grpc
from mocker.endpoints import grpc_endpoint_pb2 as mocker_dot_endpoints_dot_grpc__endpoint__pb2
class MockServiceStub(object):
  # Client-side stub for the etcdserverpb.MockService gRPC service.
  # NOTE(review): this module appears to be protoc/grpc_tools generated
  # code -- prefer regenerating from the .proto over hand-editing.
  # missing associated documentation comment in .proto file
  pass

  def __init__(self, channel):
    """Constructor.

    Args:
      channel: A grpc.Channel.
    """
    # Unary request -> unary response.
    self.UnaryDoSomething = channel.unary_unary(
        '/etcdserverpb.MockService/UnaryDoSomething',
        request_serializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.StringMessage.SerializeToString,
        response_deserializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.StringMessage.FromString,
        )
    # Stream of IntMessage requests -> single IntCollectionMessage response.
    self.ClientStreamDoSomething = channel.stream_unary(
        '/etcdserverpb.MockService/ClientStreamDoSomething',
        request_serializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.IntMessage.SerializeToString,
        response_deserializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.IntCollectionMessage.FromString,
        )
    # Single request -> stream of responses.  The 'Steram' spelling
    # presumably originates in the .proto -- confirm before renaming.
    self.ServerSteramDoSomething = channel.unary_stream(
        '/etcdserverpb.MockService/ServerSteramDoSomething',
        request_serializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.IntCollectionMessage.SerializeToString,
        response_deserializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.IntMessage.FromString,
        )
class MockServiceServicer(object):
  # Server-side base class for etcdserverpb.MockService; subclasses
  # override the methods below with real implementations.
  # missing associated documentation comment in .proto file
  pass

  def UnaryDoSomething(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    # Generated default: report UNIMPLEMENTED to the client and raise.
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ClientStreamDoSomething(self, request_iterator, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')

  def ServerSteramDoSomething(self, request, context):
    # missing associated documentation comment in .proto file
    pass
    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
    context.set_details('Method not implemented!')
    raise NotImplementedError('Method not implemented!')
def add_MockServiceServicer_to_server(servicer, server):
  """Register *servicer*'s RPC handlers with a grpc server instance."""
  # Map each RPC name to a handler wrapping the servicer method with the
  # matching (de)serializers.
  rpc_method_handlers = {
      'UnaryDoSomething': grpc.unary_unary_rpc_method_handler(
          servicer.UnaryDoSomething,
          request_deserializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.StringMessage.FromString,
          response_serializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.StringMessage.SerializeToString,
      ),
      'ClientStreamDoSomething': grpc.stream_unary_rpc_method_handler(
          servicer.ClientStreamDoSomething,
          request_deserializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.IntMessage.FromString,
          response_serializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.IntCollectionMessage.SerializeToString,
      ),
      'ServerSteramDoSomething': grpc.unary_stream_rpc_method_handler(
          servicer.ServerSteramDoSomething,
          request_deserializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.IntCollectionMessage.FromString,
          response_serializer=mocker_dot_endpoints_dot_grpc__endpoint__pb2.IntMessage.SerializeToString,
      ),
  }
  generic_handler = grpc.method_handlers_generic_handler(
      'etcdserverpb.MockService', rpc_method_handlers)
  server.add_generic_rpc_handlers((generic_handler,))
| {
"content_hash": "cd0889d6e4e520b3bd6ba2adbaabea0a",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 114,
"avg_line_length": 44.59493670886076,
"alnum_prop": 0.7470905478285552,
"repo_name": "dcos/dcos",
"id": "2c11558c779ca30cf102e85a7ed1890b10ef687e",
"size": "3593",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/adminrouter/extra/src/test-harness/modules/mocker/endpoints/grpc_endpoint_pb2_grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2529"
},
{
"name": "Dockerfile",
"bytes": "9395"
},
{
"name": "Go",
"bytes": "5104"
},
{
"name": "Groovy",
"bytes": "711"
},
{
"name": "HCL",
"bytes": "14047"
},
{
"name": "HTML",
"bytes": "91122"
},
{
"name": "Lua",
"bytes": "200521"
},
{
"name": "Makefile",
"bytes": "8767"
},
{
"name": "PowerShell",
"bytes": "230"
},
{
"name": "Python",
"bytes": "1625906"
},
{
"name": "Shell",
"bytes": "102887"
}
],
"symlink_target": ""
} |
import image, math, pyb, sensor, struct, time
# Parameters #################################################################

uart_baudrate = 115200  # serial link speed to the flight controller

MAV_system_id = 1        # MAVLink system id this camera reports as
MAV_component_id = 0x54  # MAVLink component id
MAX_DISTANCE_SENSOR_enable = True  # also emit DISTANCE_SENSOR packets
# NOTE(review): the 'MAX_' prefix looks like a typo for 'MAV_'; the main
# loop references it as-is, so renaming requires changing both sites.

lens_mm = 2.8  # Standard Lens.
lens_to_camera_mm = 22  # Standard Lens.
sensor_w_mm = 3.984  # For OV7725 sensor - see datasheet.
sensor_h_mm = 2.952  # For OV7725 sensor - see datasheet.

# Only tags with a tag ID in the dictionary below will be accepted by this
# code. You may add as many tag IDs to the below dictionary as you want...

# For each tag ID you need to provide the length of the black tag border
# in mm. Any side of the tag black border square will work.

valid_tag_ids = {
    0 : 165, # 8.5" x 11" tag black border size in mm
    1 : 165, # 8.5" x 11" tag black border size in mm
    2 : 165, # 8.5" x 11" tag black border size in mm
}

##############################################################################

# Camera Setup

sensor.reset()
sensor.set_pixformat(sensor.GRAYSCALE)
sensor.set_framesize(sensor.QQVGA)
sensor.skip_frames(time = 2000)

x_res = 160 # QQVGA
y_res = 120 # QQVGA
# Focal lengths (in pixels) and optical centre derived from the lens and
# sensor geometry above.
f_x = (lens_mm / sensor_w_mm) * x_res
f_y = (lens_mm / sensor_h_mm) * y_res
c_x = x_res / 2
c_y = y_res / 2
# Horizontal / vertical field of view, in radians.
h_fov = 2 * math.atan((sensor_w_mm / 2) / lens_mm)
v_fov = 2 * math.atan((sensor_h_mm / 2) / lens_mm)
def z_to_mm(z_translation, tag_size):  # z_translation is in decimeters...
    """Convert an AprilTag z-translation into distance from the lens, in mm.

    Scales by the actual tag border size relative to the 165 mm reference
    tag, then subtracts the lens-to-camera offset.
    """
    scaled = (z_translation * 100) * tag_size
    return (scaled / 165) - lens_to_camera_mm
# Link Setup

# UART 3 carries MAVLink to the flight controller at the configured baudrate.
uart = pyb.UART(3, uart_baudrate, timeout_char = 1000)

# Helper Stuff

# Per-frame MAVLink sequence counter (wrapped to one byte when packed).
packet_sequence = 0
def checksum(data, extra):  # https://github.com/mavlink/c_library_v1/blob/master/checksum.h
    """MAVLink v1 CRC-16/X.25 over *data*, then fold in the CRC_EXTRA byte."""
    def accumulate(crc, byte):
        tmp = byte ^ (crc & 0xFF)
        tmp = (tmp ^ (tmp << 4)) & 0xFF
        return ((crc >> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)) & 0xFFFF

    output = 0xFFFF
    for i in range(len(data)):
        output = accumulate(output, data[i])
    # The message-specific CRC_EXTRA byte is folded in last.
    return accumulate(output, extra)
# DISTANCE_SENSOR message constants.
MAV_DISTANCE_SENSOR_message_id = 132
MAV_DISTANCE_SENSOR_min_distance = 1 # in cm
MAV_DISTANCE_SENSOR_max_distance = 10000 # in cm
MAV_DISTANCE_SENSOR_type = 0 # MAV_DISTANCE_SENSOR_LASER
MAV_DISTANCE_SENSOR_id = 0 # unused
MAV_DISTANCE_SENSOR_orientation = 25 # MAV_SENSOR_ROTATION_PITCH_270
MAV_DISTANCE_SENSOR_covariance = 0 # unused
MAV_DISTANCE_SENSOR_extra_crc = 85 # CRC_EXTRA byte for message id 132

# http://mavlink.org/messages/common#DISTANCE_SENSOR
# https://github.com/mavlink/c_library_v1/blob/master/common/mavlink_msg_distance_sensor.h
def send_distance_sensor_packet(tag, tag_size):
    """Emit a MAVLink v1 DISTANCE_SENSOR message for *tag* over the UART.

    Args:
        tag: AprilTag detection object (provides z_translation()).
        tag_size: black-border size of the detected tag in mm.
    """
    global packet_sequence
    # Payload: time_boot_ms, min/max/current distance (cm), type, id,
    # orientation, covariance.  Unsigned format codes ("I"/"H"/"B") match
    # the MAVLink field types; the previous signed codes raise struct.error
    # in CPython for values above the signed range (e.g. message id 132),
    # while producing identical bytes on MicroPython for in-range values.
    temp = struct.pack("<IHHHBBBB",
                       0,
                       MAV_DISTANCE_SENSOR_min_distance,
                       MAV_DISTANCE_SENSOR_max_distance,
                       min(max(int(z_to_mm(tag.z_translation(), tag_size) / 10), MAV_DISTANCE_SENSOR_min_distance), MAV_DISTANCE_SENSOR_max_distance),
                       MAV_DISTANCE_SENSOR_type,
                       MAV_DISTANCE_SENSOR_id,
                       MAV_DISTANCE_SENSOR_orientation,
                       MAV_DISTANCE_SENSOR_covariance)
    # MAVLink v1 header: payload length, sequence, sysid, compid, msgid.
    temp = struct.pack("<BBBBB14s",
                       14,
                       packet_sequence & 0xFF,
                       MAV_system_id,
                       MAV_component_id,
                       MAV_DISTANCE_SENSOR_message_id,
                       temp)
    # Frame: 0xFE start byte, header+payload, CRC ("H": the checksum is a
    # full uint16 and can exceed the signed-short range).
    temp = struct.pack("<B19sH",
                       0xFE,
                       temp,
                       checksum(temp, MAV_DISTANCE_SENSOR_extra_crc))
    packet_sequence += 1
    uart.write(temp)
# LANDING_TARGET message constants.
MAV_LANDING_TARGET_message_id = 149
# NOTE: 1/100 relies on Python 3 / MicroPython true division.
MAV_LANDING_TARGET_min_distance = 1/100 # in meters
MAV_LANDING_TARGET_max_distance = 10000/100 # in meters
MAV_LANDING_TARGET_frame = 8 # MAV_FRAME_BODY_NED
MAV_LANDING_TARGET_extra_crc = 200 # CRC_EXTRA byte for message id 149

# http://mavlink.org/messages/common#LANDING_TARGET
# https://github.com/mavlink/c_library_v1/blob/master/common/mavlink_msg_landing_target.h
def send_landing_target_packet(tag, w, h, tag_size):
    """Emit a MAVLink v1 LANDING_TARGET message for *tag* over the UART.

    Args:
        tag: AprilTag detection object (cx/cy/z_translation).
        w, h: image width and height in pixels.
        tag_size: black-border size of the detected tag in mm.
    """
    global packet_sequence
    # Payload: time_usec, angle_x/angle_y (rad), distance (m), size_x,
    # size_y, target_num, frame.  Unsigned codes ("Q"/"B") match the
    # MAVLink field types; the previous signed codes raise struct.error in
    # CPython for values above the signed range (e.g. message id 149,
    # extra CRC 200), while packing identical bytes on MicroPython.
    temp = struct.pack("<QfffffBB",
                       0,
                       ((tag.cx() / w) - 0.5) * h_fov,
                       ((tag.cy() / h) - 0.5) * v_fov,
                       min(max(z_to_mm(tag.z_translation(), tag_size) / 1000, MAV_LANDING_TARGET_min_distance), MAV_LANDING_TARGET_max_distance),
                       0.0,
                       0.0,
                       0,
                       MAV_LANDING_TARGET_frame)
    # MAVLink v1 header: payload length, sequence, sysid, compid, msgid.
    temp = struct.pack("<BBBBB30s",
                       30,
                       packet_sequence & 0xFF,
                       MAV_system_id,
                       MAV_component_id,
                       MAV_LANDING_TARGET_message_id,
                       temp)
    # Frame: 0xFE start byte, header+payload, uint16 CRC.
    temp = struct.pack("<B35sH",
                       0xFE,
                       temp,
                       checksum(temp, MAV_LANDING_TARGET_extra_crc))
    packet_sequence += 1
    uart.write(temp)
# Main Loop

clock = time.clock()
while(True):
    clock.tick()
    img = sensor.snapshot()
    # Sort detections by pixel area, largest first, so tags[0] is the
    # biggest (presumably closest) tag in view.
    tags = sorted(img.find_apriltags(fx=f_x, fy=f_y, cx=c_x, cy=c_y), key = lambda x: x.w() * x.h(), reverse = True)
    if tags and (tags[0].id() in valid_tag_ids):
        # Only whitelisted tag IDs are reported; the dict maps id -> size mm.
        if MAX_DISTANCE_SENSOR_enable: send_distance_sensor_packet(tags[0], valid_tag_ids[tags[0].id()])
        send_landing_target_packet(tags[0], img.width(), img.height(), valid_tag_ids[tags[0].id()])
        img.draw_rectangle(tags[0].rect())
        img.draw_cross(tags[0].cx(), tags[0].cy())
        print("Distance %f mm - FPS %f" % (z_to_mm(tags[0].z_translation(), valid_tag_ids[tags[0].id()]), clock.fps()))
    else:
        print("FPS %f" % clock.fps())
| {
"content_hash": "67fb8b197475491513f39c53b28d751c",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 150,
"avg_line_length": 38.48026315789474,
"alnum_prop": 0.5566763549324671,
"repo_name": "iabdalkader/openmv",
"id": "236a0f663f0f085b41fd696047aeca312b182b76",
"size": "6061",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/examples/OpenMV/18-MAVLink/mavlink_apriltags_landing_target.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "569030"
},
{
"name": "C",
"bytes": "100413378"
},
{
"name": "C++",
"bytes": "97780"
},
{
"name": "CMake",
"bytes": "10173"
},
{
"name": "Dockerfile",
"bytes": "874"
},
{
"name": "Makefile",
"bytes": "72669"
},
{
"name": "Python",
"bytes": "1197447"
},
{
"name": "Shell",
"bytes": "3220"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.