gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for trackable object SavedModel save."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import graph_debug_info_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.framework import versions
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.optimizer_v2 import adam
from tensorflow.python.lib.io import file_io
from tensorflow.python.module import module
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.saved_model import loader
from tensorflow.python.saved_model import loader_impl
from tensorflow.python.saved_model import save
from tensorflow.python.saved_model import save_options
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
from tensorflow.python.util import compat
class _ModelWithOptimizer(util.Checkpoint):
  """Minimal trainable fixture: one Dense layer optimized with Adam.

  Used by the save tests to exercise exporting a model whose traced
  function both reads and mutates variables (layer weights + optimizer
  slots).
  """

  def __init__(self):
    self.dense = core.Dense(1)
    self.optimizer = adam.Adam(0.01)

  @def_function.function(
      input_signature=(tensor_spec.TensorSpec([None, 2], dtypes.float32),
                       tensor_spec.TensorSpec([None], dtypes.float32)))
  def call(self, x, y):
    """Run one MSE gradient step on (x, y) and return {"loss": loss}."""
    with backprop.GradientTape() as tape:
      loss = math_ops.reduce_mean((self.dense(x) - y) ** 2.)
      trainable_variables = self.dense.trainable_variables
    gradients = tape.gradient(loss, trainable_variables)
    self.optimizer.apply_gradients(zip(gradients, trainable_variables))
    return {"loss": loss}
def _import_and_infer(
    save_dir, inputs,
    signature_key=signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY):
  """Import a SavedModel into a TF 1.x-style graph and run `signature_key`.

  Args:
    save_dir: Directory containing the SavedModel to load.
    inputs: Dict mapping signature input names to feed values. Its key set
      must exactly match the signature's declared inputs.
    signature_key: Which signature from the MetaGraph to execute.

  Returns:
    Dict mapping the signature's output names to fetched values.
  """
  graph = ops.Graph()
  with graph.as_default(), session_lib.Session() as session:
    meta_graph = loader.load(session, [tag_constants.SERVING], save_dir)
    signature = meta_graph.signature_def[signature_key]
    assert set(inputs.keys()) == set(signature.inputs.keys())
    # Resolve each declared input/output tensor name in the freshly loaded
    # graph, then run everything in a single session call.
    feeds = {
        graph.get_tensor_by_name(signature.inputs[name].name): value
        for name, value in inputs.items()
    }
    fetches = {
        name: graph.get_tensor_by_name(info.name)
        for name, info in signature.outputs.items()
    }
    return session.run(fetches, feed_dict=feeds)
class SaveTest(test.TestCase):
  """End-to-end tests for `tf.saved_model.save` on trackable objects."""

  def test_method_save_signature(self):
    # A function passed directly as `signatures` becomes the default
    # serving signature.
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda x: 2. * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    root.f(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, save_dir, root.f)
    self.assertEqual(
        {"output_0": 2.},
        _import_and_infer(save_dir, {"x": 1.}))

  def test_method_save_concrete(self):
    # Signatures may be a dict mapping custom keys to concrete functions.
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda z: {"out": 2. * z})
    root.f(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(
        root,
        save_dir,
        {"non_default_key": root.f.get_concrete_function(
            tensor_spec.TensorSpec(None, dtypes.float32))})
    self.assertEqual(
        {"out": 2.},
        _import_and_infer(
            save_dir, {"z": 1.}, signature_key="non_default_key"))

  def test_method_save_annotated_function(self):
    # This test is only meaningful with Python 3 because Python 2's
    # inspect.getargspec doesn't save annotations.
    root = tracking.AutoTrackable()

    class UnknownType(object):  # pylint: disable=unused-variable
      pass

    def annotated_function(z):
      return {"out": 2. * z}

    # Same effect as annotating function like the following.
    # def annotated_function("z": UnknownType) -> UnknownType:
    # This is a workaround since Python 2 does not support annotations and
    # our presubmit linter catches it.
    annotated_function.__annotations__ = {
        "z": UnknownType,
        "return": UnknownType
    }

    root.f = def_function.function(annotated_function)
    root.f(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(
        root, save_dir, {
            "non_default_key":
                root.f.get_concrete_function(
                    tensor_spec.TensorSpec(None, dtypes.float32))
        })
    self.assertEqual({"out": 2.},
                     _import_and_infer(
                         save_dir, {"z": 1.}, signature_key="non_default_key"))

  def test_unbuilt_model_does_not_prevent_saving(self):
    root = util.Checkpoint(model=sequential.Sequential([core.Dense(2)]))
    save.save(root, os.path.join(self.get_temp_dir(), "saved_model"))

  def test_unsaveable_func_graph(self):
    # A graph marked unsaveable anywhere in the call tree must surface its
    # error message at save time.
    root = module.Module()

    @def_function.function(input_signature=[])
    def nested_f():
      ops.get_default_graph().mark_as_unsaveable("ERROR MSG")
      return 1

    @def_function.function(input_signature=[])
    def f():
      return nested_f()

    root.f = f
    with self.assertRaisesRegexp(ValueError, "ERROR MSG"):
      save.save(root, os.path.join(self.get_temp_dir(), "saved_model"))

  def test_version_information_included(self):
    root = tracking.AutoTrackable()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, save_dir)
    saved_model_proto = loader_impl.parse_saved_model(save_dir)
    self.assertEqual(
        versions.__version__,
        saved_model_proto.meta_graphs[0].meta_info_def.tensorflow_version)
    self.assertEqual(
        versions.__git_version__,
        saved_model_proto.meta_graphs[0].meta_info_def.tensorflow_git_version)

  def test_non_concrete_error(self):
    # A polymorphic function without an input signature cannot be used as a
    # serving signature.
    root = tracking.AutoTrackable()
    root.f = def_function.function(lambda x: 2. * x)
    root.f(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    with self.assertRaisesRegexp(
        ValueError, "Expected a TensorFlow function"):
      save.save(root, save_dir, root.f)

  def test_captures_unreachable_variable(self):
    # Variables captured by a saved function must be reachable from the
    # object graph being saved.
    root = tracking.AutoTrackable()
    unreachable_variable = variables.Variable([5.0, 2.0])
    root.reachable_variable = variables.Variable([1.0, 3.0])

    @def_function.function
    def increase_variable(x):
      return 2 * unreachable_variable * x + root.reachable_variable

    root.f = increase_variable
    self.assertAllEqual([101.0, 83.0],
                        root.f(constant_op.constant([10.0, 20.0])).numpy())
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    with self.assertRaisesRegexp(KeyError, "not reachable from root"):
      save.save(root, save_dir)

  def test_nested_inputs(self):
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda x: 2. * x[0],
        input_signature=([tensor_spec.TensorSpec(None, dtypes.float32),
                          tensor_spec.TensorSpec(None, dtypes.float32)],))
    root.f([constant_op.constant(1.), constant_op.constant(1.)])

  def test_nested_outputs(self):
    # Serving signatures require flat (or single-level dict) outputs.
    root = tracking.AutoTrackable()
    root.f = def_function.function(lambda x: (2. * x, (3. * x, 4. * x)))
    root.f(constant_op.constant(1.))
    to_save = root.f.get_concrete_function(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    with self.assertRaisesRegexp(
        ValueError, "non-flat outputs"):
      save.save(root, save_dir, to_save)

  def test_nested_dict_outputs(self):
    root = util.Checkpoint(
        f=def_function.function(
            lambda x: {"a": 2. * x, "b": (3. * x, 4. * x)}))
    root.f(constant_op.constant(1.))
    to_save = root.f.get_concrete_function(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    with self.assertRaisesRegexp(
        ValueError, "dictionary containing non-Tensor value"):
      save.save(root, save_dir, to_save)

  def test_variable(self):
    root = tracking.AutoTrackable()
    root.v1 = variables.Variable(3.)
    root.v2 = variables.Variable(2.)
    root.f = def_function.function(
        lambda x: root.v1 * root.v2 * x)
    root.f(constant_op.constant(1.))
    to_save = root.f.get_concrete_function(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, save_dir, to_save)
    self.assertAllEqual({"output_0": 12.},
                        _import_and_infer(save_dir, {"x": 2.}))

  def test_optimizer(self):
    # Saving traces the function, which runs a training step; the loss
    # therefore changes across the save call.
    x = constant_op.constant([[3., 4.]])
    y = constant_op.constant([2.])
    model = _ModelWithOptimizer()
    first_loss = model.call(x, y)
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(model, save_dir, model.call)
    second_loss = model.call(x, y)
    self.assertNotEqual(first_loss, second_loss)
    self.assertAllClose(
        second_loss,
        _import_and_infer(save_dir, {"x": [[3., 4.]], "y": [2.]}))

  def test_single_method_default_signature(self):
    model = _ModelWithOptimizer()
    x = constant_op.constant([[3., 4.]])
    y = constant_op.constant([2.])
    model.call(x, y)
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(model, save_dir)
    self.assertIn("loss",
                  _import_and_infer(save_dir,
                                    {"x": [[3., 4.]], "y": [2.]}))

  def test_single_function_default_signature(self):
    model = tracking.AutoTrackable()
    model.f = def_function.function(lambda: 3., input_signature=())
    model.f()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(model, save_dir)
    self.assertAllClose({"output_0": 3.},
                        _import_and_infer(save_dir, {}))

  def test_single_function_no_signature(self):
    model = tracking.AutoTrackable()
    model.f = def_function.function(lambda: 3.)
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(model, save_dir)

  def test_find_default_save_function(self):
    # An object exposing `_default_save_signature` gets it exported as the
    # serving signature when none is passed explicitly.
    class ObjWithDefaultSignature(util.Checkpoint):

      @def_function.function(input_signature=[tensor_spec.TensorSpec(
          shape=None, dtype=dtypes.float32)])
      def _default_save_signature(self, x):
        return x + x + 1

    obj = ObjWithDefaultSignature()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(obj, save_dir)
    self.assertAllClose(
        {"output_0": 7.}, _import_and_infer(save_dir, {"x": 3.}))

  def test_docstring(self):

    class Adder(module.Module):

      @def_function.function(input_signature=[tensor_spec.TensorSpec(
          shape=None, dtype=dtypes.float32)])
      def add(self, x):
        return x + x + 1.

    to_save = Adder()
    to_save.add(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(to_save, save_dir)
    self.assertAllClose({"output_0": 7.},
                        _import_and_infer(save_dir, {"x": 3.}))

  def test_datastructures(self):
    # Variables nested in tracked lists/dicts must survive saving.
    class HasDatastructures(util.Checkpoint):

      def __init__(self):
        self.a = [1.]
        self.a.append(variables.Variable(2.))
        self.b = {"a": variables.Variable(3.)}

      @def_function.function(input_signature=[tensor_spec.TensorSpec(
          shape=None, dtype=dtypes.float32)])
      def add(self, x):
        return x + math_ops.add_n(self.a) + self.b["a"]

    to_save = HasDatastructures()
    to_save.add(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(to_save, save_dir)
    self.assertAllClose({"output_0": 10.},
                        _import_and_infer(save_dir, {"x": 4.}))

  def test_default_attr_stripping(self):
    # Default-valued attrs (here T/Tout of Complex) should be stripped from
    # the exported FunctionDef.
    class Complex(util.Checkpoint):

      @def_function.function(input_signature=[])
      def __call__(self):
        return math_ops.complex(
            constant_op.constant(1.),
            constant_op.constant(2.),
            name="complex")

    to_save = Complex()
    to_save()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(to_save, save_dir)
    graph = ops.Graph()
    with graph.as_default(), self.session(graph) as session:
      loader.load(session, [tag_constants.SERVING], save_dir)
      func, = [f for name, f in graph._functions.items() if "call" in name]
      complex_node, = [
          node for node in func.definition.node_def if node.op == "Complex"]
      self.assertNotIn("T", complex_node.attr)
      self.assertNotIn("Tout", complex_node.attr)

  def test_signature_attribute_reserved(self):
    # `signatures` is reserved on the root object; users must delete it.
    root = util.Checkpoint(signatures=variables.Variable(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    with self.assertRaisesRegexp(ValueError, "del obj.signatures"):
      save.save(root, save_dir)
    del root.signatures
    save.save(root, save_dir)

  def test_function_with_captured_dataset(self):
    if test_util.is_gpu_available():
      self.skipTest("Currently broken when a GPU is available.")

    class HasDataset(module.Module):

      def __init__(self):
        super(HasDataset, self).__init__()
        self.dataset = (
            dataset_ops.Dataset.range(5)
            .map(lambda x: x ** 2))

      @def_function.function
      def __call__(self, x):
        current_sum = array_ops.zeros([], dtype=dtypes.int64)
        for element in self.dataset:
          current_sum += x * element
        return current_sum

    root = HasDataset()
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(
        root, save_dir,
        signatures=root.__call__.get_concrete_function(
            tensor_spec.TensorSpec(None, dtypes.int64)))
    self.assertAllClose({"output_0": 3 * (1 + 4 + 9 + 16)},
                        _import_and_infer(save_dir, {"x": 3}))

  def test_variable_args_cannot_be_used_as_signature(self):

    @def_function.function(input_signature=[
        resource_variable_ops.VariableSpec(shape=[], dtype=dtypes.int32)])
    def f(unused_v):
      return 1

    root = tracking.AutoTrackable()
    root.f = f.get_concrete_function()
    with self.assertRaisesRegexp(ValueError,
                                 "tf.Variable inputs cannot be exported"):
      save.save(root, os.path.join(self.get_temp_dir(), "saved_model"),
                signatures=root.f)

  def test_export_correct_output_shapes(self):
    """Asserts that nodes are exported with the correct number of output shapes.

    After backpropagation rewrite, functions are rewritten with additional
    outputs. When exporting to SavedModel, the shapes of the additional outputs
    were incorrectly added to the FunctionDef proto (b/133666530).
    """
    obj = tracking.AutoTrackable()
    obj.v = variables.Variable(2.)

    @def_function.function(input_signature=[
        tensor_spec.TensorSpec(None, dtypes.float32)])
    def f(x):
      return (math_ops.multiply(obj.v, x),
              math_ops.multiply(obj.v, (x+1)),
              None)
    obj.f = f

    @def_function.function(input_signature=[
        tensor_spec.TensorSpec(None, dtypes.float32)])
    def g(x):
      return obj.f(x)[1]
    obj.g = g

    # After the following lines, the concrete functions of obj.g and obj.f are
    # rewritten with many extra outputs.
    with backprop.GradientTape():
      obj.g(constant_op.constant(3.0))

    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(obj, save_dir, signatures={"g": obj.g})
    graph_def = loader_impl.parse_saved_model(save_dir).meta_graphs[0].graph_def

    def assert_correct_number_of_output_shapes(node):
      # f declares two real outputs (the trailing None is dropped); g one.
      if node.op == "StatefulPartitionedCall":
        fn_name = node.attr["f"].func.name
        if fn_name.startswith("__inference_f"):
          self.assertLen(node.attr["_output_shapes"].list.shape, 2)
        if fn_name.startswith("__inference_g"):
          self.assertLen(node.attr["_output_shapes"].list.shape, 1)

    for f in graph_def.library.function:
      if (f.signature.name.startswith("__inference_f") or
          f.signature.name.startswith("__inference_g")):
        for node in f.node_def:
          assert_correct_number_of_output_shapes(node)

  def test_save_cached_variable(self):
    with ops.Graph().as_default(), session_lib.Session() as session:
      obj = tracking.AutoTrackable()
      # caching_device keeps a cached read of the variable on the op's device.
      obj.v = variables.Variable(2., caching_device=lambda op: op.device)
      obj.w = variables.Variable(3.)
      session.run([obj.v.initializer, obj.w.initializer])

      @def_function.function(input_signature=[])
      def f():
        return obj.v + obj.w

      obj.f = f
      save_dir = os.path.join(self.get_temp_dir(), "saved_model")
      save.save(obj, save_dir, signatures=obj.f)
      self.assertAllClose({"output_0": 5}, _import_and_infer(save_dir, {}))
class SavingOptionsTest(test.TestCase):
  """Tests for `tf.saved_model.SaveOptions` and op-namespace whitelisting."""

  def testOpNameSpace(self):
    # TODO(kathywu): Add test that saves out SavedModel with a custom op when
    # the ">" character is allowed in op names.
    graph_def = graph_pb2.GraphDef()
    text_format.Merge("node { name: 'A' op: 'Test>CustomOp' }",
                      graph_def)
    with self.assertRaisesRegexp(
        ValueError, "Attempted to save ops from non-whitelisted namespaces"):
      save._verify_ops(graph_def, [])
    save._verify_ops(graph_def, ["Test"])

    # Test with multiple carrots in op name.
    text_format.Merge("node { name: 'A' op: 'Test>>A>CustomOp' }",
                      graph_def)
    with self.assertRaisesRegexp(
        ValueError, "Attempted to save ops from non-whitelisted namespaces"):
      save._verify_ops(graph_def, [])
    save._verify_ops(graph_def, ["Test"])

  def test_save_debug_info_enabled(self):
    # save_debug_info=True writes debug/saved_model_debug_info.pb with
    # per-op stack traces.
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda x: math_ops.mul(2., x, name="DEBUG_INFO_OP"),
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(
        root,
        save_dir,
        root.f,
        options=save_options.SaveOptions(save_debug_info=True))
    debug_info_file_name = os.path.join(save_dir, "debug",
                                        "saved_model_debug_info.pb")
    self.assertTrue(os.path.exists(debug_info_file_name))
    debug_info = graph_debug_info_pb2.GraphDebugInfo()
    with open(debug_info_file_name, "rb") as f:
      debug_info.ParseFromString(f.read())

    # Verify that there is a trace for DEBUG_INFO_OP just to ensure that
    # function debug info tracing is nominally functioning.
    found_op = False
    for key in debug_info.traces.keys():
      if key.startswith("DEBUG_INFO_OP@"):
        found_op = True
        break
    self.assertTrue(found_op, "Did not find DEBUG_INFO_OP in trace")

  def test_save_debug_info_disabled(self):
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda x: math_ops.mul(2., x, name="DEBUG_INFO_OP"),
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(
        root,
        save_dir,
        root.f,
        options=save_options.SaveOptions(save_debug_info=False))
    debug_info_file_name = os.path.join(save_dir, "debug",
                                        "saved_model_debug_info.pb")
    self.assertFalse(os.path.exists(debug_info_file_name))

  def test_function_aliases(self):
    # function_aliases maps a user-chosen alias to the traced function; the
    # alias keys end up in meta_info_def.function_aliases.
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda x: 2. * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    root.f(constant_op.constant(1.))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    options = save_options.SaveOptions(function_aliases={
        "my_func": root.f,
    })
    save.save(root, save_dir, root.f, options=options)
    function_cache = list(root.f._stateful_fn._function_cache.all_values())
    function_aliases = loader_impl.parse_saved_model(
        save_dir).meta_graphs[0].meta_info_def.function_aliases
    self.assertLen(function_cache, 1)
    self.assertEqual(function_cache[0].name.decode("utf-8"),
                     list(function_aliases.keys())[0])
class AssetTests(test.TestCase):
  """Tests for exporting asset files (vocab files, table initializers)."""

  def setUp(self):
    super(AssetTests, self).setUp()
    # Shared three-line vocabulary file used as the asset under test.
    self._vocab_path = os.path.join(self.get_temp_dir(), "vocab.txt")
    with open(self._vocab_path, "w") as f:
      f.write("alpha\nbeta\ngamma\n")

  def test_asset_path_returned(self):
    # Asset paths must track the directory the SavedModel is loaded from,
    # not where it was originally saved.
    root = tracking.AutoTrackable()
    root.path = tracking.Asset(self._vocab_path)
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    root.get_asset = def_function.function(lambda: root.path.asset_path)
    save.save(root, save_dir, signatures=root.get_asset.get_concrete_function())
    second_dir = os.path.join(self.get_temp_dir(), "second_dir")
    file_io.rename(save_dir, second_dir)
    imported_path = _import_and_infer(second_dir, {})["output_0"]
    self.assertIn(compat.as_str_any(second_dir),
                  compat.as_str_any(imported_path))

  def test_table(self):
    initializer = lookup_ops.TextFileInitializer(
        self._vocab_path,
        key_dtype=dtypes.string,
        key_index=lookup_ops.TextFileIndex.WHOLE_LINE,
        value_dtype=dtypes.int64,
        value_index=lookup_ops.TextFileIndex.LINE_NUMBER)
    root = util.Checkpoint(table=lookup_ops.HashTable(
        initializer, default_value=-1))
    root.table_user = def_function.function(
        root.table.lookup,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.string)])
    self.assertEqual(
        2,
        self.evaluate(root.table_user(constant_op.constant("gamma"))))
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(root, save_dir)
    # The original vocab file is deleted: the copy inside the SavedModel's
    # assets directory must be used from here on.
    file_io.delete_file(self._vocab_path)
    self.assertAllClose(
        {"output_0": [2, 0]},
        _import_and_infer(save_dir, {"keys": ["gamma", "alpha"]}))
    second_dir = os.path.join(self.get_temp_dir(), "second_dir")
    # Asset paths should track the location the SavedModel is loaded from.
    file_io.rename(save_dir, second_dir)
    self.assertAllClose(
        {"output_0": [2, 1]},
        _import_and_infer(second_dir, {"keys": ["gamma", "beta"]}))

  def test_unused_asset(self):
    # An asset not referenced by any signature should still not break saving
    # or serving.
    root = tracking.AutoTrackable()
    root.f = def_function.function(
        lambda x: 2. * x,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    root.asset = tracking.Asset(self._vocab_path)
    export_dir = os.path.join(self.get_temp_dir(), "save_dir")
    save.save(root, export_dir)
    self.assertAllClose(
        {"output_0": [0.2]},
        _import_and_infer(export_dir, {"x": [0.1]}))

  def test_sensible_function_building_exception(self):
    # Calling save() from inside a tf.function is unsupported and should
    # fail with a message pointing at tf.function.
    root = util.Checkpoint(v=variables.Variable(2.))
    root.f = def_function.function(
        lambda x: 2. * root.v,
        input_signature=[tensor_spec.TensorSpec(None, dtypes.float32)])
    export_dir = os.path.join(self.get_temp_dir(), "save_dir")

    @def_function.function
    def _calls_save():
      save.save(root, export_dir)

    with self.assertRaisesRegexp(AssertionError, "tf.function"):
      _calls_save()
class _ModelWithOptimizerUsingDefun(util.Checkpoint):
  """Same fixture as `_ModelWithOptimizer` but traced with `function.defun`.

  Used by MemoryTests, which asserts no garbage is created; see the comment
  below for why defun rather than def_function is required there.
  """

  def __init__(self):
    self.dense = core.Dense(1)
    self.optimizer = adam.Adam(0.01)

  # Using defun due to control flow v2 cycles, b/121159261. def_function uses
  # conds to gate variable initialization and so triggers cond reference cycles,
  # but the thing being wrapped here does not use cond itself.
  @function.defun(
      input_signature=(tensor_spec.TensorSpec([None, 2], dtypes.float32),
                       tensor_spec.TensorSpec([None], dtypes.float32)),
  )
  def call(self, x, y):
    """Run one MSE gradient step on (x, y) and return {"loss": loss}."""
    with backprop.GradientTape() as tape:
      loss = math_ops.reduce_mean((self.dense(x) - y) ** 2.)
      trainable_variables = self.dense.trainable_variables
    gradients = tape.gradient(loss, trainable_variables)
    self.optimizer.apply_gradients(zip(gradients, trainable_variables))
    return {"loss": loss}
class MemoryTests(test.TestCase):
  """Checks that saving does not create reference cycles."""

  def setUp(self):
    self._model = _ModelWithOptimizerUsingDefun()

  @test_util.assert_no_garbage_created
  def test_no_reference_cycles(self):
    x = constant_op.constant([[3., 4.]])
    y = constant_op.constant([2.])
    self._model.call(x, y)
    if sys.version_info[0] < 3:
      # TODO(allenl): debug reference cycles in Python 2.x
      self.skipTest("This test only works in Python 3+. Reference cycles are "
                    "created in older Python versions.")
    save_dir = os.path.join(self.get_temp_dir(), "saved_model")
    save.save(self._model, save_dir, self._model.call)
# Standard test entry point.
if __name__ == "__main__":
  test.main()
| |
"""
Copyright (C) 2017 Open Source Robotics Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
from geometry.arc import Arc
from geometry.point import Point
from geometry.line_segment import LineSegment
from geometry.path import Path
from waypoint import Waypoint
from lane_geometry import LaneGeometry
class LinesAndArcsGeometry(LaneGeometry):
    """Lane geometry built from straight line segments joined by circular arcs.

    Consecutive road-node centers are connected with line segments; wherever
    the direction changes, the corner is rounded with an arc tangent to both
    neighboring segments.
    """

    @classmethod
    def build_path_and_waypoints(cls, lane, mapped_centers):
        """Build the lane's `Path` and its list of `Waypoint`s.

        Args:
            lane: Lane whose road nodes define the geometry.
            mapped_centers: Mapping from road node to its center `Point`.

        Returns:
            `(path, waypoints)` tuple.

        Raises:
            ValueError: If the lane has fewer than two road nodes.
            RuntimeError: If a connecting arc fails to land on the expected
                entry point of the following segment.
        """
        if lane.road_nodes_count() < 2:
            raise ValueError("At least two nodes are required to build a geometry")
        road_nodes = list(lane.road_nodes())
        # Circular lane: first and last node centers coincide (tolerance 5 —
        # presumably decimal places for almost_equal_to; TODO confirm).
        is_circular = mapped_centers[road_nodes[0]].almost_equal_to(mapped_centers[road_nodes[-1]], 5)
        path = Path()
        waypoints = []
        previous_node = road_nodes.pop(0)
        previous_point = mapped_centers[previous_node]
        nodes_count = len(road_nodes)
        for index, node in enumerate(road_nodes):
            point = mapped_centers[node]
            is_last_node = index + 1 == nodes_count
            if is_last_node:
                if is_circular:
                    # Wrap around to close the loop.
                    next_point = mapped_centers[road_nodes[0]]
                else:
                    next_point = None
            else:
                next_point = mapped_centers[road_nodes[index + 1]]
            # Start new elements where the previous element actually ended
            # (which may differ from the node center after corner rounding).
            if path.is_empty():
                previous_element_end_point = previous_point
            else:
                previous_element_end_point = path.element_at(-1).end_point()
            if next_point is None:
                # Open lane, final node: finish with a straight segment.
                element = LineSegment(previous_element_end_point, point)
                path.add_element(element)
                waypoints.append(cls._new_waypoint(lane, element, previous_node))
                waypoints.append(cls._new_waypoint(lane, element, road_nodes[-1], False))
            else:
                previous_vector = point - previous_point
                next_vector = next_point - point
                if previous_vector.is_collinear_with(next_vector):
                    # No turn at this node: extend with a straight segment.
                    element = LineSegment(previous_element_end_point, point)
                    path.add_element(element)
                    waypoints.append(cls._new_waypoint(lane, element, previous_node))
                    if is_last_node:
                        waypoints.append(cls._new_waypoint(lane, element, road_nodes[0], False))
                else:
                    # Corner: shorten both adjacent segments by `delta` and
                    # join their new endpoints with a tangent arc.
                    inverted_previous_segment = LineSegment(point, previous_point)
                    real_inverted_previous_segment = LineSegment(point, previous_element_end_point)
                    next_segment = LineSegment(point, next_point)
                    if is_last_node:
                        real_next_segment = LineSegment(point, path.element_at(0).end_point())
                        delta = min(real_inverted_previous_segment.length(), real_next_segment.length(), 5)
                    else:
                        delta = min(real_inverted_previous_segment.length(), next_segment.length() / 2.0, 5)
                    previous_segment_new_end_point = real_inverted_previous_segment.point_at_offset(delta)
                    next_segment_new_start_point = next_segment.point_at_offset(delta)
                    previous_segment = LineSegment(previous_element_end_point, previous_segment_new_end_point)
                    # Try to avoid small segments
                    if previous_segment.length() < 0.25:
                        # `- 1e-10` to avoid length overflow due to floating point math
                        new_delta = delta + previous_segment.length() - 1e-10
                        if next_segment.length() > new_delta:
                            previous_segment_new_end_point = real_inverted_previous_segment.point_at_offset(new_delta)
                            next_segment_new_start_point = next_segment.point_at_offset(new_delta)
                            previous_segment = LineSegment(previous_element_end_point, previous_segment_new_end_point)
                    # Chord length d and turn angle fix the arc radius:
                    # r = sqrt(d^2 / (2 * (1 - cos(angle)))).
                    angle_between_vectors = previous_vector.angle(next_vector)
                    d2 = previous_segment_new_end_point.squared_distance_to(next_segment_new_start_point)
                    cos = math.cos(math.radians(angle_between_vectors))
                    radius = math.sqrt(d2 / (2.0 * (1.0 - cos)))
                    # If there should be no segment, just an arc. Use previous_element_end_point to
                    # avoid rounding errors and make a perfect overlap
                    if previous_segment.length() < 1e-8:
                        if path.not_empty():
                            heading = path.element_at(-1).end_heading()
                        else:
                            heading = inverted_previous_segment.inverted().start_heading()
                        connection_arc = Arc(previous_element_end_point, heading, radius, angle_between_vectors)
                        path.add_element(connection_arc)
                        waypoints.append(cls._new_waypoint(lane, connection_arc, node))
                        if not connection_arc.end_point().almost_equal_to(next_segment_new_start_point, 3):
                            raise RuntimeError("Expecting arc end {0} to match next segment entry point {1}".format(
                                connection_arc.end_point(),
                                next_segment_new_start_point))
                    else:
                        heading = inverted_previous_segment.inverted().start_heading()
                        connection_arc = Arc(previous_segment_new_end_point, heading, radius, angle_between_vectors)
                        path.add_element(previous_segment)
                        waypoints.append(cls._new_waypoint(lane, previous_segment, previous_node))
                        path.add_element(connection_arc)
                        waypoints.append(cls._new_waypoint(lane, connection_arc, node))
                        if not connection_arc.end_point().almost_equal_to(next_segment_new_start_point, 3):
                            raise RuntimeError("Expecting arc end {0} to match next segment entry point {1}".format(
                                connection_arc.end_point(),
                                next_segment_new_start_point))
                    if is_last_node:
                        # Circular lane closure: reconcile the loop's first
                        # element with the closing arc's end point.
                        if connection_arc.end_point().distance_to(path.element_at(1).start_point()) < 1e-8:
                            path.remove_first_element()
                            waypoints.pop(0)
                        else:
                            first_element = path.element_at(0)
                            new_first_element = LineSegment(connection_arc.end_point(), first_element.end_point())
                            path.replace_first_element(new_first_element)
                            waypoints[0] = cls._new_waypoint(lane, new_first_element, lane.road_nodes()[0])
                        waypoints.append(cls._new_waypoint(lane, connection_arc, road_nodes[0], False))
            previous_node = node
            previous_point = mapped_centers[previous_node]
        return (path, waypoints)

    @classmethod
    def connect(cls, exit_waypoint, entry_waypoint):
        """Build a connection between an exit waypoint and an entry waypoint.

        Returns a single `LineSegment`, a single `Arc`, or a multi-element
        `Path`, depending on the relative headings and offsets of the two
        waypoints. Headings appear to be in degrees — TODO confirm.
        """
        if abs(exit_waypoint.heading() - entry_waypoint.heading()) < 1e-3:
            waypoints_angle = math.degrees(exit_waypoint.center().yaw(entry_waypoint.center()))
            if abs(exit_waypoint.heading() - waypoints_angle) < 1e-3:
                # Waypoints are in collinear lanes and can be connected by a line
                # segment with the same heading
                return LineSegment(exit_waypoint.center(), entry_waypoint.center())
            else:
                # Waypoints are in collinear lanes but have different offsets,
                # so they must be connected by an S-shaped path
                exit_line = exit_waypoint.defining_line()
                entry_line = entry_waypoint.defining_line()
                cutting_line = entry_line.perpendicular_line_at(entry_waypoint.center())
                delta_length = (exit_waypoint.center() - exit_line.intersection(cutting_line)).norm()
                segment_extension = delta_length / 5.0
                # Extend both ends so the middle segment can be trimmed back
                # symmetrically, leaving room for the two joining arcs.
                exit_extension = LineSegment.from_point_and_heading(exit_waypoint.center(),
                                                                   exit_waypoint.heading(),
                                                                   segment_extension)
                entry_extension = LineSegment.from_point_and_heading(entry_waypoint.center(),
                                                                    entry_waypoint.heading() + 180,
                                                                    segment_extension)
                connecting_segment = LineSegment(exit_extension.end_point(), entry_extension.end_point())
                # Trim both ends: shrink, invert, shrink, invert back.
                connecting_segment = connecting_segment.extended_by(-segment_extension).inverted()
                connecting_segment = connecting_segment.extended_by(-segment_extension).inverted()
                start_arc = cls._build_arc(exit_waypoint.center(),
                                           exit_waypoint.heading(),
                                           connecting_segment.start_point(),
                                           connecting_segment.start_heading())
                end_arc = cls._build_arc(connecting_segment.end_point(),
                                         connecting_segment.end_heading(),
                                         entry_waypoint.center(),
                                         entry_waypoint.heading())
                path = Path()
                path.add_element(start_arc)
                path.add_element(connecting_segment)
                path.add_element(end_arc)
                return path
        else:
            # Headings differ: connect with an arc, padding the longer side
            # with a straight segment so both legs subtend the same arc.
            exit_point = exit_waypoint.center()
            exit_line = exit_waypoint.defining_line()
            entry_point = entry_waypoint.center()
            entry_line = entry_waypoint.defining_line()
            intersection = exit_line.intersection(entry_line)
            exit_distance = exit_point.distance_to(intersection)
            entry_distance = entry_point.distance_to(intersection)
            path = None
            delta = abs(exit_distance - entry_distance)
            if delta > 1e-1:
                path = Path()
                if exit_distance > entry_distance:
                    end_point = exit_point + (exit_waypoint.heading_vector() * delta)
                    segment = LineSegment(exit_point, end_point)
                    arc = cls._build_arc(end_point, exit_waypoint.heading(), entry_point, entry_waypoint.heading())
                    path.add_element(segment)
                    path.add_element(arc)
                else:
                    start_point = entry_point - (entry_waypoint.heading_vector() * delta)
                    arc = cls._build_arc(exit_point, exit_waypoint.heading(), start_point, entry_waypoint.heading())
                    segment = LineSegment(arc.end_point(), entry_point)
                    path.add_element(arc)
                    path.add_element(segment)
                return path
            else:
                return cls._build_arc(exit_point, exit_waypoint.heading(), entry_point, entry_waypoint.heading())

    @classmethod
    def _build_arc(cls, start_point, start_heading, end_point, end_heading):
        """Build the arc from start to end implied by the heading change.

        The radius follows from the chord length d and turn angle:
        r = sqrt(d^2 / (2 * (1 - cos(angle)))). NOTE(review): a zero heading
        change makes cos == 1 and divides by zero — callers appear to avoid
        that case; confirm.
        """
        angle_in_degrees = end_heading - start_heading
        d2 = end_point.squared_distance_to(start_point)
        cos = math.cos(math.radians(angle_in_degrees))
        radius = math.sqrt(d2 / (2 * (1 - cos)))
        # Keep the angle in the [-180, 180) range
        if angle_in_degrees >= 180:
            angle_in_degrees = angle_in_degrees - 360
        if angle_in_degrees < -180:
            angle_in_degrees = angle_in_degrees + 360
        return Arc(start_point, start_heading, radius, angle_in_degrees)
| |
#adapted from the example at http://scikit-learn.org/stable/auto_examples/svm/plot_rbf_parameters.html
"""
This script can be used to get the p value for classifiers. It takes input files with column vectors corresponding to features and lables.
Then there are two different routes one can go down. When mode has a value of 1, then a grid search will be performed on
one set of input files. If it is 2, then the hyperparemeter search is performed by spearmint. When the mode is turned off (0),
then the p value is computed for multiple sets of input files and the p value distribution is plotted. One sets all the valiables
including the classifier in the "args" list. The classifier provided is ignored if keras_mode is on (1) in which case a keras neural
network is used.
"""
from __future__ import print_function
print(__doc__)
import os
import p_value_scoring_object
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
from sklearn import cross_validation
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris
from sklearn.cross_validation import StratifiedShuffleSplit
from sklearn.grid_search import GridSearchCV
from scipy import stats
import math
##############################################################################
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
    """Colormap normalization that pins *midpoint* to the colormap centre.

    Adapted from the scikit-learn RBF-parameters example: values are mapped
    piecewise-linearly so vmin -> 0, midpoint -> 0.5 and vmax -> 1, which
    makes small variations around the midpoint visually prominent.
    """

    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # Piecewise-linear interpolation between the three anchor points.
        anchors = [self.vmin, self.midpoint, self.vmax]
        targets = [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, anchors, targets))
class Counter(object):
    """One-shot iterator that yields a single (train, test) index split.

    Lets cross-validation helpers run with exactly one predetermined
    train/test partition instead of several random ones.
    """

    def __init__(self, list1, list2):
        # `current` tracks how many times the pair has been handed out.
        self.current = 1
        self.list1 = list1
        self.list2 = list2

    def __iter__(self):
        """The object is its own iterator."""
        return self

    def __next__(self):
        """Yield the (list1, list2) pair exactly once, then stop."""
        if self.current > 1:
            raise StopIteration
        self.current += 1
        return self.list1, self.list2

    next = __next__  # Python 2 iterator protocol compatibility
def histo_plot_pvalue(U_0, abins, axlabel, aylabel, atitle, aname):
    """Plot a histogram of the p values in U_0 and save it as <aname>.png.

    The plot is annotated with the number of p values falling below the
    two-sided Gaussian 1/2/3 sigma significance thresholds.

    Parameters:
        U_0:     iterable of p values in [0, 1].
        abins:   bin specification forwarded to np.histogram.
        axlabel, aylabel, atitle: axis labels and plot title.
        aname:   output file name stem (".png" is appended).
    """
    bins_probability = np.histogram(U_0, bins=abins)[1]
    # Finding the p values corresponding to 1, 2 and 3 sigma significance
    # (two-sided Gaussian coverages: 68.27%, 95.45%, 99.73%).
    no_one_std_dev = sum(i < (1 - 0.6827) for i in U_0)
    no_two_std_dev = sum(i < (1 - 0.9545) for i in U_0)
    no_three_std_dev = sum(i < (1 - 0.9973) for i in U_0)
    print(no_one_std_dev, no_two_std_dev, no_three_std_dev)
    #plt.rc('text', usetex=True)
    # Backslashes are escaped: '\s' in a plain string literal is an invalid
    # escape sequence (DeprecationWarning today, SyntaxError in future
    # Python); '\\sigma' yields the same text while '\n' stays a newline.
    textstr = '$1\\sigma=%i$\n$2\\sigma=%i$\n$3\\sigma=%i$' % (no_one_std_dev, no_two_std_dev, no_three_std_dev)
    # Making a histogram of the probability predictions of the algorithm.
    fig_pred_0 = plt.figure()
    ax1_pred_0 = fig_pred_0.add_subplot(1, 1, 1)
    n0, bins0, patches0 = ax1_pred_0.hist(U_0, bins=bins_probability, facecolor='red', alpha=0.5)
    ax1_pred_0.set_xlabel(axlabel)
    ax1_pred_0.set_ylabel(aylabel)
    ax1_pred_0.set_title(atitle)
    plt.xlim([0, 1])
    # these are matplotlib.patch.Patch properties
    props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
    # place a text box in upper left in axes coords
    ax1_pred_0.text(0.85, 0.95, textstr, transform=ax1_pred_0.transAxes, fontsize=14,
                    verticalalignment='top', bbox=props)
    fig_pred_0.savefig(aname + ".png")
    #fig_pred_0.show()
    plt.close(fig_pred_0)
def classifier_eval(mode,keras_mode,args):
    """Evaluate a classifier on pairs of sample files using a p-value score.

    mode: 0 = standard evaluation over every file pair (collects scores and
    plots their distribution), 1 = grid hyperparameter search, 2 = spearmint
    hyperparameter search (returns the negated mean score of the first pair).
    keras_mode: 1 = train a Keras feed-forward network instead of `clf`.
    args: [name, sample1_name, sample2_name, shuffling_seed, comp_file_list,
           cv_n_iter, clf, C_range, gamma_range].
    Returns 1 on an invalid mode, -mean(scores) in mode 2, otherwise None.
    """
    ##############################################################################
    # Setting parameters
    #
    name=args[0]
    sample1_name= args[1]
    sample2_name= args[2]
    shuffling_seed = args[3]
    # NOTE(review): shuffling_seed is unpacked but never used to seed the
    # np.random.shuffle call below -- TODO confirm whether seeding was intended.
    #mode =0 if you want evaluation of a model =1 if grid hyperparameter search =2 if spearmint hyperparameter search
    comp_file_list=args[4]
    print(comp_file_list)
    cv_n_iter = args[5]
    clf = args[6]
    C_range = args[7]
    gamma_range = args[8]
    if mode==0:
        #For standard evaluation
        score_list=[]
        print("standard evaluation mode")
    elif mode==1:
        #For grid search
        print("grid hyperparameter search mode")
        param_grid = dict(gamma=gamma_range, C=C_range)
    elif mode==2:
        #For spearmint hyperparameter search
        print("spearmint hyperparameter search mode")
    else:
        print("No valid mode chosen")
        return 1
    ##############################################################################
    # Load and prepare data set
    #
    # dataset for grid search
    for comp_file_0,comp_file_1 in comp_file_list:
        print("Operating of files :"+comp_file_0+" "+comp_file_1)
        #extracts data from the files
        features_0=np.loadtxt(comp_file_0,dtype='d')
        features_1=np.loadtxt(comp_file_1,dtype='d')
        #determine how many data points are in each sample
        no_0=features_0.shape[0]
        no_1=features_1.shape[0]
        no_tot=no_0+no_1
        #Give all samples in file 0 the label 0 and in file 1 the feature 1
        label_0=np.zeros((no_0,1))
        label_1=np.ones((no_1,1))
        #Create an array containing samples and features.
        data_0=np.c_[features_0,label_0]
        data_1=np.c_[features_1,label_1]
        data=np.r_[data_0,data_1]
        np.random.shuffle(data)
        # Last column is the label; everything before it is a feature.
        X=data[:,:-1]
        y=data[:,-1]
        print("X : ",X)
        print("y : ",y)
        atest_size=0.2
        if cv_n_iter==1:
            # Single split: first 80% of the shuffled rows train, rest test.
            # Counter (defined above) yields this one split to the CV helpers.
            train_range = range(int(math.floor(no_tot*(1-atest_size))))
            test_range = range(int(math.ceil(no_tot*(1-atest_size))),no_tot)
            #print("train_range : ", train_range)
            #print("test_range : ", test_range)
            acv = Counter(train_range,test_range)
            #print(acv)
        else:
            acv = StratifiedShuffleSplit(y, n_iter=cv_n_iter, test_size=atest_size, random_state=42)
        print("Finished with setting up samples")
        # It is usually a good idea to scale the data for SVM training.
        # We are cheating a bit in this example in scaling all of the data,
        # instead of fitting the transformation on the training set and
        # just applying it on the test set.
        scaler = StandardScaler()
        X = scaler.fit_transform(X)
        if mode==1:
            ##############################################################################
            # Grid Search
            #
            # Train classifiers
            #
            # For an initial search, a logarithmic grid with basis
            # 10 is often helpful. Using a basis of 2, a finer
            # tuning can be achieved but at a much higher cost.
            grid = GridSearchCV(clf, scoring=p_value_scoring_object.p_value_scoring_object ,param_grid=param_grid, cv=acv)
            grid.fit(X, y)
            print("The best parameters are %s with a score of %0.2f"
                  % (grid.best_params_, grid.best_score_))
            # Now we need to fit a classifier for all parameters in the 2d version
            # (we use a smaller set of parameters here because it takes a while to train)
            C_2d_range = [1e-2, 1, 1e2]
            gamma_2d_range = [1e-1, 1, 1e1]
            classifiers = []
            for C in C_2d_range:
                for gamma in gamma_2d_range:
                    clf = SVC(C=C, gamma=gamma)
                    # NOTE(review): X_2d and y_2d are never defined in this
                    # script (leftover from the sklearn example it was adapted
                    # from) -- this line raises NameError when reached. TODO:
                    # confirm whether X/y (or a 2-feature slice) was intended.
                    clf.fit(X_2d, y_2d)
                    classifiers.append((C, gamma, clf))
            ##############################################################################
            # visualization
            #
            # draw visualization of parameter effects
            plt.figure(figsize=(8, 6))
            xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-3, 3, 200))
            for (k, (C, gamma, clf)) in enumerate(classifiers):
                # evaluate decision function in a grid
                Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
                Z = Z.reshape(xx.shape)
                # visualize decision function for these parameters
                plt.subplot(len(C_2d_range), len(gamma_2d_range), k + 1)
                plt.title("gamma=10^%d, C=10^%d" % (np.log10(gamma), np.log10(C)),size='medium')
                # visualize parameter's effect on decision function
                plt.pcolormesh(xx, yy, -Z, cmap=plt.cm.RdBu)
                plt.scatter(X_2d[:, 0], X_2d[:, 1], c=y_2d, cmap=plt.cm.RdBu_r)
                plt.xticks(())
                plt.yticks(())
                plt.axis('tight')
            plt.savefig('prediction_comparison.png')
            # plot the scores of the grid
            # grid_scores_ contains parameter settings and scores
            # We extract just the scores
            scores = [x[1] for x in grid.grid_scores_]
            scores = np.array(scores).reshape(len(C_range), len(gamma_range))
            # Draw heatmap of the validation accuracy as a function of gamma and C
            #
            # The score are encoded as colors with the hot colormap which varies from dark
            # red to bright yellow. As the most interesting scores are all located in the
            # 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so
            # as to make it easier to visualize the small variations of score values in the
            # interesting range while not brutally collapsing all the low score values to
            # the same color.
            plt.figure(figsize=(8, 6))
            plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
            plt.imshow(scores, interpolation='nearest', cmap=plt.cm.hot,
                       norm=MidpointNormalize(vmin=-1.0, midpoint=-0.0001))
            plt.xlabel('gamma')
            plt.ylabel('C')
            plt.colorbar()
            plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
            plt.yticks(np.arange(len(C_range)), C_range)
            plt.title('Validation accuracy')
            plt.savefig('Heat_map.png')
        else:
            if keras_mode==1:
                # Local imports keep keras an optional dependency.
                from keras.models import Sequential
                from keras.layers.core import Dense, Activation
                from keras.layers import Dropout
                from keras.utils import np_utils, generic_utils
                dimof_input = X.shape[1]
                dimof_output =1
                print("dimof_input : ",dimof_input, "dimof_output : ", dimof_output)
                #y = np_utils.to_categorical(y, dimof_output)
                scores = []
                counter = 1
                # Manual cross-validation loop: train a fresh network per split
                # and score it with the Kolmogorov-Smirnov p value.
                for train_index, test_index in acv:
                    print("Cross validation run ", counter)
                    X_train, X_test = X[train_index], X[test_index]
                    y_train, y_test = y[train_index], y[test_index]
                    print("X_train : ",X_train)
                    print("y_train : ",y_train)
                    batch_size = 1
                    dimof_middle = 100
                    dropout = 0.5
                    countof_epoch = 5
                    n_hidden_layers =4
                    # tanh hidden layers with dropout, sigmoid output.
                    model = Sequential()
                    model.add(Dense(input_dim=dimof_input, output_dim=dimof_middle, init="glorot_uniform",activation='tanh'))
                    model.add(Dropout(dropout))
                    for n in range(n_hidden_layers):
                        model.add(Dense(input_dim=dimof_middle, output_dim=dimof_middle, init="glorot_uniform",activation='tanh'))
                        model.add(Dropout(dropout))
                    model.add(Dense(input_dim=dimof_middle, output_dim=dimof_output, init="glorot_uniform",activation='sigmoid'))
                    print("Does it make it here?")
                    #Compiling (might take longer)
                    # NOTE(review): categorical_crossentropy with a single
                    # sigmoid output -- binary_crossentropy is the usual
                    # pairing; confirm this is intended.
                    model.compile(loss='categorical_crossentropy', optimizer='sgd')
                    model.fit(X_train, y_train,show_accuracy=True,batch_size=batch_size, nb_epoch=countof_epoch, verbose=0)
                    prob_pred = model.predict_proba(X_test)
                    print("prob_pred : ", prob_pred)
                    assert (not (np.isnan(np.sum(prob_pred))))
                    #Just like in p_value_scoring_strategy.py
                    y_test = np.reshape(y_test,(1,y_test.shape[0]))
                    prob_pred = np.reshape(prob_pred,(1,prob_pred.shape[0]))
                    # Split the predicted probabilities by true class label.
                    prob_0 = prob_pred[np.logical_or.reduce([y_test==0])]
                    prob_1 = prob_pred[np.logical_or.reduce([y_test==1])]
                    if __debug__:
                        print("Plot")
                    # Two-sample KS test between the two class-conditional
                    # probability distributions; index 1 is the p value.
                    p_KS=stats.ks_2samp(prob_0,prob_1)
                    print(p_KS)
                    scores.append(p_KS[1])
                    counter +=1
            else:
                # Standard path: use the supplied classifier with the custom
                # p-value scorer across the chosen CV splits.
                scores = cross_validation.cross_val_score(clf,X,y,cv=acv,scoring=p_value_scoring_object.p_value_scoring_object)
                print(scores)
            # NOTE(review): score_list is only initialised in mode 0; this
            # append raises NameError in other modes that reach it.
            score_list.append(np.mean(scores))
        if mode==2:
            # Spearmint minimises, so return the negated mean score after the
            # first file pair.
            return (-1)* np.mean(scores)
    ############################################################################################################################################################
    ############################################################### Evaluation of results ####################################################################
    ############################################################################################################################################################
    if mode==0:
        # The score list has been computed. Let's plot the distribution
        print(score_list)
        print("I havent implemented plotting of the distribution")
        # NOTE(review): `name` is a string and `i` is not defined in this
        # function -- name[i] only works via a leftover global and yields a
        # single character; presumably `name` alone was intended. Confirm.
        histo_plot_pvalue(score_list,50,"p value","Frequency","p value distribution",name[i]+"_p_value")
if __name__ == "__main__":
print("Executing classifier_eval_simplified as a stand-alone script")
print()
comp_file_list=[]
for i in range(1,10):
comp_file_list.append((os.environ['MLToolsDir']+"/Dalitz/dpmodel/data/data.{0}.0.txt".format(i), os.environ['MLToolsDir']+"/Dalitz/dpmodel/data/data.2{0}.1.txt".format(str(i).zfill(2))))
clf = SVC(C=100,gamma=0.1,probability=True, cache_size=7000)
args=["dalitz_svc","particle","antiparticle",100,comp_file_list,1,clf,np.logspace(-2, 10, 13),np.logspace(-9, 3, 13)]
#classifier_eval_simplified(aC,agamma)
classifier_eval(0,0,args)
| |
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Test the "pipeline.py" restricted parallel programming library.
"""
from _common import unittest
from beets.util import pipeline
# Some simple pipeline stages for testing.
def _produce(num=5):
for i in range(num):
yield i
def _work():
i = None
while True:
i = yield i
i *= 2
def _consume(l):
while True:
i = yield
l.append(i)
# A worker that raises an exception.
class TestException(Exception):
    # Sentinel exception raised by _exc_work to test error propagation.
    pass
def _exc_work(num=3):
i = None
while True:
i = yield i
if i == num:
raise TestException()
i *= 2
# A worker that yields a bubble.
def _bub_work(num=3):
i = None
while True:
i = yield i
if i == num:
i = pipeline.BUBBLE
else:
i *= 2
# Yet another worker that yields multiple messages.
def _multi_work():
    """Worker stage that fans each input out into two messages: i and -i."""
    out = None
    while True:
        received = yield out
        out = pipeline.multiple([received, -received])
class SimplePipelineTest(unittest.TestCase):
    """Three-stage pipeline: produce 0..4, double each value, collect."""

    def setUp(self):
        self.l = []
        stages = (_produce(), _work(), _consume(self.l))
        self.pl = pipeline.Pipeline(stages)

    def test_run_sequential(self):
        self.pl.run_sequential()
        self.assertEqual(self.l, [0, 2, 4, 6, 8])

    def test_run_parallel(self):
        self.pl.run_parallel()
        self.assertEqual(self.l, [0, 2, 4, 6, 8])

    def test_pull(self):
        two_stage = pipeline.Pipeline((_produce(), _work()))
        self.assertEqual(list(two_stage.pull()), [0, 2, 4, 6, 8])

    def test_pull_chain(self):
        # A pull() generator can feed another pipeline as its source.
        first = pipeline.Pipeline((_produce(), _work()))
        chained = pipeline.Pipeline((first.pull(), _work()))
        self.assertEqual(list(chained.pull()), [0, 4, 8, 12, 16])
class ParallelStageTest(unittest.TestCase):
    """Pipeline whose middle stage is a pair of _work() coroutines sharing the load."""
    def setUp(self):
        self.l = []
        self.pl = pipeline.Pipeline((
            _produce(), (_work(), _work()), _consume(self.l)
        ))
    def test_run_sequential(self):
        self.pl.run_sequential()
        self.assertEqual(self.l, [0, 2, 4, 6, 8])
    def test_run_parallel(self):
        self.pl.run_parallel()
        # Order possibly not preserved; use set equality.
        self.assertEqual(set(self.l), set([0, 2, 4, 6, 8]))
    def test_pull(self):
        pl = pipeline.Pipeline((_produce(), (_work(), _work())))
        self.assertEqual(list(pl.pull()), [0, 2, 4, 6, 8])
class ExceptionTest(unittest.TestCase):
    """A TestException raised mid-stream must propagate out of the pipeline."""
    def setUp(self):
        self.l = []
        self.pl = pipeline.Pipeline((_produce(), _exc_work(),
                                     _consume(self.l)))
    def test_run_sequential(self):
        self.assertRaises(TestException, self.pl.run_sequential)
    def test_run_parallel(self):
        self.assertRaises(TestException, self.pl.run_parallel)
    def test_pull(self):
        pl = pipeline.Pipeline((_produce(), _exc_work()))
        pull = pl.pull()
        # Use the next() builtin rather than the generator's .next() method:
        # .next() exists only on Python 2, next() works on both 2 and 3.
        for i in range(3):
            next(pull)
        self.assertRaises(TestException, next, pull)
class ParallelExceptionTest(unittest.TestCase):
    """An exception in one branch of a parallel stage must still propagate."""
    def setUp(self):
        self.l = []
        self.pl = pipeline.Pipeline((
            _produce(), (_exc_work(), _exc_work()), _consume(self.l)
        ))
    def test_run_parallel(self):
        self.assertRaises(TestException, self.pl.run_parallel)
class ConstrainedThreadedPipelineTest(unittest.TestCase):
    """Parallel runs with a bounded inter-stage queue (run_parallel(1))."""
    def test_constrained(self):
        l = []
        # Do a "significant" amount of work...
        pl = pipeline.Pipeline((_produce(1000), _work(), _consume(l)))
        # ... with only a single queue slot.
        pl.run_parallel(1)
        self.assertEqual(l, [i * 2 for i in range(1000)])
    def test_constrained_exception(self):
        # Raise an exception in a constrained pipeline.
        l = []
        pl = pipeline.Pipeline((_produce(1000), _exc_work(), _consume(l)))
        self.assertRaises(TestException, pl.run_parallel, 1)
    def test_constrained_parallel(self):
        # Constrained queue combined with a parallel middle stage; ordering
        # is not guaranteed, so compare as sets.
        l = []
        pl = pipeline.Pipeline((
            _produce(1000), (_work(), _work()), _consume(l)
        ))
        pl.run_parallel(1)
        self.assertEqual(set(l), set(i * 2 for i in range(1000)))
class BubbleTest(unittest.TestCase):
    """A BUBBLE yielded by a stage drops that message (3 -> 6 is absent)."""
    def setUp(self):
        self.l = []
        self.pl = pipeline.Pipeline((_produce(), _bub_work(),
                                     _consume(self.l)))
    def test_run_sequential(self):
        self.pl.run_sequential()
        self.assertEqual(self.l, [0, 2, 4, 8])
    def test_run_parallel(self):
        self.pl.run_parallel()
        self.assertEqual(self.l, [0, 2, 4, 8])
    def test_pull(self):
        pl = pipeline.Pipeline((_produce(), _bub_work()))
        self.assertEqual(list(pl.pull()), [0, 2, 4, 8])
class MultiMessageTest(unittest.TestCase):
    """A stage yielding pipeline.multiple([i, -i]) fans out into two messages."""
    def setUp(self):
        self.l = []
        self.pl = pipeline.Pipeline((
            _produce(), _multi_work(), _consume(self.l)
        ))
    def test_run_sequential(self):
        self.pl.run_sequential()
        self.assertEqual(self.l, [0, 0, 1, -1, 2, -2, 3, -3, 4, -4])
    def test_run_parallel(self):
        self.pl.run_parallel()
        self.assertEqual(self.l, [0, 0, 1, -1, 2, -2, 3, -3, 4, -4])
    def test_pull(self):
        pl = pipeline.Pipeline((_produce(), _multi_work()))
        self.assertEqual(list(pl.pull()), [0, 0, 1, -1, 2, -2, 3, -3, 4, -4])
class StageDecoratorTest(unittest.TestCase):
    """Tests for the pipeline.stage and pipeline.mutator_stage decorators."""
    def test_stage_decorator(self):
        # Extra args are bound at construction time; the last parameter
        # receives the pipeline item.
        @pipeline.stage
        def add(n, i):
            return i + n
        pl = pipeline.Pipeline([
            iter([1, 2, 3]),
            add(2)
        ])
        self.assertEqual(list(pl.pull()), [3, 4, 5])
    def test_mutator_stage_decorator(self):
        # A mutator stage modifies the item in place; the item itself is
        # forwarded downstream.
        @pipeline.mutator_stage
        def setkey(key, item):
            item[key] = True
        pl = pipeline.Pipeline([
            iter([{'x': False}, {'a': False}]),
            setkey('x'),
        ])
        self.assertEqual(list(pl.pull()),
                         [{'x': True}, {'a': False, 'x': True}])
def suite():
    """Build the test suite for this module (used by unittest.main below)."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
if __name__ == '__main__':
    # Run the suite() defined above when executed as a script.
    unittest.main(defaultTest='suite')
| |
# Copyright 2014-2015 Spectra Logic Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import os
import stat
import sys
import tempfile
import unittest
from ds3.ds3 import *
from ds3.libds3 import LibDs3JobStatus
bucketName = "python_test_bucket"
resources = ["beowulf.txt", "sherlock_holmes.txt", "tale_of_two_cities.txt", "ulysses.txt"]
unicodeResources = [unicode(filename) for filename in resources]
def pathForResource(resourceName):
    """Return the absolute path of a fixture file in the resources directory.

    Note: uses the Python 2 `unicode` builtin to decode __file__.
    """
    fsEncoding = sys.getfilesystemencoding()
    resourceDir = os.path.join(os.path.dirname(unicode(__file__, fsEncoding)), "resources")
    return os.path.join(resourceDir, resourceName)
def populateTestData(client, bucketName, resourceList = None, prefix = "", metadata = None):
if not resourceList:
resourceList = resources
def getSize(fileName):
size = os.stat(pathForResource(fileName)).st_size
return (prefix + fileName, size)
client.putBucket(bucketName)
pathes = {prefix + fileName: pathForResource(fileName) for fileName in resourceList}
fileList = map(getSize, resourceList)
bulkResult = client.putBulk(bucketName, fileList)
for chunk in bulkResult.chunks:
allocateChunk = client.allocateChunk(chunk.chunkId)
for obj in allocateChunk.chunk.objects:
client.putObject(bucketName, obj.name, obj.offset, obj.length, bulkResult.jobId, pathes[obj.name], metadata)
return fileList
def clearBucket(client, bucketName):
    """Delete every object in bucketName, then delete the bucket itself."""
    for entry in client.getBucket(bucketName).objects:
        client.deleteObject(bucketName, entry.name)
    client.deleteBucket(bucketName)
def statusCodeList(status):
    """Expected-error triple for checkBadInputs: a Ds3Error with this statusCode."""
    extractor = lambda obj: obj.statusCode
    return [Ds3Error, extractor, status]
def typeErrorList(badType):
    """Expected-error triple for checkBadInputs: a TypeError naming badType's type."""
    message = ("expected instance of type basestring, "
               "got instance of type " + type(badType).__name__)
    return [TypeError, str, message]
def reasonErrorList(reason):
    """Expected-error triple for checkBadInputs: a Ds3Error whose str() is reason."""
    return [Ds3Error, str, reason]
class Ds3TestCase(unittest.TestCase):
    """Base test case: builds a DS3 client from env vars, cleans up the bucket."""
    def setUp(self):
        self.client = createClientFromEnv()
    def tearDown(self):
        try:
            clearBucket(self.client, bucketName)
        except Ds3Error as e:
            # The bucket may legitimately not exist if the test never made it.
            pass
    def checkBadInputs(self, testFunction, inputs, second_arg_dict = None):
        """Call testFunction with each bad input and verify the expected error.

        inputs maps a first argument to an [ExceptionType, extractor,
        expected] triple (see statusCodeList/typeErrorList/reasonErrorList).
        With second_arg_dict, testFunction is called for every (input, arg)
        pair; a non-None triple in second_arg_dict overrides the one keyed
        by the first argument.

        NOTE(review): if testFunction does NOT raise, no assertion runs and
        the case silently passes -- assertRaises would be stricter; confirm
        this leniency is intended.
        """
        for test_input, status in inputs.items():
            if second_arg_dict:
                for arg, second_status in second_arg_dict.items():
                    if second_status:
                        # The second argument's expectation takes precedence.
                        try:
                            testFunction(test_input, arg)
                        except second_status[0] as e:
                            self.assertEqual(second_status[1](e), second_status[2])
                    else:
                        try:
                            testFunction(test_input, arg)
                        except status[0] as e:
                            self.assertEqual(status[1](e), status[2])
            else:
                try:
                    testFunction(test_input)
                except status[0] as e:
                    self.assertEqual(status[1](e), status[2])
class BucketTestCase(Ds3TestCase):
    """Integration tests for bucket-level operations against a live DS3 endpoint."""
    def testPutBucket(self):
        """tests putBucket"""
        self.client.putBucket(bucketName)
        bucketSet = frozenset(map(lambda service: service.name, self.client.getService()))
        self.assertTrue(bucketName in bucketSet)
    def testPutBucketUnicode(self):
        """tests putBucket"""
        self.client.putBucket(unicode(bucketName))
        bucketSet = frozenset(map(lambda service: service.name, self.client.getService()))
        self.assertTrue(bucketName in bucketSet)
    def testPutBucketBadInput(self):
        """tests putBucket: bad input to function"""
        self.client.putBucket(bucketName)
        # Empty name -> 400, duplicate -> 409, non-string -> TypeError.
        badBuckets = {"": statusCodeList(400), bucketName: statusCodeList(409), 1234: typeErrorList(1234), None:typeErrorList(None)}
        self.checkBadInputs(self.client.putBucket, badBuckets)
    def testDeleteEmptyBucket(self):
        """tests deleteBucket: deleting an empty bucket"""
        self.client.putBucket(bucketName)
        self.client.deleteBucket(bucketName)
        bucketSet = frozenset(map(lambda service: service.name, self.client.getService()))
        self.assertFalse(bucketName in bucketSet)
    def testDeleteBucketBadInput(self):
        """tests deleteBucket: bad input to function"""
        populateTestData(self.client, bucketName)
        # Deleting a non-empty bucket -> 409; unknown bucket -> 404.
        badBuckets = {"": statusCodeList(400), bucketName: statusCodeList(409), "not-here": statusCodeList(404), 1234: typeErrorList(1234), None:typeErrorList(None)}
        self.checkBadInputs(self.client.deleteBucket, badBuckets)
    def testGetEmptyBucket(self):
        """tests getBucket: when bucket is empty"""
        self.client.putBucket(bucketName)
        bucketContents = self.client.getBucket(bucketName)
        self.assertEqual(bucketContents.isTruncated, False)
        self.assertEqual(bucketContents.marker, None)
        self.assertEqual(bucketContents.delimiter, None)
        self.assertEqual(bucketContents.maxKeys, 1000)
        self.assertEqual(bucketContents.nextMarker, None)
        self.assertEqual(bucketContents.prefix, None)
        self.assertEqual(len(bucketContents.commonPrefixes), 0)
        self.assertEqual(len(bucketContents.objects), 0)
    def testPutBulkUnicode(self):
        """tests putBulk with unicode object names"""
        fileList = populateTestData(self.client, bucketName, resourceList = unicodeResources)
    def testGetFilledBucket(self):
        """tests getBucket: when bucket has contents"""
        fileList = populateTestData(self.client, bucketName)
        bucketContents = self.client.getBucket(bucketName)
        self.assertEqual(bucketContents.isTruncated, False)
        self.assertEqual(bucketContents.marker, None)
        self.assertEqual(bucketContents.delimiter, None)
        self.assertEqual(bucketContents.maxKeys, 1000)
        self.assertEqual(bucketContents.nextMarker, None)
        self.assertEqual(bucketContents.prefix, None)
        self.assertEqual(len(bucketContents.commonPrefixes), 0)
        self.assertEqual(len(bucketContents.objects), 4)
        returnedFileList = map(lambda obj: (obj.name, obj.size), bucketContents.objects)
        self.assertEqual(returnedFileList, fileList)
    def testGetBucketBadInput(self):
        """tests getBucket: bad input to function"""
        badBuckets = {"": reasonErrorList("Reason: The bucket name parameter is required."), "not-here": statusCodeList(404), 1234: typeErrorList(1234), None:typeErrorList(None)}
        self.checkBadInputs(self.client.getBucket, badBuckets)
    def testPrefix(self):
        """tests getBucket: prefix parameter"""
        populateTestData(self.client, bucketName)
        # Only "beowulf.txt" matches the "beo" prefix.
        bucketContents = self.client.getBucket(bucketName, prefix = "beo")
        self.assertEqual(len(bucketContents.objects), 1)
    def testPagination(self):
        """tests getBucket: maxKeys parameter, getBucket: nextMarker parameter"""
        fileList = []
        for i in xrange(0, 15):
            fileList.append(("file" + str(i), 0))
        self.client.putBucket(bucketName)
        self.client.putBulk(bucketName, fileList)
        # Page through 15 objects 5 at a time; listing order is
        # lexicographic, hence the expected name fragments below.
        bucketResult = self.client.getBucket(bucketName, maxKeys = 5)
        self.assertEqual(len(bucketResult.objects), 5)
        self.assertTrue(bucketResult.nextMarker != None)
        self.assertEqual(bucketResult.objects[4].name[4:6], "12")
        bucketResult = self.client.getBucket(bucketName, maxKeys = 5, nextMarker = bucketResult.nextMarker)
        self.assertEqual(len(bucketResult.objects), 5)
        self.assertTrue(bucketResult.nextMarker != None)
        self.assertEqual(bucketResult.objects[4].name[4], "4")
        bucketResult = self.client.getBucket(bucketName, maxKeys = 5, nextMarker = bucketResult.nextMarker)
        self.assertEqual(len(bucketResult.objects), 5)
        # Last page: no further marker.
        self.assertTrue(bucketResult.nextMarker == None)
        self.assertEqual(bucketResult.objects[4].name[4], "9")
    def testDelimiter(self):
        """tests getBucket: delimiter parameter"""
        fileList = []
        for i in xrange(0, 10):
            fileList.append(("dir/file" + str(i), 0))
        for i in xrange(0, 10):
            fileList.append(("file" + str(i), 0))
        self.client.putBucket(bucketName)
        self.client.putBulk(bucketName, fileList)
        # With "/" as delimiter the dir/ objects collapse into one prefix.
        bucketResult = self.client.getBucket(bucketName, delimiter = "/")
        self.assertEqual(len(bucketResult.objects), 10)
        self.assertEqual(len(bucketResult.commonPrefixes), 1)
        self.assertEqual(bucketResult.commonPrefixes[0], "dir/")
    def testGetService(self):
        """tests getService"""
        servicesBefore = map(lambda service: service.name, frozenset(self.client.getService()))
        self.assertFalse(bucketName in servicesBefore)
        self.client.putBucket(bucketName)
        servicesAfter = map(lambda service: service.name, frozenset(self.client.getService()))
        self.assertTrue(bucketName in servicesAfter)
    def testHeadBucket(self):
        # headBucket should succeed (no exception) on an existing bucket.
        self.client.putBucket(bucketName)
        self.client.headBucket(bucketName)
    def testHeadBucketBadInput(self):
        badBuckets = {"": statusCodeList(400), "not-here": statusCodeList(404), 1234: typeErrorList(1234), None:typeErrorList(None)}
        self.checkBadInputs(self.client.headBucket, badBuckets)
class JobTestCase(Ds3TestCase):
    """Integration tests for job listing and deletion."""
    def testGetJobs(self):
        populateTestData(self.client, bucketName)
        bucketContents = self.client.getBucket(bucketName)
        # Start a bulk-get job; it should appear in getJobs() until deleted.
        bulkGetResult = self.client.getBulk(bucketName, map(lambda obj: obj.name, bucketContents.objects))
        result = map(lambda obj: obj.jobId, self.client.getJobs())
        self.assertTrue(bulkGetResult.jobId in result)
        self.client.deleteJob(bulkGetResult.jobId)
        result = map(lambda obj: obj.jobId, self.client.getJobs())
        self.assertFalse(bulkGetResult.jobId in result)
class ObjectTestCase(Ds3TestCase):
def validateSearchObjects(self, objects, resourceList = resources, objType = "DATA"):
self.assertEqual(len(objects), len(resourceList))
def getSize(fileName):
size = os.stat(pathForResource(fileName)).st_size
return (fileName, size)
fileList = map(getSize, resourceList)
if len(objects)>0:
self.assertEqual(len(set(map(lambda obj: obj.bucketId, objects))), 1)
for index in xrange(0, len(objects)):
self.assertEqual(objects[index].name, fileList[index][0])
# charlesh: in BP 1.2, size returns 0 (will be fixed in 2.4)
# self.assertEqual(objects[index].size, fileList[index][1])
self.assertEqual(objects[index].type, objType)
self.assertEqual(objects[index].version, "1")
def testDeleteObject(self):
"""tests deleteObject: when object exists"""
populateTestData(self.client, bucketName, resourceList = ["beowulf.txt"])
self.client.deleteObject(bucketName, "beowulf.txt")
bucketContents = self.client.getBucket(bucketName)
self.assertEqual(len(bucketContents.objects), 0)
def testDeleteObjectUnicode(self):
"""tests deleteObject: unicode parameter"""
populateTestData(self.client, bucketName, resourceList = ["beowulf.txt"])
self.client.deleteObject(bucketName, unicode("beowulf.txt"))
bucketContents = self.client.getBucket(bucketName)
self.assertEqual(len(bucketContents.objects), 0)
def testDeleteObjectBadInput(self):
"""tests deleteObject: bad input to function"""
self.client.putBucket(bucketName)
badBuckets = {1234:typeErrorList(1234), None:typeErrorList(None)}
self.checkBadInputs(self.client.deleteObject, badBuckets, second_arg_dict = {"":None, "badFile":None, 1234: None, None:None})
badBuckets = {bucketName: statusCodeList(404), "not-here": statusCodeList(404)}
self.checkBadInputs(self.client.deleteObject, badBuckets, second_arg_dict = {"":None, "badFile":None, 1234: typeErrorList(1234), None:typeErrorList(None)})
badBuckets = {"":reasonErrorList("Reason: The bucket name parameter is required.")}
self.checkBadInputs(self.client.deleteObject, badBuckets, second_arg_dict = {"badFile":None})
def testDeleteObjects(self):
"""tests deleteObjects"""
fileList = populateTestData(self.client, bucketName)
deletedResponse = self.client.deleteObjects(bucketName, map(lambda obj: obj[0], fileList))
bucketContents = self.client.getBucket(bucketName)
self.assertEqual(len(bucketContents.objects), 0)
def testDeleteObjectsUnicode(self):
"""tests deleteObjects: unicode parameter"""
fileList = populateTestData(self.client, bucketName)
deletedResponse = self.client.deleteObjects(bucketName, map(lambda obj: unicode(obj[0]), fileList))
bucketContents = self.client.getBucket(bucketName)
self.assertEqual(len(bucketContents.objects), 0)
def testDeleteObjectsEmpty(self):
"""tests deleteObjects: when list passed is empty"""
self.client.putBucket(bucketName)
try:
self.client.deleteObjects(bucketName, [])
except Ds3Error as e:
self.assertEqual(e.reason, "The bulk command requires a list of objects to process")
def testDeleteBadObjects(self):
"""tests deleteObjects: when bucket is empty"""
self.client.putBucket(bucketName)
self.client.deleteObjects(bucketName, ["not-here", "also-not-here"])
def testDeleteObjectsBadBucket(self):
"""tests deleteObjects: when bucket doesn't exist"""
try:
self.client.deleteObjects(bucketName, ["not-here", "also-not-here"])
except Ds3Error as e:
self.assertEqual(e.statusCode, 404)
def testGetPhysicalPlacement(self):
"""tests getPhysicalPlacement: with an empty file"""
populateTestData(self.client, bucketName)
self.assertEqual(len(self.client.getPhysicalPlacement(bucketName, ["bogus.txt"])), 0)
def testGetPhysicalPlacementBadInput(self):
"""tests getPhysicalPlacement: with non-existent bucket"""
try:
self.client.getPhysicalPlacement(bucketName, ["bogus.txt"])
except Ds3Error as e:
self.assertEqual(e.statusCode, 404)
def testGetPhysicalPlacementFull(self):
"""tests getPhysicalPlacement: with an empty file"""
populateTestData(self.client, bucketName)
self.assertEqual(len(self.client.getPhysicalPlacement(bucketName, ["bogus.txt"], fullDetails = True)), 0)
def testGetPhysicalPlacementFullBadInput(self):
"""tests getPhysicalPlacement: with non-existent bucket"""
try:
self.client.getPhysicalPlacement(bucketName, ["bogus.txt"], fullDetails = True)
except Ds3Error as e:
self.assertEqual(e.statusCode, 404)
def testDeleteFolder(self):
"""tests deleteFolder"""
populateTestData(self.client, bucketName, prefix = "folder/")
self.client.deleteFolder(bucketName, "folder")
bucketResult = self.client.getBucket(bucketName)
self.assertEqual(len(bucketResult.objects), 0)
def testDeleteFolderBadInput(self):
"""tests deleteFolder"""
self.client.putBucket(bucketName)
badBuckets = {"": statusCodeList(404), "fakeBucket": statusCodeList(404), bucketName: statusCodeList(404)}
self.checkBadInputs(self.client.deleteFolder, badBuckets, second_arg_dict = {"folder":None})
def testGetObjects(self):
populateTestData(self.client, bucketName)
objects = self.client.getObjects()
self.validateSearchObjects(objects, resources)
def testGetObjectsBucketName(self):
    """tests getObjects: filtering by bucket name"""
    populateTestData(self.client, bucketName)
    objects = self.client.getObjects(bucketName = bucketName)
    self.validateSearchObjects(objects, resources)
def testGetObjectsObjectName(self):
    """tests getObjects: filtering by bucket and object name"""
    populateTestData(self.client, bucketName)
    objects = self.client.getObjects(bucketName = bucketName, name = "beowulf.txt")
    self.validateSearchObjects(objects, ["beowulf.txt"])
def testGetObjectsPageParameters(self):
    """tests getObjects: pageLength/pageOffset split the listing in two"""
    populateTestData(self.client, bucketName)
    page_one = self.client.getObjects(bucketName = bucketName, pageLength = 2)
    self.assertEqual(len(page_one), 2)
    page_two = self.client.getObjects(bucketName = bucketName, pageLength = 2, pageOffset = 2)
    self.assertEqual(len(page_two), 2)
    # Concatenating both pages must reproduce the full resource set.
    self.validateSearchObjects(page_one + page_two, resources)
def testGetObjectsType(self):
    """tests getObjects: filtering by object type"""
    populateTestData(self.client, bucketName)
    objects = self.client.getObjects(bucketName = bucketName, objType = "DATA")
    self.validateSearchObjects(objects, resources)
    # No folders were created, so a FOLDER search must come back empty.
    objects = self.client.getObjects(bucketName = bucketName, objType = "FOLDER")
    self.validateSearchObjects(objects, [], objType = "FOLDER")
def testGetObjectsVersion(self):
    """tests getObjects: filtering by version"""
    populateTestData(self.client, bucketName)
    # Freshly-put objects are at version 1.
    objects = self.client.getObjects(bucketName = bucketName, version = 1)
    self.validateSearchObjects(objects, resources)
def testGetBulkUnicode(self):
    """tests getObject: unicode parameter"""
    # NOTE(review): `unicode` (and the lazily-consumed `map`) are Python 2
    # idioms; this test will NameError on Python 3 -- confirm target runtime.
    populateTestData(self.client, bucketName, resourceList = unicodeResources)
    bucketContents = self.client.getBucket(bucketName)
    bulkGetResult = self.client.getBulk(bucketName, map(lambda obj: unicode(obj.name), bucketContents.objects))
    tempFiles = []
    availableChunks = self.client.getAvailableChunks(bulkGetResult.jobId)
    # Fetch each object of the first chunk into its own temp file.
    for obj in availableChunks.bulkPlan.chunks[0].objects:
        newFile = tempfile.mkstemp()
        tempFiles.append(newFile)
        metadata_resp = self.client.getObject(bucketName, obj.name, obj.offset, bulkGetResult.jobId, newFile[1])
    # mkstemp returns (fd, path); close and delete each temp file.
    for tempFile in tempFiles:
        os.close(tempFile[0])
        os.remove(tempFile[1])
    #jobStatusResponse = self.client.getJob(bulkGetResult.jobId)
    #self.assertEqual(jobStatusResponse.status, LibDs3JobStatus.COMPLETED)
class ObjectMetadataTestCase(Ds3TestCase):
    """Tests reading and writing per-object metadata."""

    def testHeadObject(self):
        """tests headObject: metadata round-trips and is normalized to lists"""
        # Values may be supplied as a list, a plain string, or a parenthesized
        # string; the response is expected to normalize all of them to lists.
        metadata = {"name1":["value1"], "name2":"value2", "name3":("value3")}
        metadata_check = {"name1":["value1"], "name2":["value2"], "name3":["value3"]}
        populateTestData(self.client, bucketName, resourceList = ["beowulf.txt"], metadata = metadata)
        metadata_resp = self.client.headObject(bucketName, "beowulf.txt")
        self.assertEqual(metadata_check, metadata_resp)

    def testHeadObjectBadInput(self):
        """tests headObject: bad input to function"""
        metadata = {"name1":["value1"], "name2":"value2", "name3":("value3")}
        populateTestData(self.client, bucketName, resourceList = ["beowulf.txt"], metadata = metadata)
        # Valid-string bucket names crossed with a range of object names.
        badBuckets = {"fakeBucket": statusCodeList(404), bucketName: statusCodeList(404)}
        self.checkBadInputs(self.client.headObject, badBuckets, second_arg_dict = {"":reasonErrorList("Reason: The object name parameter is required."), "badFile":None, None:typeErrorList(None), 1234:typeErrorList(1234)})
        # Non-string bucket names must raise type errors for any object name.
        badBuckets = {None:typeErrorList(None), 1234:typeErrorList(1234)}
        self.checkBadInputs(self.client.headObject, badBuckets, second_arg_dict = {"":None, "badFile":None, None:None, 1234:None})
        # An empty bucket name is rejected before the object name is checked.
        badBuckets = {"": reasonErrorList("Reason: The bucket name parameter is required.")}
        self.checkBadInputs(self.client.headObject, badBuckets, second_arg_dict = {"badFile":None, None:typeErrorList(None), 1234:typeErrorList(1234)})

    def testGetBulkWithMetadata(self):
        """tests getObject: metadata parameter, putObject:metadata parameter"""
        metadata = {"name1":["value1"], "name2":["value2"], "name3":["value3"]}
        populateTestData(self.client, bucketName, resourceList = ["beowulf.txt"], metadata = metadata)
        bucketContents = self.client.getBucket(bucketName)
        bulkGetResult = self.client.getBulk(bucketName, map(lambda obj: obj.name, bucketContents.objects))
        tempFiles = []
        availableChunks = self.client.getAvailableChunks(bulkGetResult.jobId)
        for obj in availableChunks.bulkPlan.chunks[0].objects:
            newFile = tempfile.mkstemp()
            tempFiles.append(newFile)
            # getObject writes the payload to the temp path and returns the
            # object's metadata.
            metadata_resp = self.client.getObject(bucketName, obj.name, obj.offset, bulkGetResult.jobId, newFile[1])
        for tempFile in tempFiles:
            os.close(tempFile[0])
            os.remove(tempFile[1])
        jobStatusResponse = self.client.getJob(bulkGetResult.jobId)
        # metadata_resp is from the last object fetched; with a single
        # resource that is beowulf.txt.
        self.assertEqual(metadata, metadata_resp)
        #self.assertEqual(jobStatusResponse.status, LibDs3JobStatus.COMPLETED)
class BasicClientTestCase(Ds3TestCase):
    """Smoke tests for client calls that need no bucket state."""

    def testGetSystemInformation(self):
        """tests getSystemInformation: version and serial are populated"""
        system_info = self.client.getSystemInformation()
        self.assertNotEqual(system_info.apiVersion, None)
        self.assertNotEqual(system_info.serialNumber, None)

    def testVerifySystemHealth(self):
        """tests verifySystemHealth: reports a non-negative check duration"""
        health = self.client.verifySystemHealth()
        self.assertTrue(health.msRequiredToVerifyDataPlannerHealth >= 0)
| |
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Model classes that extend the instances functionality for MySQL instances.
"""
from trove.common import cfg
from trove.common import exception
from trove.common import utils
from trove.common.remote import create_guest_client
from trove.db import get_db_api
from trove.guestagent.db import models as guest_models
from trove.instance import models as base_models
from trove.openstack.common import log as logging
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def persisted_models():
    """Return the models this module persists, keyed by table name."""
    return {'root_enabled_history': RootHistory}
def load_and_verify(context, instance_id):
    """Load the instance and ensure its datastore is running.

    :raises exception.UnprocessableEntity: when the datastore is not running.
    :returns: the loaded instance.
    """
    instance = base_models.Instance.load(context, instance_id)
    if instance.is_datastore_running:
        return instance
    raise exception.UnprocessableEntity(
        "Instance %s is not ready." % instance.id)
class User(object):
    """A MySQL user on a guest instance plus the databases it can access."""

    _data_fields = ['name', 'host', 'password', 'databases']

    def __init__(self, name, host, password, databases):
        self.name = name
        self.host = host
        self.password = password
        self.databases = databases

    @classmethod
    def load(cls, context, instance_id, username, hostname):
        """Fetch one user from the guest; returns None when not found."""
        load_and_verify(context, instance_id)
        # NOTE(review): `validate` is never used after assignment --
        # presumably the MySQLUser setters validate username/hostname as a
        # side effect; confirm.
        validate = guest_models.MySQLUser()
        validate.name = username
        validate.host = hostname
        client = create_guest_client(context, instance_id)
        found_user = client.get_user(username=username, hostname=hostname)
        if not found_user:
            return None
        database_names = [{'name': db['_name']}
                          for db in found_user['_databases']]
        return cls(found_user['_name'],
                   found_user['_host'],
                   found_user['_password'],
                   database_names)

    @classmethod
    def create(cls, context, instance_id, users):
        """Create users on the guest, rejecting already-existing name@host."""
        # Load InstanceServiceStatus to verify if it's running
        load_and_verify(context, instance_id)
        client = create_guest_client(context, instance_id)
        for user in users:
            user_name = user['_name']
            host_name = user['_host']
            userhost = "%s@%s" % (user_name, host_name)
            # Probe the guest's paginated user listing starting at the
            # would-be marker to see if this name@host already exists.
            existing_users, _nadda = Users.load_with_client(
                client,
                limit=1,
                marker=userhost,
                include_marker=True)
            if (len(existing_users) > 0 and
                    str(existing_users[0].name) == str(user_name) and
                    str(existing_users[0].host) == str(host_name)):
                raise exception.UserAlreadyExists(name=user_name,
                                                  host=host_name)
        return client.create_user(users)

    @classmethod
    def delete(cls, context, instance_id, user):
        """Drop a user on the guest."""
        load_and_verify(context, instance_id)
        create_guest_client(context, instance_id).delete_user(user)

    @classmethod
    def access(cls, context, instance_id, username, hostname):
        """Return a UserAccess holding the schemas this user can reach."""
        load_and_verify(context, instance_id)
        client = create_guest_client(context, instance_id)
        databases = client.list_access(username, hostname)
        dbs = []
        for db in databases:
            dbs.append(Schema(name=db['_name'],
                              collate=db['_collate'],
                              character_set=db['_character_set']))
        return UserAccess(dbs)

    @classmethod
    def grant(cls, context, instance_id, username, hostname, databases, permissions):
        """Grant the user access to the given databases."""
        load_and_verify(context, instance_id)
        client = create_guest_client(context, instance_id)
        client.grant_access(username, hostname, databases, permissions)

    @classmethod
    def revoke(cls, context, instance_id, username, hostname, database):
        """Revoke the user's access to one database."""
        load_and_verify(context, instance_id)
        client = create_guest_client(context, instance_id)
        client.revoke_access(username, hostname, database)

    @classmethod
    def change_password(cls, context, instance_id, users):
        """Push new passwords for the given users to the guest."""
        load_and_verify(context, instance_id)
        client = create_guest_client(context, instance_id)
        change_users = []
        for user in users:
            change_user = {'name': user.name,
                           'host': user.host,
                           'password': user.password,
                           }
            change_users.append(change_user)
        client.change_passwords(change_users)

    @classmethod
    def update_attributes(cls, context, instance_id, username, hostname,
                          user_attrs):
        """Rename a user and/or change its host or password."""
        load_and_verify(context, instance_id)
        client = create_guest_client(context, instance_id)
        user_changed = user_attrs.get('name')
        host_changed = user_attrs.get('host')
        # NOTE(review): as in load(), `validate` exists only for the setters'
        # side effects -- confirm.
        validate = guest_models.MySQLUser()
        if host_changed:
            validate.host = host_changed
        if user_changed:
            validate.name = user_changed
        user = user_changed or username
        host = host_changed or hostname
        userhost = "%s@%s" % (user, host)
        if user_changed or host_changed:
            # Refuse to rename onto an already-existing name@host.
            existing_users, _nadda = Users.load_with_client(
                client,
                limit=1,
                marker=userhost,
                include_marker=True)
            if (len(existing_users) > 0 and
                    existing_users[0].name == user and
                    existing_users[0].host == host):
                raise exception.UserAlreadyExists(name=user,
                                                  host=host)
        client.update_attributes(username, hostname, user_attrs)
class UserAccess(object):
    """Thin container for the databases a user has access to."""

    _data_fields = ['databases']

    def __init__(self, databases):
        # Sequence of Schema objects built by User.access().
        self.databases = databases
class Root(object):
    """Whether the MySQL root user has been enabled on an instance."""

    @classmethod
    def load(cls, context, instance_id):
        """Return True if root was ever enabled for the instance."""
        load_and_verify(context, instance_id)
        # TODO(pdmars): remove the is_root_enabled call from the guest agent,
        # just check the database for this information.
        # If the root history returns null or raises an exception, the root
        # user hasn't been enabled.
        try:
            root_history = RootHistory.load(context, instance_id)
        except exception.NotFound:
            return False
        if not root_history:
            return False
        return True

    @classmethod
    def create(cls, context, instance_id, user):
        """Enable root on the guest and record who enabled it."""
        load_and_verify(context, instance_id)
        root = create_guest_client(context, instance_id).enable_root()
        root_user = guest_models.RootUser()
        root_user.deserialize(root)
        # Persist the event so load() reports True from now on.
        RootHistory.create(context, instance_id, user)
        return root_user
class RootHistory(object):
    """DB record of when, and by whom, root was enabled on an instance."""

    _auto_generated_attrs = ['id']
    _data_fields = ['instance_id', 'user', 'created']
    _table_name = 'root_enabled_history'

    def __init__(self, instance_id, user):
        # The instance id is reused as the row's id, so an instance can have
        # at most one history row.
        self.id = instance_id
        self.user = user
        self.created = utils.utcnow()

    def save(self):
        LOG.debug("Saving %(name)s: %(dict)s" %
                  {'name': self.__class__.__name__, 'dict': self.__dict__})
        return get_db_api().save(self)

    @classmethod
    def load(cls, context, instance_id):
        """Return the history row for the instance (None when absent)."""
        history = get_db_api().find_by(cls, id=instance_id)
        return history

    @classmethod
    def create(cls, context, instance_id, user):
        """Record the enablement once; later calls return the original row."""
        history = cls.load(context, instance_id)
        if history is not None:
            return history
        history = RootHistory(instance_id, user)
        return history.save()
def load_via_context(cls, context, instance_id, name=None):
    """Creates guest and fetches pagination arguments from the context."""
    load_and_verify(context, instance_id)
    # Clamp the requested page size to the class's DEFAULT_LIMIT cap.
    limit = int(context.limit or cls.DEFAULT_LIMIT)
    limit = cls.DEFAULT_LIMIT if limit > cls.DEFAULT_LIMIT else limit
    client = create_guest_client(context, instance_id)
    # The REST API standard dictates that we *NEVER* include the marker.
    return cls.load_with_client(client=client, limit=limit,
                                marker=context.marker, include_marker=False,
                                name=name)
class Users(object):
    """Paginated listing of MySQL users on a guest instance."""

    DEFAULT_LIMIT = CONF.users_page_size

    @classmethod
    def load(cls, context, instance_id, name=None):
        return load_via_context(cls, context, instance_id, name)

    @classmethod
    def load_with_client(cls, client, limit, marker, include_marker, name=None):
        """List users via an existing guest client.

        Returns (model_users, next_marker); users in CONF.ignore_users are
        skipped, and when `name` is given only matching users are kept.
        """
        user_list, next_marker = client.list_users(
            limit=limit,
            marker=marker,
            include_marker=include_marker)
        model_users = []
        ignore_users = CONF.ignore_users
        for user in user_list:
            mysql_user = guest_models.MySQLUser()
            mysql_user.deserialize(user)
            if mysql_user.name in ignore_users:
                continue
            # TODO(hub-cap): databases are not being returned in the
            # reference agent
            if name and mysql_user.name != name:
                continue
            dbs = []
            for db in mysql_user.databases:
                dbs.append({'name': db['_name']})
            model_users.append(User(mysql_user.name,
                                    mysql_user.host,
                                    mysql_user.password,
                                    dbs))
        return model_users, next_marker
class Schema(object):
    """A MySQL database (schema) on a guest instance."""

    _data_fields = ['name', 'collate', 'character_set']

    def __init__(self, name, collate, character_set):
        self.name = name
        self.collate = collate
        self.character_set = character_set

    @classmethod
    def create(cls, context, instance_id, schemas):
        """Create schemas on the guest, rejecting already-existing names."""
        load_and_verify(context, instance_id)
        client = create_guest_client(context, instance_id)
        for schema in schemas:
            schema_name = schema['_name']
            # Probe the paginated listing at the would-be marker for an
            # existing schema of the same name.
            existing_schema, _nadda = Schemas.load_with_client(
                client,
                limit=1,
                marker=schema_name,
                include_marker=True)
            if (len(existing_schema) > 0 and
                    str(existing_schema[0].name) == str(schema_name)):
                raise exception.DatabaseAlreadyExists(name=schema_name)
        return client.create_database(schemas)

    @classmethod
    def delete(cls, context, instance_id, schema):
        """Drop a schema on the guest."""
        load_and_verify(context, instance_id)
        create_guest_client(context, instance_id).delete_database(schema)
class Schemas(object):
    """Paginated listing of MySQL schemas on a guest instance."""

    DEFAULT_LIMIT = CONF.databases_page_size

    @classmethod
    def load(cls, context, instance_id):
        return load_via_context(cls, context, instance_id)

    @classmethod
    def load_with_client(cls, client, limit, marker, include_marker, name=None):
        """List schemas via an existing guest client.

        Returns (model_schemas, next_marker); schemas in CONF.ignore_dbs are
        skipped. `name` is accepted for signature parity but unused here.
        """
        schemas, next_marker = client.list_databases(
            limit=limit,
            marker=marker,
            include_marker=include_marker)
        model_schemas = []
        ignore_dbs = CONF.ignore_dbs
        for schema in schemas:
            mysql_schema = guest_models.MySQLDatabase()
            mysql_schema.deserialize(schema)
            if mysql_schema.name in ignore_dbs:
                continue
            model_schemas.append(Schema(mysql_schema.name,
                                        mysql_schema.collate,
                                        mysql_schema.character_set))
        return model_schemas, next_marker
| |
# -*- coding: utf-8 -*-
from .base import BaseMessage
from .records import RecordUpdateMessage, RecordDeleteMessage, RecordCreateMessage
from ..exceptions import PyOrientBadMethodCallException
from ..constants import COMMAND_OP, FIELD_BOOLEAN, FIELD_BYTE, FIELD_CHAR, \
FIELD_INT, FIELD_LONG, FIELD_SHORT, FIELD_STRING, QUERY_SYNC, FIELD_BYTES, \
TX_COMMIT_OP, QUERY_GREMLIN, QUERY_ASYNC, QUERY_CMD, QUERY_TYPES, \
QUERY_SCRIPT
from ..utils import need_connected, need_db_opened, dlog
__author__ = 'Ostico <ostico@gmail.com>'
#
# COMMAND_OP
#
# Executes remote commands:
#
# Request: (mode:byte)(class-name:string)(command-payload-length:int)(command-payload)
# Response:
# - synchronous commands: [(synch-result-type:byte)[(synch-result-content:?)]]+
# - asynchronous commands: [(asynch-result-type:byte)[(asynch-result-content:?)]*]
# (pre-fetched-record-size.md)[(pre-fetched-record)]*+
#
# Where the request:
#
# mode can be 'a' for asynchronous mode and 's' for synchronous mode
# class-name is the class name of the command implementation.
# There are short form for the most common commands:
# q stands for query as idempotent command. It's like passing
# com.orientechnologies.orient.core.sql.query.OSQLSynchQuery
# c stands for command as non-idempotent command (insert, update, etc).
# It's like passing com.orientechnologies.orient.core.sql.OCommandSQL
# s stands for script. It's like passing
# com.orientechnologies.orient.core.command.script.OCommandScript.
# Script commands by using any supported server-side scripting like Javascript command. Since v1.0.
# any other values is the class name. The command will be created via
# reflection using the default constructor and invoking the fromStream() method against it
# command-payload is the command's serialized payload (see Network-Binary-Protocol-Commands)
# Response is different for synchronous and asynchronous request:
# synchronous:
# synch-result-type can be:
# 'n', means null result
# 'r', means single record returned
# 'l', collection of records. The format is:
# an integer to indicate the collection size
# all the records one by one
# 'a', serialized result, a byte[] is sent
# synch-result-content, can only be a record
# pre-fetched-record-size, as the number of pre-fetched records not
# directly part of the result set but joined to it by fetching
# pre-fetched-record as the pre-fetched record content
# asynchronous:
# asynch-result-type can be:
# 0: no records remain to be fetched
# 1: a record is returned as a resultset
# 2: a record is returned as pre-fetched to be loaded in client's cache only.
# It's not part of the result set but the client knows that it's available for later access
# asynch-result-content, can only be a record
#
class CommandMessage(BaseMessage):
    """Builds, sends and decodes a COMMAND_OP request (sync/async queries)."""

    def __init__(self, _orient_socket):
        super( CommandMessage, self ).__init__(_orient_socket)
        # Defaults describe a synchronous SQL query fetching everything.
        self._query = ''
        self._limit = 20
        self._fetch_plan = '*:0'
        self._command_type = QUERY_SYNC
        self._mod_byte = 's'
        self._append( ( FIELD_BYTE, COMMAND_OP ) )

    @need_db_opened
    def prepare(self, params=None ):
        """Serialize the command payload.

        `params`, when given, is a tuple/list of up to five items:
        (command_type, query, limit, fetch_plan, async_callback); missing
        trailing items keep their defaults.
        """
        if isinstance( params, tuple ) or isinstance( params, list ):
            try:
                self.set_command_type( params[0] )
                self._query = params[1]
                self._limit = params[2]
                self._fetch_plan = params[3]
                # callback function use to operate
                # over the async fetched records
                self.set_callback( params[4] )
            except IndexError:
                # Use default for non existent indexes
                pass
        # Mode byte: 's' = synchronous, 'a' = asynchronous (needs a callback).
        if self._command_type == QUERY_CMD \
                or self._command_type == QUERY_SYNC \
                or self._command_type == QUERY_SCRIPT \
                or self._command_type == QUERY_GREMLIN:
            self._mod_byte = 's'
        else:
            if self._callback is None:
                raise PyOrientBadMethodCallException( "No callback was provided.", [])
            self._mod_byte = 'a'
        _payload_definition = [
            ( FIELD_STRING, self._command_type ),
            ( FIELD_STRING, self._query )
        ]
        if self._command_type == QUERY_ASYNC \
                or self._command_type == QUERY_SYNC \
                or self._command_type == QUERY_GREMLIN:
            # a limit specified in a sql string should always override a
            # limit parameter pass to prepare()
            if ' LIMIT ' not in self._query.upper() or self._command_type == QUERY_GREMLIN:
                _payload_definition.append( ( FIELD_INT, self._limit ) )
            else:
                _payload_definition.append( ( FIELD_INT, -1 ) )
            _payload_definition.append( ( FIELD_STRING, self._fetch_plan ) )
        if self._command_type == QUERY_SCRIPT:
            # Scripts carry the language name before the script text.
            _payload_definition.insert( 1, ( FIELD_STRING, 'sql' ) )
        # NOTE(review): trailing zero int -- presumably "no serialized
        # params"; confirm against the binary protocol spec.
        _payload_definition.append( ( FIELD_INT, 0 ) )
        payload = b''.join(
            self._encode_field( x ) for x in _payload_definition
        )
        self._append( ( FIELD_BYTE, self._mod_byte ) )
        self._append( ( FIELD_STRING, payload ) )
        return super( CommandMessage, self ).prepare()

    def fetch_response(self):
        """Decode the server response; returns parsed records for sync runs.

        NOTE(review): the async branch returns None (records are delivered
        to the registered callback instead) -- confirm that is intended.
        """
        # skip execution in case of transaction
        if self._orientSocket.in_transaction is True:
            return self
        # decode header only
        super( CommandMessage, self ).fetch_response()
        if self._command_type == QUERY_ASYNC:
            self._read_async_records()
        else:
            return self._read_sync()

    def set_command_type(self, _command_type):
        """Select one of QUERY_TYPES; raises for anything else."""
        if _command_type in QUERY_TYPES:
            # user choice if present
            self._command_type = _command_type
        else:
            raise PyOrientBadMethodCallException(
                _command_type + ' is not a valid command type', []
            )
        return self

    def set_fetch_plan(self, _fetch_plan):
        self._fetch_plan = _fetch_plan
        return self

    def set_query(self, _query):
        self._query = _query
        return self

    def set_limit(self, _limit):
        self._limit = _limit
        return self

    def _read_sync(self):
        """Decode a synchronous result body ('n', 'r'/'w', 'a' or 'l')."""
        # type of response
        # decode body char with flag continue ( Header already read )
        response_type = self._decode_field( FIELD_CHAR )
        if not isinstance(response_type, str):
            response_type = response_type.decode()
        res = []
        if response_type == 'n':
            # Null result: consume only the trailing end byte.
            self._append( FIELD_CHAR )
            super( CommandMessage, self ).fetch_response(True)
            # end Line \x00
            return None
        elif response_type == 'r' or response_type == 'w':
            res = [ self._read_record() ]
            self._append( FIELD_CHAR )
            # end Line \x00
            _res = super( CommandMessage, self ).fetch_response(True)
            if response_type == 'w':
                # 'w' wraps the scalar value in a record: unwrap 'result'.
                res = [ res[0].oRecordData['result'] ]
        elif response_type == 'a':
            # Serialized (byte[]) result.
            self._append( FIELD_STRING )
            self._append( FIELD_CHAR )
            res = [ super( CommandMessage, self ).fetch_response(True)[0] ]
        elif response_type == 'l':
            # Collection: an int count followed by that many records.
            self._append( FIELD_INT )
            list_len = super( CommandMessage, self ).fetch_response(True)[0]
            for n in range(0, list_len):
                res.append( self._read_record() )
            # async-result-type can be:
            # 0: no records remain to be fetched
            # 1: a record is returned as a result set
            # 2: a record is returned as pre-fetched to be loaded in client's
            #    cache only. It's not part of the result set but the client
            #    knows that it's available for later access
            cached_results = self._read_async_records()
            # cache = cached_results['cached']
        else:
            # this should be never happen, used only to debug the protocol
            # NOTE(review): on Python 3 socket reads yield bytes, so
            # `m != ""` is always True and this drain loop only exits via an
            # exception from the non-blocking read -- confirm.
            msg = b''
            self._orientSocket._socket.setblocking( 0 )
            m = self._orientSocket.read(1)
            while m != "":
                msg += m
                m = self._orientSocket.read(1)
        return res

    def set_callback(self, func):
        """Register the async-record callback; must be callable."""
        if hasattr(func, '__call__'):
            self._callback = func
        else:
            # NOTE(review): if func is not a string, this concatenation
            # itself raises TypeError instead of the intended exception --
            # confirm and consider str(func).
            raise PyOrientBadMethodCallException( func + " is not a callable "
                                                  "function", [])
        return self
#
# TX COMMIT
#
# Commits a transaction. This operation flushes all the
# pending changes to the server side.
#
# Request: (tx-id:int)(using-tx-log:byte)(tx-entry)*(0-byte indicating end-of-records)
# tx-entry: (operation-type:byte)(cluster-id:short)
# (cluster-position:long)(record-type:byte)(entry-content)
#
# entry-content for CREATE: (record-content:bytes)
# entry-content for UPDATE: (version:record-version)(content-changed:boolean)(record-content:bytes)
# entry-content for DELETE: (version:record-version)
# Response: (created-record-count:int)[(client-specified-cluster-id:short)
# (client-specified-cluster-position:long)(created-cluster-id:short)
# (created-cluster-position:long)]*(updated-record-count:int)[(updated-cluster-id:short)
# (updated-cluster-position:long)(new-record-version:int)]*(count-of-collection-changes:int)
# [(uuid-most-sig-bits:long)(uuid-least-sig-bits:long)(updated-file-id:long)(updated-page-index:long)
# (updated-page-offset:int)]*
#
# Where:
# tx-id is the Transaction's Id
# use-tx-log tells if the server must use the Transaction
# Log to recover the transaction. 1 = true, 0 = false
# operation-type can be:
# 1, for UPDATES
# 2, for DELETES
# 3, for CREATIONS
#
# record-content depends on the operation type:
# For UPDATED (1): (original-record-version:int)(record-content:bytes)
# For DELETED (2): (original-record-version:int)
# For CREATED (3): (record-content:bytes)
#
# This response contains two parts: a map of 'temporary' client-generated
# record ids to 'real' server-provided record ids for each CREATED record,
# and a map of UPDATED record ids to update record-versions.
#
# Look at Optimistic Transaction to know how temporary RecordIDs are managed.
#
# The last part or response is referred to RidBag management.
# Take a look at the main page for more details.
class _TXCommitMessage(BaseMessage):
    """Builds and sends a TX_COMMIT_OP request from attached record ops."""

    def __init__(self, _orient_socket):
        super(_TXCommitMessage, self).__init__(_orient_socket)
        self._tx_id = -1
        self._operation_stack = []
        # Maps client-side cluster position (str) -> originating operation.
        self._pre_operation_records = {}
        # Maps final '#cluster:position' rid -> record content.
        self._operation_records = {}
        # Temporary (negative) positions handed out to CREATE entries.
        self._temp_cluster_position_seq = -2
        # order matters
        self._append(( FIELD_BYTE, TX_COMMIT_OP ))
        self._command = TX_COMMIT_OP

    @need_connected
    def prepare(self, params=None):
        """Serialize tx id, tx-log flag and every attached entry."""
        self._append(( FIELD_INT, self.get_transaction_id() ))
        # True -> ask the server to use the transaction log.
        self._append(( FIELD_BOOLEAN, True ))
        for k, v in enumerate(self._operation_stack):
            self._append(( FIELD_BYTE, chr(1) ))  # start of records
            for field in v:
                self._append(field)
        # Zero byte ends the entry list; the empty string closes the payload.
        self._append(( FIELD_BYTE, chr(0) ))
        self._append(( FIELD_STRING, "" ))
        return super(_TXCommitMessage, self).prepare()

    def send(self):
        return super(_TXCommitMessage, self).send()

    def fetch_response(self):
        """Decode the commit response; returns {rid: record_content}."""
        # self.dump_streams()
        super(_TXCommitMessage, self).fetch_response()
        result = {
            'created': [],
            'updated': [],
            'changes': []
        }
        items = self._decode_field(FIELD_INT)
        for x in range(0, items):
            # (created-record-count:int)
            # [
            #   (client-specified-cluster-id:short)
            #   (client-specified-cluster-position:long)
            #   (created-cluster-id:short)
            #   (created-cluster-position:long)
            # ]*
            result['created'].append(
                {
                    'client_c_id': self._decode_field(FIELD_SHORT),
                    'client_c_pos': self._decode_field(FIELD_LONG),
                    'created_c_id': self._decode_field(FIELD_SHORT),
                    'created_c_pos': self._decode_field(FIELD_LONG)
                }
            )
            # Map the temporary client position back to the CREATE operation
            # and stamp the record with its real rid and initial version.
            operation = self._pre_operation_records[
                str(result['created'][-1]['client_c_pos'])
            ]
            rid = "#" + str(result['created'][-1]['created_c_id']) + \
                  ":" + str(result['created'][-1]['created_c_pos'])
            record = getattr(operation, "_record_content")
            record.update(__version=1, __rid=rid)
            self._operation_records[rid] = record
        items = self._decode_field(FIELD_INT)
        for x in range(0, items):
            # (updated-record-count:int)
            # [
            #   (updated-cluster-id:short)
            #   (updated-cluster-position:long)
            #   (new-record-version:int)
            # ]*
            result['updated'].append(
                {
                    'updated_c_id': self._decode_field(FIELD_SHORT),
                    'updated_c_pos': self._decode_field(FIELD_LONG),
                    'new_version': self._decode_field(FIELD_INT),
                }
            )
            try:
                operation = self._pre_operation_records[
                    str(result['updated'][-1]['updated_c_pos'])
                ]
                record = getattr(operation, "_record_content")
                rid = "#" + str(result['updated'][-1]['updated_c_id']) + \
                      ":" + str(result['updated'][-1]['updated_c_pos'])
                record.update(
                    __version=result['updated'][-1]['new_version'],
                    __rid=rid
                )
                self._operation_records[rid] = record
            except KeyError:
                # Updates reported by the server that we did not attach
                # locally have no record to annotate.
                pass
        if self.get_protocol() > 23:
            # Collection-changes section exists only on newer protocols.
            items = self._decode_field(FIELD_INT)
            for x in range(0, items):
                # (count-of-collection-changes:int)
                # [
                #   (uuid-most-sig-bits:long)
                #   (uuid-least-sig-bits:long)
                #   (updated-file-id:long)
                #   (updated-page-index:long)
                #   (updated-page-offset:int)
                # ]*
                # NOTE(review): these entries are appended to 'updated'
                # rather than 'changes' -- looks unintended; confirm before
                # relying on the result dict's shape.
                result['updated'].append(
                    {
                        'uuid_high': self._decode_field(FIELD_LONG),
                        'uuid_low': self._decode_field(FIELD_LONG),
                        'file_id': self._decode_field(FIELD_LONG),
                        'page_index': self._decode_field(FIELD_LONG),
                        'page_offset': self._decode_field(FIELD_INT),
                    }
                )
        self.dump_streams()
        return self._operation_records  # [self._operation_records, result]

    def attach(self, operation):
        """Queue a create/update/delete record message onto the transaction."""
        if not isinstance(operation, BaseMessage):
            # A Subclass of BaseMessage was expected
            raise AssertionError("A subclass of BaseMessage was expected")
        if isinstance(operation, RecordUpdateMessage):
            o_record_enc = self.get_serializer().encode(getattr(operation, "_record_content"))
            # Entry type 1 = UPDATE.
            self._operation_stack.append((
                ( FIELD_BYTE, chr(1) ),
                ( FIELD_SHORT, int(getattr(operation, "_cluster_id")) ),
                ( FIELD_LONG, int(getattr(operation, "_cluster_position")) ),
                ( FIELD_BYTE, getattr(operation, "_record_type") ),
                ( FIELD_INT, int(getattr(operation, "_record_version")) ),
                ( FIELD_STRING, o_record_enc ),
            ))
            if self.get_protocol() >= 23:
                # Protocol 23+ appends the update-content boolean.
                self._operation_stack[-1] = \
                    self._operation_stack[-1] +\
                    ( ( FIELD_BOOLEAN, bool(getattr(operation, "_update_content") ) ), )
            self._pre_operation_records[
                str(getattr(operation, "_cluster_position"))
            ] = operation
        elif isinstance(operation, RecordDeleteMessage):
            # Entry type 2 = DELETE.
            self._operation_stack.append((
                ( FIELD_BYTE, chr(2) ),
                ( FIELD_SHORT, int(getattr(operation, "_cluster_id")) ),
                ( FIELD_LONG, int(getattr(operation, "_cluster_position")) ),
                ( FIELD_BYTE, getattr(operation, "_record_type") ),
                ( FIELD_INT, int(getattr(operation, "_record_version")) ),
            ))
        elif isinstance(operation, RecordCreateMessage):
            o_record_enc = self.get_serializer().encode(getattr(operation, "_record_content"))
            # Entry type 3 = CREATE: cluster id -1 plus a temporary negative
            # position that the server maps to a real rid in the response.
            self._operation_stack.append((
                ( FIELD_BYTE, chr(3) ),
                ( FIELD_SHORT, int(-1) ),
                ( FIELD_LONG, int(self._temp_cluster_position_seq) ),
                ( FIELD_BYTE, getattr(operation, "_record_type") ),
                ( FIELD_STRING, o_record_enc ),
            ))
            self._pre_operation_records[
                str(self._temp_cluster_position_seq)
            ] = operation
            self._temp_cluster_position_seq -= 1
        else:
            raise PyOrientBadMethodCallException(
                "Wrong command type " + operation.__class__.__name__, []
            )
        return self

    def get_transaction_id(self):
        """Lazily derive a session-unique 32-bit transaction id."""
        if self._tx_id < 0:
            from datetime import datetime
            my_epoch = datetime(2014, 7, 1)
            now = datetime.now()
            delta = now - my_epoch
            # write in extended mode to make it easy to read
            # seconds * 1000000 to get the equivalent microseconds
            _sm = ( delta.seconds + delta.days * 24 * 3600 ) * 10 ** 6
            _ms = delta.microseconds
            _mstime = _sm + _ms
            # remove sign
            # treat as unsigned even when the INT is signed
            # and take 4 Bytes
            # ( 32 bit uniqueness is not ensured in any way,
            # but is surely unique in this session )
            # we need only a transaction unique for this session
            # not a real UUID
            if _mstime & 0x80000000:
                self._tx_id = int(( _mstime - 0x80000000 ) & 0xFFFFFFFF)
            else:
                self._tx_id = int(_mstime & 0xFFFFFFFF)
        return self._tx_id

    def begin(self):
        """Reset per-transaction state and mark the socket in-transaction."""
        self._operation_stack = []
        self._pre_operation_records = {}
        self._operation_records = {}
        self._temp_cluster_position_seq = -2
        self._orientSocket.in_transaction = True
        self.get_transaction_id()
        return self

    def commit(self):
        """Send the transaction and return the {rid: record} result map."""
        self._orientSocket.in_transaction = False
        result = self.prepare().send().fetch_response()
        self._operation_stack = []
        self._pre_operation_records = {}
        self._operation_records = {}
        self._tx_id = -1
        self._temp_cluster_position_seq = -2
        return result

    def rollback(self):
        """Discard attached operations without contacting the server."""
        self._operation_stack = []
        self._pre_operation_records = {}
        self._operation_records = {}
        self._tx_id = -1
        self._temp_cluster_position_seq = -2
        self._orientSocket.in_transaction = False
        return self
#
# TX COMMIT facade
#
class TxCommitMessage:
    """Public facade delegating transaction handling to _TXCommitMessage."""

    def __init__(self, _orient_socket):
        self._transaction = _TXCommitMessage(_orient_socket)

    def attach(self, operation):
        """Queue a record create/update/delete operation; fluent."""
        self._transaction.attach(operation)
        return self

    def begin(self):
        """Start a new transaction; fluent."""
        self._transaction.begin()
        return self

    def commit(self):
        """Send the transaction; returns the underlying result map."""
        return self._transaction.commit()

    def rollback(self):
        """Drop all queued operations."""
        return self._transaction.rollback()

    def set_session_token(self, token):
        """Forward the session token to the wrapped message; fluent."""
        self._transaction.set_session_token(token)
        return self
| |
# Copyright (c) 2015 Tom Barron. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the NetApp 7mode NFS storage driver
"""
import ddt
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_utils import units
from cinder import exception
from cinder.objects import fields
from cinder import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.tests.unit.volume.drivers.netapp import fakes as na_fakes
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import nfs_7mode
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp.dataontap.utils import utils as dot_utils
from cinder.volume.drivers.netapp import utils as na_utils
@ddt.ddt
class NetApp7modeNfsDriverTestCase(test.TestCase):
def setUp(self):
    super(NetApp7modeNfsDriverTestCase, self).setUp()
    kwargs = {
        'configuration': self.get_config_7mode(),
        'host': 'openstack@7modenfs',
    }
    # Patch out the root-helper lookup and the remotefs mount client so the
    # driver can be constructed without touching the host system.
    with mock.patch.object(utils, 'get_root_helper',
                           return_value=mock.Mock()):
        with mock.patch.object(remotefs_brick, 'RemoteFsClient',
                               return_value=mock.Mock()):
            self.driver = nfs_7mode.NetApp7modeNfsDriver(**kwargs)
    # Pre-seed driver state so individual tests need no mount/ZAPI setup.
    self.driver._mounted_shares = [fake.NFS_SHARE]
    self.driver.ssc_vols = True
    self.driver.zapi_client = mock.Mock()
    self.driver.perf_library = mock.Mock()
def get_config_7mode(self):
config = na_fakes.create_configuration_cmode()
config.netapp_storage_protocol = 'nfs'
config.netapp_login = 'root'
config.netapp_password = 'pass'
config.netapp_server_hostname = '127.0.0.1'
config.netapp_transport_type = 'http'
config.netapp_server_port = '80'
return config
@ddt.data({'share': None, 'is_snapshot': False},
{'share': None, 'is_snapshot': True},
{'share': 'fake_share', 'is_snapshot': False},
{'share': 'fake_share', 'is_snapshot': True})
@ddt.unpack
def test_clone_backing_file_for_volume(self, share, is_snapshot):
mock_get_export_ip_path = self.mock_object(
self.driver, '_get_export_ip_path',
return_value=(fake.SHARE_IP, fake.EXPORT_PATH))
mock_get_actual_path_for_export = self.mock_object(
self.driver.zapi_client, 'get_actual_path_for_export',
return_value='fake_path')
self.driver._clone_backing_file_for_volume(
fake.FLEXVOL, 'fake_clone', fake.VOLUME_ID, share=share,
is_snapshot=is_snapshot)
mock_get_export_ip_path.assert_called_once_with(
fake.VOLUME_ID, share)
mock_get_actual_path_for_export.assert_called_once_with(
fake.EXPORT_PATH)
self.driver.zapi_client.clone_file.assert_called_once_with(
'fake_path/' + fake.FLEXVOL, 'fake_path/fake_clone',
None)
@ddt.data({'nfs_sparsed_volumes': True},
{'nfs_sparsed_volumes': False})
@ddt.unpack
def test_get_pool_stats(self, nfs_sparsed_volumes):
self.driver.configuration.nfs_sparsed_volumes = nfs_sparsed_volumes
thick = not nfs_sparsed_volumes
total_capacity_gb = na_utils.round_down(
fake.TOTAL_BYTES // units.Gi, '0.01')
free_capacity_gb = na_utils.round_down(
fake.AVAILABLE_BYTES // units.Gi, '0.01')
provisioned_capacity_gb = total_capacity_gb - free_capacity_gb
capacity = {
'reserved_percentage': fake.RESERVED_PERCENTAGE,
'max_over_subscription_ratio': fake.MAX_OVER_SUBSCRIPTION_RATIO,
'total_capacity_gb': total_capacity_gb,
'free_capacity_gb': free_capacity_gb,
'provisioned_capacity_gb': provisioned_capacity_gb,
}
self.mock_object(self.driver,
'_get_share_capacity_info',
return_value=capacity)
self.mock_object(self.driver.perf_library,
'get_node_utilization',
return_value=30.0)
result = self.driver._get_pool_stats(filter_function='filter',
goodness_function='goodness')
expected = [{'pool_name': '192.168.99.24:/fake/export/path',
'QoS_support': False,
'consistencygroup_support': True,
'thick_provisioning_support': thick,
'thin_provisioning_support': not thick,
'free_capacity_gb': 12.0,
'total_capacity_gb': 4468.0,
'reserved_percentage': 7,
'max_over_subscription_ratio': 19.0,
'multiattach': False,
'provisioned_capacity_gb': 4456.0,
'utilization': 30.0,
'filter_function': 'filter',
'goodness_function': 'goodness'}]
self.assertEqual(expected, result)
def test_shortlist_del_eligible_files(self):
mock_get_path_for_export = self.mock_object(
self.driver.zapi_client, 'get_actual_path_for_export')
mock_get_path_for_export.return_value = fake.FLEXVOL
mock_get_file_usage = self.mock_object(
self.driver.zapi_client, 'get_file_usage')
mock_get_file_usage.return_value = fake.CAPACITY_VALUES[0]
expected = [(old_file, fake.CAPACITY_VALUES[0]) for old_file
in fake.FILE_LIST]
result = self.driver._shortlist_del_eligible_files(
fake.NFS_SHARE, fake.FILE_LIST)
self.assertEqual(expected, result)
def test_shortlist_del_eligible_files_empty_list(self):
mock_get_export_ip_path = self.mock_object(
self.driver, '_get_export_ip_path')
mock_get_export_ip_path.return_value = ('', '/export_path')
mock_get_path_for_export = self.mock_object(
self.driver.zapi_client, 'get_actual_path_for_export')
mock_get_path_for_export.return_value = fake.FLEXVOL
result = self.driver._shortlist_del_eligible_files(
fake.NFS_SHARE, [])
self.assertEqual([], result)
@ddt.data({'has_space': True, 'expected': True},
{'has_space': False, 'expected': False})
@ddt.unpack
def test_is_share_clone_compatible(self, has_space, expected):
mock_share_has_space_for_clone = self.mock_object(
self.driver, '_share_has_space_for_clone')
mock_share_has_space_for_clone.return_value = has_space
result = self.driver._is_share_clone_compatible(fake.VOLUME,
fake.NFS_SHARE)
self.assertEqual(expected, result)
def test__get_volume_model_update(self):
"""Driver is not expected to return a model update."""
self.assertIsNone(
self.driver._get_volume_model_update(fake.VOLUME_REF))
def test_delete_cgsnapshot(self):
mock_delete_file = self.mock_object(self.driver, '_delete_file')
model_update, snapshots_model_update = (
self.driver.delete_cgsnapshot(
fake.CG_CONTEXT, fake.CG_SNAPSHOT, [fake.SNAPSHOT]))
mock_delete_file.assert_called_once_with(
fake.SNAPSHOT['volume_id'], fake.SNAPSHOT['name'])
self.assertIsNone(model_update)
self.assertIsNone(snapshots_model_update)
def test_get_snapshot_backing_flexvol_names(self):
snapshots = [
{'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}},
{'volume': {'host': 'hostA@192.168.1.01#/fake/volume2'}},
{'volume': {'host': 'hostA@192.168.99.25#/fake/volume3'}},
{'volume': {'host': 'hostA@192.168.99.25#/fake/volume1'}},
]
hosts = [snap['volume']['host'] for snap in snapshots]
flexvols = self.driver._get_flexvol_names_from_hosts(hosts)
self.assertEqual(3, len(flexvols))
self.assertIn('volume1', flexvols)
self.assertIn('volume2', flexvols)
self.assertIn('volume3', flexvols)
def test_check_for_setup_error(self):
mock_get_ontapi_version = self.mock_object(
self.driver.zapi_client, 'get_ontapi_version')
mock_get_ontapi_version.return_value = ['1', '10']
mock_add_looping_tasks = self.mock_object(
self.driver, '_add_looping_tasks')
mock_super_check_for_setup_error = self.mock_object(
nfs_base.NetAppNfsDriver, 'check_for_setup_error')
self.driver.check_for_setup_error()
mock_get_ontapi_version.assert_called_once_with()
mock_add_looping_tasks.assert_called_once_with()
mock_super_check_for_setup_error.assert_called_once_with()
def test_add_looping_tasks(self):
mock_super_add_looping_tasks = self.mock_object(
nfs_base.NetAppNfsDriver, '_add_looping_tasks')
self.driver._add_looping_tasks()
mock_super_add_looping_tasks.assert_called_once_with()
def test_handle_ems_logging(self):
volume_list = ['vol0', 'vol1', 'vol2']
self.mock_object(
self.driver, '_get_backing_flexvol_names',
return_value=volume_list)
self.mock_object(
dot_utils, 'build_ems_log_message_0',
return_value='fake_base_ems_log_message')
self.mock_object(
dot_utils, 'build_ems_log_message_1',
return_value='fake_pool_ems_log_message')
mock_send_ems_log_message = self.mock_object(
self.driver.zapi_client, 'send_ems_log_message')
self.driver._handle_ems_logging()
mock_send_ems_log_message.assert_has_calls([
mock.call('fake_base_ems_log_message'),
mock.call('fake_pool_ems_log_message'),
])
dot_utils.build_ems_log_message_0.assert_called_once_with(
self.driver.driver_name, self.driver.app_version,
self.driver.driver_mode)
dot_utils.build_ems_log_message_1.assert_called_once_with(
self.driver.driver_name, self.driver.app_version, None,
volume_list, [])
def test_get_backing_flexvol_names(self):
result = self.driver._get_backing_flexvol_names()
self.assertEqual('path', result[0])
def test_create_consistency_group(self):
model_update = self.driver.create_consistencygroup(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP)
self.assertEqual('available', model_update['status'])
def test_update_consistencygroup(self):
model_update, add_volumes_update, remove_volumes_update = (
self.driver.update_consistencygroup(fake.CG_CONTEXT, "foo"))
self.assertIsNone(add_volumes_update)
self.assertIsNone(remove_volumes_update)
@ddt.data(None,
{'replication_status': fields.ReplicationStatus.ENABLED})
def test_create_consistencygroup_from_src(self, volume_model_update):
volume_model_update = volume_model_update or {}
volume_model_update.update(
{'provider_location': fake.PROVIDER_LOCATION})
mock_create_volume_from_snapshot = self.mock_object(
self.driver, 'create_volume_from_snapshot',
return_value=volume_model_update)
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME],
cgsnapshot=fake.CG_SNAPSHOT, snapshots=[fake.SNAPSHOT]))
expected_volumes_model_updates = [{'id': fake.VOLUME['id']}]
expected_volumes_model_updates[0].update(volume_model_update)
mock_create_volume_from_snapshot.assert_called_once_with(
fake.VOLUME, fake.SNAPSHOT)
self.assertIsNone(model_update)
self.assertEqual(expected_volumes_model_updates, volumes_model_update)
@ddt.data(None,
{'replication_status': fields.ReplicationStatus.ENABLED})
def test_create_consistencygroup_from_src_source_vols(
self, volume_model_update):
mock_get_snapshot_flexvols = self.mock_object(
self.driver, '_get_flexvol_names_from_hosts')
mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME]))
mock_clone_backing_file = self.mock_object(
self.driver, '_clone_backing_file_for_volume')
fake_snapshot_name = 'snapshot-temp-' + fake.CONSISTENCY_GROUP['id']
mock_busy = self.mock_object(
self.driver.zapi_client, 'wait_for_busy_snapshot')
self.mock_object(self.driver, '_get_volume_model_update',
return_value=volume_model_update)
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME],
source_cg=fake.CONSISTENCY_GROUP,
source_vols=[fake.NFS_VOLUME]))
expected_volumes_model_updates = [{
'id': fake.NFS_VOLUME['id'],
'provider_location': fake.PROVIDER_LOCATION,
}]
if volume_model_update:
expected_volumes_model_updates[0].update(volume_model_update)
mock_get_snapshot_flexvols.assert_called_once_with(
[fake.NFS_VOLUME['host']])
self.driver.zapi_client.create_cg_snapshot.assert_called_once_with(
set([fake.CG_POOL_NAME]), fake_snapshot_name)
mock_clone_backing_file.assert_called_once_with(
fake.NFS_VOLUME['name'], fake.VOLUME['name'],
fake.NFS_VOLUME['id'], source_snapshot=fake_snapshot_name)
mock_busy.assert_called_once_with(
fake.CG_POOL_NAME, fake_snapshot_name)
self.driver.zapi_client.delete_snapshot.assert_called_once_with(
fake.CG_POOL_NAME, fake_snapshot_name)
self.assertIsNone(model_update)
self.assertEqual(expected_volumes_model_updates, volumes_model_update)
def test_create_consistencygroup_from_src_invalid_parms(self):
model_update, volumes_model_update = (
self.driver.create_consistencygroup_from_src(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.VOLUME]))
self.assertIn('error', model_update['status'])
def test_create_cgsnapshot(self):
snapshot = fake.CG_SNAPSHOT
snapshot['volume'] = fake.CG_VOLUME
mock_get_snapshot_flexvols = self.mock_object(
self.driver, '_get_flexvol_names_from_hosts')
mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME]))
mock_clone_backing_file = self.mock_object(
self.driver, '_clone_backing_file_for_volume')
mock_busy = self.mock_object(
self.driver.zapi_client, 'wait_for_busy_snapshot')
self.driver.create_cgsnapshot(
fake.CG_CONTEXT, fake.CG_SNAPSHOT, [snapshot])
mock_get_snapshot_flexvols.assert_called_once_with(
[snapshot['volume']['host']])
self.driver.zapi_client.create_cg_snapshot.assert_called_once_with(
set([fake.CG_POOL_NAME]), fake.CG_SNAPSHOT_ID)
mock_clone_backing_file.assert_called_once_with(
snapshot['volume']['name'], snapshot['name'],
snapshot['volume']['id'], source_snapshot=fake.CG_SNAPSHOT_ID)
mock_busy.assert_called_once_with(
fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID)
self.driver.zapi_client.delete_snapshot.assert_called_once_with(
fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID)
def test_create_cgsnapshot_busy_snapshot(self):
snapshot = fake.CG_SNAPSHOT
snapshot['volume'] = fake.CG_VOLUME
mock_get_snapshot_flexvols = self.mock_object(
self.driver, '_get_flexvol_names_from_hosts')
mock_get_snapshot_flexvols.return_value = (set([fake.CG_POOL_NAME]))
mock_clone_backing_file = self.mock_object(
self.driver, '_clone_backing_file_for_volume')
mock_busy = self.mock_object(
self.driver.zapi_client, 'wait_for_busy_snapshot')
mock_busy.side_effect = exception.SnapshotIsBusy(snapshot['name'])
mock_mark_snapshot_for_deletion = self.mock_object(
self.driver.zapi_client, 'mark_snapshot_for_deletion')
self.driver.create_cgsnapshot(
fake.CG_CONTEXT, fake.CG_SNAPSHOT, [snapshot])
mock_get_snapshot_flexvols.assert_called_once_with(
[snapshot['volume']['host']])
self.driver.zapi_client.create_cg_snapshot.assert_called_once_with(
set([fake.CG_POOL_NAME]), fake.CG_SNAPSHOT_ID)
mock_clone_backing_file.assert_called_once_with(
snapshot['volume']['name'], snapshot['name'],
snapshot['volume']['id'], source_snapshot=fake.CG_SNAPSHOT_ID)
mock_busy.assert_called_once_with(
fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID)
self.driver.zapi_client.delete_snapshot.assert_not_called()
mock_mark_snapshot_for_deletion.assert_called_once_with(
fake.CG_POOL_NAME, fake.CG_SNAPSHOT_ID)
def test_delete_consistencygroup_volume_delete_failure(self):
self.mock_object(self.driver, '_delete_file', side_effect=Exception)
model_update, volumes = self.driver.delete_consistencygroup(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.CG_VOLUME])
self.assertEqual('deleted', model_update['status'])
self.assertEqual('error_deleting', volumes[0]['status'])
def test_delete_consistencygroup(self):
mock_delete_file = self.mock_object(
self.driver, '_delete_file')
model_update, volumes = self.driver.delete_consistencygroup(
fake.CG_CONTEXT, fake.CONSISTENCY_GROUP, [fake.CG_VOLUME])
self.assertEqual('deleted', model_update['status'])
self.assertEqual('deleted', volumes[0]['status'])
mock_delete_file.assert_called_once_with(
fake.CG_VOLUME_ID, fake.CG_VOLUME_NAME)
| |
__author__ = 'Charlie'
# Utils used with tensorflow implementation
import tensorflow as tf
import numpy as np
import scipy.misc as misc
import os, sys
from six.moves import urllib
import tarfile
import zipfile
import scipy.io
def get_model_data(dir_path, model_url):
    """Download the pretrained VGG .mat model if needed, then load it.

    Raises IOError when the model file is still missing after the
    download attempt.
    """
    maybe_download_and_extract(dir_path, model_url)
    model_file = os.path.join(dir_path, model_url.split("/")[-1])
    if not os.path.exists(model_file):
        raise IOError("VGG Model not found!")
    return scipy.io.loadmat(model_file)
def maybe_download_and_extract(dir_path, url_name, is_tarfile=False, is_zipfile=False):
    """Download url_name into dir_path unless already present; optionally extract.

    The archive is unpacked (gzip tar or zip, per the flags) only right after
    a fresh download; an already-present file is never re-downloaded or
    re-extracted.

    :param dir_path: destination directory (created if missing)
    :param url_name: URL whose final path component becomes the file name
    :param is_tarfile: extract the download as a gzip-compressed tar
    :param is_zipfile: extract the download as a zip archive
    """
    if not os.path.exists(dir_path):
        os.makedirs(dir_path)
    filename = url_name.split('/')[-1]
    filepath = os.path.join(dir_path, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            # Simple carriage-return progress line on stdout.
            sys.stdout.write(
                '\r>> Downloading %s %.1f%%' % (filename, float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()

        filepath, _ = urllib.request.urlretrieve(url_name, filepath, reporthook=_progress)
        print()
        statinfo = os.stat(filepath)
        print('Succesfully downloaded', filename, statinfo.st_size, 'bytes.')
        if is_tarfile:
            # Context manager closes the archive handle (the original
            # leaked the open tarfile object).
            with tarfile.open(filepath, 'r:gz') as archive:
                archive.extractall(dir_path)
        elif is_zipfile:
            with zipfile.ZipFile(filepath) as zf:
                # (Removed an unused `zip_dir = zf.namelist()[0]` local.)
                zf.extractall(dir_path)
def save_image(image, save_dir, name, mean=None):
    """Save `image` as a PNG under save_dir, de-normalizing first if a mean is given.

    :param image: image array to save
    :param save_dir: destination directory
    :param name: file name without extension
    :param mean: optional mean pixel to add back (undoes process_image)
    """
    # Compare against None explicitly: `if mean:` raises ValueError for a
    # numpy-array mean (ambiguous truth value) and wrongly skips a
    # legitimate all-zero mean.
    if mean is not None:
        image = unprocess_image(image, mean)
    misc.imsave(os.path.join(save_dir, name + ".png"), image)
# as describe at Sec.4.2
def get_variable(weights, name):
if name == 'conv1_1_w':
k1, k2, ic, oc = weights.shape
concat_weights = np.random.normal(0.0, 1.0, size=(k1, k2, 2 * ic, oc))
concat_weights[:, :, 0:ic, :] = weights
init = tf.constant_initializer(concat_weights, dtype=tf.float32)
var = tf.get_variable(name=name, initializer=init, shape=concat_weights.shape)
return var
init = tf.constant_initializer(weights, dtype=tf.float32)
var = tf.get_variable(name=name, initializer=init, shape=weights.shape)
return var
def weight_variable(shape, stddev=0.02, name=None):
    """Create a weight tensor initialized from a truncated normal.

    Returns an anonymous tf.Variable when no name is given; otherwise the
    variable is created (or reused) through tf.get_variable.
    """
    init_value = tf.truncated_normal(shape, stddev=stddev)
    if name is not None:
        return tf.get_variable(name, initializer=init_value)
    return tf.Variable(init_value)
def bias_variable(shape, name=None):
    """Create a zero-initialized bias tensor (named variable when `name` given)."""
    init_value = tf.constant(0.0, shape=shape)
    if name is not None:
        return tf.get_variable(name, initializer=init_value)
    return tf.Variable(init_value)
def get_tensor_size(tensor):
    """Return the total element count of a tensor's static shape.

    Multiplies the `.value` of every dimension returned by get_shape();
    an empty shape yields 1.
    """
    # `reduce` is not a builtin on Python 3 -- the original raised NameError.
    from functools import reduce
    from operator import mul
    return reduce(mul, (d.value for d in tensor.get_shape()), 1)
def conv2d_basic(x, W, bias):
    """2-D convolution with stride 1 and SAME padding, plus bias."""
    return tf.nn.bias_add(
        tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding="SAME"), bias)
def conv2d_strided(x, W, b):
    """2-D convolution with stride 2 (halves spatial dims), SAME padding, plus bias."""
    return tf.nn.bias_add(
        tf.nn.conv2d(x, W, strides=[1, 2, 2, 1], padding="SAME"), b)
def conv2d_transpose_strided(x, W, b, output_shape=None, stride=2):
    """Transposed (fractionally strided) 2-D convolution plus bias.

    When output_shape is omitted it is derived from x: spatial dims are
    scaled by `stride` and the channel count is taken from the kernel's
    output-channel axis (W is [h, w, out_channels, in_channels] for
    conv2d_transpose).

    :param x: input 4-D tensor
    :param W: kernel tensor
    :param b: bias tensor
    :param output_shape: explicit output shape, or None to derive it
    :param stride: spatial stride of the transposed convolution
    """
    if output_shape is None:
        output_shape = x.get_shape().as_list()
        # Scale by the actual stride; the original hardcoded *= 2, which
        # produced a mismatched output_shape whenever stride != 2.
        output_shape[1] *= stride
        output_shape[2] *= stride
        output_shape[3] = W.get_shape().as_list()[2]
    conv = tf.nn.conv2d_transpose(x, W, output_shape,
                                  strides=[1, stride, stride, 1],
                                  padding="SAME")
    return tf.nn.bias_add(conv, b)
def leaky_relu(x, alpha=0.0, name=""):
    """Leaky ReLU: elementwise max(alpha * x, x); plain ReLU when alpha == 0."""
    scaled = alpha * x
    return tf.maximum(scaled, x, name)
def max_pool_2x2(x):
    """2x2 max pooling with stride 2 and SAME padding (halves spatial dims)."""
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding="SAME")
def avg_pool_2x2(x):
    """2x2 average pooling with stride 2 and SAME padding (halves spatial dims)."""
    return tf.nn.avg_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                          padding="SAME")
def local_response_norm(x):
    """AlexNet-style local response normalization with fixed hyperparameters."""
    return tf.nn.lrn(x, depth_radius=5, bias=2, alpha=1e-4, beta=0.75)
def batch_norm(x, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5):
    """
    Code taken from http://stackoverflow.com/a/34634291/2267819

    Batch normalization over axes [0, 1, 2] of `x` (assumes NHWC layout
    with n_out channels -- TODO confirm against callers).

    :param x: 4-D input tensor
    :param n_out: number of channels (size of beta/gamma)
    :param phase_train: boolean tensor; True selects batch statistics
    :param scope: variable scope name
    :param decay: exponential-moving-average decay for the stored moments
    :param eps: numerical-stability epsilon
    """
    with tf.variable_scope(scope):
        # Learnable per-channel shift (beta) and scale (gamma).
        beta = tf.get_variable(name='beta', shape=[n_out], initializer=tf.constant_initializer(0.0)
                               , trainable=True)
        gamma = tf.get_variable(name='gamma', shape=[n_out], initializer=tf.random_normal_initializer(1.0, 0.02),
                                trainable=True)
        # Batch moments over batch/height/width; the EMA tracks them so
        # inference can use the running averages instead.
        batch_mean, batch_var = tf.nn.moments(x, [0, 1, 2], name='moments')
        ema = tf.train.ExponentialMovingAverage(decay=decay)

        def mean_var_with_update():
            # Force the EMA update to run before the batch stats are used.
            ema_apply_op = ema.apply([batch_mean, batch_var])
            with tf.control_dependencies([ema_apply_op]):
                return tf.identity(batch_mean), tf.identity(batch_var)

        # Training: use (and update) batch statistics; inference: use EMA.
        mean, var = tf.cond(phase_train,
                            mean_var_with_update,
                            lambda: (ema.average(batch_mean), ema.average(batch_var)))
        normed = tf.nn.batch_normalization(x, mean, var, beta, gamma, eps)
    return normed
def process_image(image, mean_pixel):
    """Zero-center an image by subtracting the dataset mean pixel."""
    centered = image - mean_pixel
    return centered
def unprocess_image(image, mean_pixel):
    """Undo process_image by adding the dataset mean pixel back."""
    restored = image + mean_pixel
    return restored
def bottleneck_unit(x, out_chan1, out_chan2, down_stride=False, up_stride=False, name=None):
    """ResNet bottleneck block (1x1 -> 3x3 -> 1x1 with identity/projection add).

    Modified implementation from github ry?!

    :param x: input 4-D tensor [batch, height, width, channels]
    :param out_chan1: channel count of the two reduction convolutions
    :param out_chan2: channel count of the block output
    :param down_stride: use stride 2 in the first convolution (downsample)
    :param up_stride: use transposed convolutions with stride 2 (upsample)
    :param name: suffix used to build the res/bn scope and variable names
    """
    def conv_transpose(tensor, out_chans, shape, strides, name=None):
        # Transposed convolution of `tensor` to out_chans channels.
        # Fixes vs. original: parameter renamed to out_chans to match the
        # keyword used at every call site (the old `out_channel` name made
        # each call raise TypeError); convolves `tensor` instead of the
        # outer x; computes a real output shape (spatial dims scaled by the
        # stride, channel axis set to out_chans) instead of mutating the
        # integer kernel size.
        in_channel = tensor.get_shape().as_list()[-1]
        kernel = weight_variable([shape, shape, out_chans, in_channel], name=name)
        out_shape = tensor.get_shape().as_list()
        out_shape[1] *= strides
        out_shape[2] *= strides
        out_shape[-1] = out_chans
        return tf.nn.conv2d_transpose(tensor, kernel, output_shape=out_shape,
                                      strides=[1, strides, strides, 1],
                                      padding='SAME', name='conv_transpose')

    def conv(tensor, out_chans, shape, strides, name=None):
        # Plain convolution of `tensor`. Fix vs. original: the original
        # convolved the outer x, silently ignoring its input argument, so
        # branch2b/branch2c never saw the previous branch output.
        in_channel = tensor.get_shape().as_list()[-1]
        kernel = weight_variable([shape, shape, in_channel, out_chans], name=name)
        return tf.nn.conv2d(tensor, kernel, strides=[1, strides, strides, 1],
                            padding='SAME', name='conv')

    def bn(tensor, name=None, scale_name=None):
        """
        :param tensor: 4D tensor input
        :param name: name of the operation
        :param scale_name: accepted for call-site compatibility; unused
            (the original accepted only two arguments while every call
            passed three, raising TypeError)
        :return: local response normalized tensor - not using batch normalization :(
        """
        return tf.nn.lrn(tensor, depth_radius=5, bias=2, alpha=1e-4, beta=0.75,
                         name=name)

    in_chans = x.get_shape().as_list()[3]
    if down_stride or up_stride:
        first_stride = 2
    else:
        first_stride = 1
    with tf.variable_scope('res%s' % name):
        if in_chans == out_chan2:
            b1 = x  # identity shortcut
        else:
            # Projection shortcut to match the output channel count.
            with tf.variable_scope('branch1'):
                if up_stride:
                    b1 = conv_transpose(x, out_chans=out_chan2, shape=1,
                                        strides=first_stride,
                                        name='res%s_branch1' % name)
                else:
                    b1 = conv(x, out_chans=out_chan2, shape=1,
                              strides=first_stride,
                              name='res%s_branch1' % name)
                b1 = bn(b1, 'bn%s_branch1' % name, 'scale%s_branch1' % name)
        with tf.variable_scope('branch2a'):
            if up_stride:
                b2 = conv_transpose(x, out_chans=out_chan1, shape=1,
                                    strides=first_stride,
                                    name='res%s_branch2a' % name)
            else:
                b2 = conv(x, out_chans=out_chan1, shape=1,
                          strides=first_stride, name='res%s_branch2a' % name)
            b2 = bn(b2, 'bn%s_branch2a' % name, 'scale%s_branch2a' % name)
            b2 = tf.nn.relu(b2, name='relu')
        with tf.variable_scope('branch2b'):
            b2 = conv(b2, out_chans=out_chan1, shape=3, strides=1,
                      name='res%s_branch2b' % name)
            b2 = bn(b2, 'bn%s_branch2b' % name, 'scale%s_branch2b' % name)
            b2 = tf.nn.relu(b2, name='relu')
        with tf.variable_scope('branch2c'):
            b2 = conv(b2, out_chans=out_chan2, shape=1, strides=1,
                      name='res%s_branch2c' % name)
            b2 = bn(b2, 'bn%s_branch2c' % name, 'scale%s_branch2c' % name)
        x = b1 + b2
    return tf.nn.relu(x, name='relu')
def add_to_regularization_and_summary(var):
    """Record a histogram for var and add its L2 loss to the reg_loss collection."""
    if var is None:
        return
    tf.histogram_summary(var.op.name, var)
    tf.add_to_collection("reg_loss", tf.nn.l2_loss(var))
def add_activation_summary(var):
    """Record histogram and sparsity summaries for an activation tensor."""
    if var is None:
        return
    tf.histogram_summary(var.op.name + "/activation", var)
    tf.scalar_summary(var.op.name + "/sparsity", tf.nn.zero_fraction(var))
def add_gradient_summary(grad, var):
    """Record a histogram summary for var's gradient, when one exists."""
    if grad is None:
        return
    tf.histogram_summary(var.op.name + "/gradient", grad)
| |
# Copyright 2012, Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can
# be found in the LICENSE file.
from itertools import izip
import logging
from net import bsonrpc
from net import gorpc
from vtdb import cursor
from vtdb import dbexceptions
from vtdb import field_types
# A simple, direct connection to the voltron query server.
# This is shard-unaware and only handles the most basic communication.
class TabletConnection(object):
    """Direct BSON-RPC connection to one tablet's SqlQuery service.

    Shard-unaware: the caller chooses the tablet address, keyspace, and
    shard up front; this class only handles basic request/response traffic.
    """

    # 0 means "no transaction open" / "session not yet established".
    transaction_id = 0
    session_id = 0
    # Default cursor type handed out by cursor().
    cursorclass = cursor.TabletCursor

    def __init__(self, addr, keyspace, shard, timeout, user=None, password=None, encrypted=False, keyfile=None, certfile=None):
        """Build the underlying RPC client; no network I/O until dial()."""
        self.addr = addr
        self.keyspace = keyspace
        self.shard = shard
        self.timeout = timeout
        self.client = bsonrpc.BsonRpcClient(addr, timeout, user, password, encrypted=encrypted, keyfile=keyfile, certfile=certfile)

    def dial(self):
        """Connect and obtain a session id; re-dials if already connected."""
        try:
            if self.session_id:
                self.client.close()
            self.client.dial()
            params = {'Keyspace': self.keyspace, 'Shard': self.shard}
            response = self.client.call('SqlQuery.GetSessionId', params)
            self.session_id = response.reply['SessionId']
        except gorpc.GoRpcError as e:
            # Surface transport failures as DB-API operational errors.
            raise dbexceptions.OperationalError(*e.args)

    def close(self):
        """Drop session/transaction bookkeeping and close the RPC client."""
        self.transaction_id = 0
        self.session_id = 0
        self.client.close()

    def _make_req(self):
        # Base request payload carrying session/transaction bookkeeping.
        return {'TransactionId': self.transaction_id,
                'ConnectionId': 0,
                'SessionId': self.session_id}

    def begin(self):
        """Open a server-side transaction; nested transactions are rejected."""
        if self.transaction_id:
            raise dbexceptions.NotSupportedError('Nested transactions not supported')
        req = self._make_req()
        try:
            response = self.client.call('SqlQuery.Begin', req)
            self.transaction_id = response.reply['TransactionId']
        except gorpc.GoRpcError as e:
            raise dbexceptions.OperationalError(*e.args)

    def commit(self):
        """Commit the open transaction (no-op if none) and return the reply."""
        if not self.transaction_id:
            return
        req = self._make_req()
        # NOTE(msolomon) Unset the transaction_id irrespective of the RPC's
        # response. The intent of commit is that no more statements can be made on
        # this transaction, so we guarantee that. Transient errors between the
        # db and the client shouldn't affect this part of the bookkeeping.
        # Do this after fill_session, since this is a critical part.
        self.transaction_id = 0
        try:
            response = self.client.call('SqlQuery.Commit', req)
            return response.reply
        except gorpc.GoRpcError as e:
            raise dbexceptions.OperationalError(*e.args)

    def rollback(self):
        """Roll back the open transaction (no-op if none) and return the reply."""
        if not self.transaction_id:
            return
        req = self._make_req()
        # NOTE(msolomon) Unset the transaction_id irrespective of the RPC. If the
        # RPC fails, the client will still choose a new transaction_id next time
        # and the tablet server will eventually kill the abandoned transaction on
        # the server side.
        self.transaction_id = 0
        try:
            response = self.client.call('SqlQuery.Rollback', req)
            return response.reply
        except gorpc.GoRpcError as e:
            raise dbexceptions.OperationalError(*e.args)

    def cursor(self, cursorclass=None, **kargs):
        """Return a new cursor bound to this connection."""
        return (cursorclass or self.cursorclass)(self, **kargs)

    def _execute(self, sql, bind_variables):
        """Run one statement; returns (results, rowcount, lastrowid, fields)."""
        new_binds = field_types.convert_bind_vars(bind_variables)
        req = self._make_req()
        req['Sql'] = sql
        req['BindVariables'] = new_binds
        fields = []
        conversions = []
        results = []
        try:
            response = self.client.call('SqlQuery.Execute', req)
            reply = response.reply
            # Field metadata determines the per-column conversion functions.
            for field in reply['Fields']:
                fields.append((field['Name'], field['Type']))
                conversions.append(field_types.conversions.get(field['Type']))
            for row in reply['Rows']:
                results.append(tuple(_make_row(row, conversions)))
            rowcount = reply['RowsAffected']
            lastrowid = reply['InsertId']
        except gorpc.GoRpcError as e:
            raise dbexceptions.OperationalError(*e.args)
        except:
            # Deliberate catch-all: log unexpected transport errors, re-raise.
            logging.exception('gorpc low-level error')
            raise
        return results, rowcount, lastrowid, fields

    def _execute_batch(self, sql_list, bind_variables_list):
        """Run several statements in one RPC; returns one result tuple each."""
        query_list = []
        for sql, bind_vars in zip(sql_list, bind_variables_list):
            req = self._make_req()
            req['Sql'] = sql
            req['BindVariables'] = field_types.convert_bind_vars(bind_vars)
            query_list.append(req)
        rowsets = []
        try:
            req = {'List': query_list}
            response = self.client.call('SqlQuery.ExecuteBatch', req)
            for reply in response.reply['List']:
                fields = []
                conversions = []
                results = []
                rowcount = 0
                for field in reply['Fields']:
                    fields.append((field['Name'], field['Type']))
                    conversions.append(field_types.conversions.get(field['Type']))
                for row in reply['Rows']:
                    results.append(tuple(_make_row(row, conversions)))
                rowcount = reply['RowsAffected']
                lastrowid = reply['InsertId']
                rowsets.append((results, rowcount, lastrowid, fields))
        except gorpc.GoRpcError as e:
            raise dbexceptions.OperationalError(*e.args)
        except:
            # Deliberate catch-all: log unexpected transport errors, re-raise.
            logging.exception('gorpc low-level error')
            raise
        return rowsets

    # we return the fields for the response, and the column conversions
    # the conversions will need to be passed back to _stream_next
    # (that way we avoid using a member variable here for such a corner case)
    def _stream_execute(self, sql, bind_variables):
        """Start a streaming query; returns (fields, conversions, None, 0)."""
        new_binds = field_types.convert_bind_vars(bind_variables)
        req = self._make_req()
        req['Sql'] = sql
        req['BindVariables'] = new_binds
        fields = []
        conversions = []
        try:
            self.client.stream_call('SqlQuery.StreamExecute', req)
            # The first response carries only the field metadata.
            first_response = self.client.stream_next()
            reply = first_response.reply
            for field in reply['Fields']:
                fields.append((field['Name'], field['Type']))
                conversions.append(field_types.conversions.get(field['Type']))
        except gorpc.GoRpcError as e:
            raise dbexceptions.OperationalError(*e.args)
        except:
            # Deliberate catch-all: log unexpected transport errors, re-raise.
            logging.exception('gorpc low-level error')
            raise
        return fields, conversions, None, 0

    # the calls to _stream_next will have the following states:
    # conversions, None, 0 (that will trigger asking for one result)
    # conversions, result, 1 (the next call will just return the 2nd row)
    # conversions, result, 2
    # ...
    # conversions, result, len(result)-1
    # conversions, None, 0 (that will trigger asking for one more result)
    # ...
    # conversions, last result, len(last result)-1
    # (asking for next result will return None)
    # conversions, None, None (this is then stable and stays that way)
    # conversions, None, None
    #
    # the StreamCursor in cursor.py is a good implementation of this API.
    def _stream_next(self, conversions, query_result, index):
        """Return (row, query_result, index) for the next streamed row.

        See the state table in the comment above; (None, None, None) marks
        end of stream.
        """
        # if index is None, it means we're done (because _stream_next
        # returned None, see 7 lines below here)
        if index is None:
            return None, None, None

        # see if we need to read more
        if query_result is None:
            try:
                query_result = self.client.stream_next()
                if query_result is None:
                    return None, None, None
            except gorpc.GoRpcError as e:
                raise dbexceptions.OperationalError(*e.args)
            except:
                # Deliberate catch-all: log transport errors, re-raise.
                logging.exception('gorpc low-level error')
                raise

        result = tuple(_make_row(query_result.reply['Rows'][index], conversions))
        index += 1
        # Exhausted this batch: reset so the next call fetches another one.
        if index == len(query_result.reply['Rows']):
            query_result = None
            index = 0
        return result, query_result, index
def _make_row(row, conversions):
    """Convert one wire row using the per-column conversion functions.

    Each raw field is paired with its conversion (which may be None);
    None field data stays None, otherwise the conversion is applied
    when one exists.
    """
    converted = []
    for convert, raw in izip(conversions, row):
        if raw is None:
            converted.append(None)
        elif convert:
            converted.append(convert(raw))
        else:
            converted.append(raw)
    return converted
def connect(*pargs, **kargs):
    """Build a TabletConnection, dial it, and return the live connection."""
    connection = TabletConnection(*pargs, **kargs)
    connection.dial()
    return connection
| |
import distutils
import distutils.spawn
import logging
import subprocess
import tempfile
import types
from shlex import quote
from ray.tune.error import TuneError
logger = logging.getLogger(__name__)
S3_PREFIX = "s3://"
GS_PREFIX = "gs://"
HDFS_PREFIX = "hdfs://"
ALLOWED_REMOTE_PREFIXES = (S3_PREFIX, GS_PREFIX, HDFS_PREFIX)
noop_template = ": {target}" # noop in bash
def noop(*args):
    """Do nothing; default stand-in for function-based sync/delete hooks."""
    return None
def get_sync_client(sync_function, delete_function=None):
    """Returns a sync client.

    Args:
        sync_function (Optional[str|function]): Sync function.
        delete_function (Optional[str|function]): Delete function. Must be
            the same type as sync_function if it is provided.

    Raises:
        ValueError if sync_function or delete_function are malformed.
    """
    if sync_function is None:
        return None
    if delete_function and type(sync_function) != type(delete_function):
        raise ValueError("Sync and delete functions must be of same type.")
    # Pick the client class and the default delete behavior from the
    # sync function's type.
    if isinstance(sync_function, types.FunctionType):
        fallback_delete = noop
        client_cls = FunctionBasedClient
    elif isinstance(sync_function, str):
        fallback_delete = noop_template
        client_cls = CommandBasedClient
    else:
        raise ValueError("Sync function {} must be string or function".format(
            sync_function))
    return client_cls(sync_function, sync_function,
                      delete_function or fallback_delete)
def get_cloud_sync_client(remote_path):
    """Returns a CommandBasedClient that can sync to/from remote storage.

    Args:
        remote_path (str): Path to remote storage (S3, GS or HDFS).

    Raises:
        ValueError if malformed remote_dir.
    """
    if remote_path.startswith(S3_PREFIX):
        if not distutils.spawn.find_executable("aws"):
            raise ValueError(
                "Upload uri starting with '{}' requires awscli tool"
                " to be installed".format(S3_PREFIX))
        up_cmd = "aws s3 sync {source} {target} --only-show-errors"
        down_cmd = up_cmd
        delete_cmd = "aws s3 rm {target} --recursive --only-show-errors"
    elif remote_path.startswith(GS_PREFIX):
        if not distutils.spawn.find_executable("gsutil"):
            raise ValueError(
                "Upload uri starting with '{}' requires gsutil tool"
                " to be installed".format(GS_PREFIX))
        up_cmd = "gsutil rsync -r {source} {target}"
        down_cmd = up_cmd
        delete_cmd = "gsutil rm -r {target}"
    elif remote_path.startswith(HDFS_PREFIX):
        if not distutils.spawn.find_executable("hdfs"):
            raise ValueError("Upload uri starting with '{}' requires hdfs tool"
                             " to be installed".format(HDFS_PREFIX))
        # HDFS templates differ up/down; note the down command reads from
        # {target} and writes to {source}.
        up_cmd = "hdfs dfs -put -f {source} {target}"
        down_cmd = "hdfs dfs -get -f {target} {source}"
        delete_cmd = "hdfs dfs -rm -r {target}"
    else:
        raise ValueError("Upload uri must start with one of: {}"
                         "".format(ALLOWED_REMOTE_PREFIXES))
    return CommandBasedClient(up_cmd, down_cmd, delete_cmd)
class SyncClient:
    """Client interface for interacting with remote storage options."""

    def sync_up(self, source, target):
        """Start syncing from local `source` up to remote `target`.

        Args:
            source (str): Source path.
            target (str): Target path.

        Returns:
            True if sync initiation successful, False otherwise.
        """
        raise NotImplementedError

    def sync_down(self, source, target):
        """Start syncing from remote `source` down to local `target`.

        Args:
            source (str): Source path.
            target (str): Target path.

        Returns:
            True if sync initiation successful, False otherwise.
        """
        raise NotImplementedError

    def delete(self, target):
        """Start deleting `target`.

        Args:
            target (str): Target path.

        Returns:
            True if delete initiation successful, False otherwise.
        """
        raise NotImplementedError

    def wait(self):
        """Block until any asynchronously started sync finishes (no-op here)."""
        pass

    def reset(self):
        """Reset internal state (no-op here)."""
        pass

    def close(self):
        """Release held resources (no-op here)."""
        pass
class FunctionBasedClient(SyncClient):
    """SyncClient backed by plain Python callables."""

    def __init__(self, sync_up_func, sync_down_func, delete_func=None):
        # Missing delete callable falls back to the module-level no-op.
        self.sync_up_func = sync_up_func
        self.sync_down_func = sync_down_func
        self.delete_func = delete_func or noop

    def _invoke(self, func, *args):
        # All operations run synchronously and report successful initiation.
        func(*args)
        return True

    def sync_up(self, source, target):
        return self._invoke(self.sync_up_func, source, target)

    def sync_down(self, source, target):
        return self._invoke(self.sync_down_func, source, target)

    def delete(self, target):
        return self._invoke(self.delete_func, target)
NOOP = FunctionBasedClient(noop, noop)
class CommandBasedClient(SyncClient):
    """SyncClient that shells out to command-line templates (gsutil, hdfs, ...).

    At most one subprocess (sync or delete) runs at a time; a new request
    is skipped (returns False) while a previous one is still in progress.
    """
    def __init__(self,
                 sync_up_template,
                 sync_down_template,
                 delete_template=noop_template):
        """Syncs between two directories with the given command.
        Arguments:
            sync_up_template (str): A runnable string template; needs to
                include replacement fields '{source}' and '{target}'.
            sync_down_template (str): A runnable string template; needs to
                include replacement fields '{source}' and '{target}'.
            delete_template (Optional[str]): A runnable string template; needs
                to include replacement field '{target}'. Noop by default.
        """
        self._validate_sync_string(sync_up_template)
        self._validate_sync_string(sync_down_template)
        self.sync_up_template = sync_up_template
        self.sync_down_template = sync_down_template
        self.delete_template = delete_template
        # stdout of spawned commands is redirected here (see set_logdir)
        self.logfile = None
        self._closed = False
        # handle of the currently running subprocess, if any
        self.cmd_process = None
    def set_logdir(self, logdir):
        """Sets the directory to log sync execution output in.
        Args:
            logdir (str): Log directory.
        """
        # delete=False so the log survives after the handle is closed
        self.logfile = tempfile.NamedTemporaryFile(
            prefix="log_sync_out", dir=logdir, suffix=".log", delete=False)
        self._closed = False
    def _get_logfile(self):
        # Guard against launching commands after close() released the file.
        if self._closed:
            raise RuntimeError(
                "[internalerror] The client has been closed. "
                "Please report this stacktrace + your cluster configuration "
                "on Github!")
        else:
            return self.logfile
    def sync_up(self, source, target):
        """Starts an asynchronous upload; True if the process was spawned."""
        return self._execute(self.sync_up_template, source, target)
    def sync_down(self, source, target):
        """Starts an asynchronous download; True if the process was spawned."""
        return self._execute(self.sync_down_template, source, target)
    def delete(self, target):
        """Starts an asynchronous delete of ``target``.
        Returns:
            True if the delete process was spawned, False if another
            command is still running.
        """
        if self.is_running:
            logger.warning("Last sync client cmd still in progress, skipping.")
            return False
        final_cmd = self.delete_template.format(target=quote(target))
        logger.debug("Running delete: {}".format(final_cmd))
        self.cmd_process = subprocess.Popen(
            final_cmd,
            shell=True,
            stderr=subprocess.PIPE,
            stdout=self._get_logfile())
        return True
    def wait(self):
        """Waits for the current command to finish; raises TuneError on failure."""
        if self.cmd_process:
            _, error_msg = self.cmd_process.communicate()
            # BUGFIX: tool output is not guaranteed to be ASCII (paths,
            # locales).  A strict decode("ascii") here raised
            # UnicodeDecodeError and masked the actual sync failure; decode
            # leniently instead so the real error is always surfaced.
            error_msg = error_msg.decode("utf-8", errors="replace")
            code = self.cmd_process.returncode
            args = self.cmd_process.args
            self.cmd_process = None
            if code != 0:
                raise TuneError("Sync error. Ran command: {}\n"
                                "Error message ({}): {}".format(
                                    args, code, error_msg))
    def reset(self):
        """Forgets the current process handle without terminating it."""
        if self.is_running:
            logger.warning("Sync process still running but resetting anyways.")
        self.cmd_process = None
    def close(self):
        """Closes the log file; further command launches will raise."""
        if self.logfile:
            logger.debug(f"Closing the logfile: {str(self.logfile)}")
            self.logfile.close()
            self.logfile = None
            self._closed = True
    @property
    def is_running(self):
        """Returns whether a sync or delete process is running."""
        if self.cmd_process:
            # poll() refreshes returncode if the process has exited
            self.cmd_process.poll()
            return self.cmd_process.returncode is None
        return False
    def _execute(self, sync_template, source, target):
        """Executes sync_template on source and target."""
        if self.is_running:
            logger.warning("Last sync client cmd still in progress, skipping.")
            return False
        final_cmd = sync_template.format(
            source=quote(source), target=quote(target))
        logger.debug("Running sync: {}".format(final_cmd))
        self.cmd_process = subprocess.Popen(
            final_cmd,
            shell=True,
            stderr=subprocess.PIPE,
            stdout=self._get_logfile())
        return True
    @staticmethod
    def _validate_sync_string(sync_string):
        """Raises ValueError unless sync_string contains both template fields."""
        if not isinstance(sync_string, str):
            raise ValueError("{} is not a string.".format(sync_string))
        if "{source}" not in sync_string:
            raise ValueError("Sync template missing '{source}'.")
        if "{target}" not in sync_string:
            raise ValueError("Sync template missing '{target}'.")
| |
#
# FBrowserBase.py -- Base class for file browser plugin for fits viewer
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import os, glob
import stat, time
from ginga.misc import Bunch
from ginga import GingaPlugin
from ginga import AstroImage
from ginga.util import paths
from ginga.util.six.moves import map, zip
try:
from astropy.io import fits as pyfits
have_astropy = True
except ImportError:
have_astropy = False
class FBrowserBase(GingaPlugin.LocalPlugin):
    """Toolkit-independent core of the FBrowser (file browser) local plugin.

    Provides directory globbing, per-file metadata collection and optional
    FITS header scanning.  GUI subclasses are expected to supply the widget
    layer, including ``makelisting`` and the ``folderpb``/``fitspb``/
    ``filepb`` icons referenced below -- TODO confirm against a subclass.
    """
    def __init__(self, fv, fitsimage):
        # superclass defines some variables for us, like logger
        super(FBrowserBase, self).__init__(fv, fitsimage)
        # (column title, FITS keyword) pairs read during header scans
        keywords = [ ('Object', 'OBJECT'),
                     ('Date', 'DATE-OBS'),
                     ('Time UT', 'UT'),
                     ]
        # (column title, attribute name on the info bunch) listing columns
        columns = [('Name', 'name'),
                   ('Size', 'st_size_str'),
                   ('Mode', 'st_mode_oct'),
                   ('Last Changed', 'st_mtime_str')
                   ]
        # per-file metadata bunches for the current directory listing
        self.jumpinfo = []
        # setup plugin preferences
        prefs = self.fv.get_preferences()
        self.settings = prefs.createCategory('plugin_FBrowser')
        self.settings.addDefaults(home_path=paths.home,
                                  scan_fits_headers=False,
                                  scan_limit=100,
                                  keywords=keywords,
                                  columns=columns)
        self.settings.load(onError='silent')
        homedir = self.settings.get('home_path', None)
        if homedir is None:
            homedir = paths.home
        # current glob pattern; starts as "<home>/*"
        self.curpath = os.path.join(homedir, '*')
        self.do_scanfits = self.settings.get('scan_fits_headers', False)
        self.scan_limit = self.settings.get('scan_limit', 100)
        self.keywords = self.settings.get('keywords', keywords)
        self.columns = self.settings.get('columns', columns)
        self.moving_cursor = False
        # 'N/A' placeholder for every listing column attribute; used as the
        # base of each file's info bunch so missing stats still render
        self.na_dict = { attrname: 'N/A' for colname, attrname in self.columns }
    def close(self):
        """Stop this plugin instance in its channel."""
        chname = self.fv.get_channelName(self.fitsimage)
        self.fv.stop_local_plugin(chname, str(self))
        return True
    def file_icon(self, bnch):
        """Return the icon for a listing entry based on its ``type``.

        The pixbufs are attributes presumably created by the GUI
        subclass -- TODO confirm.
        """
        if bnch.type == 'dir':
            pb = self.folderpb
        elif bnch.type == 'fits':
            pb = self.fitspb
        else:
            pb = self.filepb
        return pb
    def open_file(self, path):
        """Open ``path``: browse directories and globs, load regular files.

        '..' is resolved against the directory of the current glob pattern,
        keeping the same glob.  Existing files are handed to the viewer via
        the 'drag-drop' callback; anything else is treated as a glob.
        """
        self.logger.debug("path: %s" % (path))
        if path == '..':
            curdir, curglob = os.path.split(self.curpath)
            path = os.path.join(curdir, path, curglob)
        if os.path.isdir(path):
            path = os.path.join(path, '*')
            self.browse(path)
        elif os.path.exists(path):
            #self.fv.load_file(path)
            uri = "file://%s" % (path)
            self.fitsimage.make_callback('drag-drop', [uri])
        else:
            self.browse(path)
    def get_info(self, path):
        """Return a Bunch of metadata (name, type, stat fields) for ``path``.

        File type is one of 'dir', 'link', 'fits' (by extension) or 'file'.
        Starts from na_dict so display columns default to 'N/A' when the
        stat call fails.
        """
        dirname, filename = os.path.split(path)
        name, ext = os.path.splitext(filename)
        ftype = 'file'
        if os.path.isdir(path):
            ftype = 'dir'
        elif os.path.islink(path):
            ftype = 'link'
        elif ext.lower() == '.fits':
            ftype = 'fits'
        bnch = Bunch.Bunch(self.na_dict)
        try:
            filestat = os.stat(path)
            bnch.update(dict(path=path, name=filename, type=ftype,
                             st_mode=filestat.st_mode,
                             st_mode_oct=oct(filestat.st_mode),
                             st_size=filestat.st_size,
                             st_size_str=str(filestat.st_size),
                             st_mtime=filestat.st_mtime,
                             st_mtime_str=time.ctime(filestat.st_mtime)))
        except OSError as e:
            # TODO: identify some kind of error with this path
            bnch.update(dict(path=path, name=filename, type=ftype,
                             st_mode=0, st_size=0,
                             st_mtime=0))
        return bnch
    def browse(self, path):
        """Glob ``path`` and rebuild the listing (optionally scanning headers).

        A bare directory becomes "<dir>/*"; a '..' entry is always prepended.
        """
        self.logger.debug("path: %s" % (path))
        if os.path.isdir(path):
            dirname = path
            globname = None
        else:
            dirname, globname = os.path.split(path)
        dirname = os.path.abspath(dirname)
        # check validity of leading path name
        if not os.path.isdir(dirname):
            self.fv.show_error("Not a valid path: %s" % (dirname))
            return
        if not globname:
            globname = '*'
        path = os.path.join(dirname, globname)
        # Make a directory listing
        self.logger.debug("globbing path: %s" % (path))
        filelist = list(glob.glob(path))
        filelist.sort(key=lambda s: s.lower())
        filelist.insert(0, os.path.join(dirname, '..'))
        self.jumpinfo = list(map(self.get_info, filelist))
        self.curpath = path
        if self.do_scanfits:
            # header scanning is skipped for very large listings
            num_files = len(self.jumpinfo)
            if num_files <= self.scan_limit:
                self.scan_fits()
            else:
                # NOTE(review): logger.warn is deprecated; logger.warning preferred
                self.logger.warn("Number of files (%d) is greater than scan limit (%d)--skipping header scan" % (
                    num_files, self.scan_limit))
        self.makelisting(path)
    def scan_fits(self):
        # Scan each FITS file and add header items
        # Values are keyed by the display name (first element of each
        # self.keywords pair); unreadable files are logged and skipped.
        self.logger.info("scanning files for header keywords...")
        start_time = time.time()
        for bnch in self.jumpinfo:
            if (not bnch.type == 'fits') or (not have_astropy):
                continue
            try:
                with pyfits.open(bnch.path, 'readonly') as in_f:
                    kwds = { attrname: in_f[0].header.get(kwd, 'N/A')
                             for attrname, kwd in self.keywords}
                bnch.update(kwds)
            except Exception as e:
                self.logger.warn("Error reading FITS keywords from '%s': %s" % (
                    bnch.path, str(e)))
                continue
        elapsed = time.time() - start_time
        self.logger.info("done scanning--scan time: %.2f sec" % (elapsed))
    def refresh(self):
        """Re-browse the current glob pattern."""
        self.browse(self.curpath)
    def scan_headers(self):
        """Re-browse the current path (triggering a header scan if enabled).

        NOTE(review): body is identical to refresh().
        """
        self.browse(self.curpath)
    def make_thumbs(self):
        """Ask the channel's Thumbs plugin to generate thumbnails for the
        current glob, off the GUI thread."""
        path = self.curpath
        self.logger.info("Generating thumbnails for '%s'..." % (
            path))
        filelist = glob.glob(path)
        filelist.sort(key=lambda s: s.lower())
        # find out our channel
        chname = self.fv.get_channelName(self.fitsimage)
        # Invoke the method in this channel's Thumbs plugin
        # TODO: don't expose gpmon!
        rsobj = self.fv.gpmon.getPlugin('Thumbs')
        self.fv.nongui_do(rsobj.make_thumbs, chname, filelist)
    def start(self):
        """Plugin lifecycle: begin by browsing the current path."""
        self.win = None
        self.browse(self.curpath)
    def pause(self):
        """Plugin lifecycle hook; nothing to do."""
        pass
    def resume(self):
        """Plugin lifecycle hook; nothing to do."""
        pass
    def stop(self):
        """Plugin lifecycle hook; nothing to do."""
        pass
    def redo(self):
        """Plugin lifecycle hook; nothing to recompute."""
        return True
#END
| |
#coding: utf-8
import unittest
import re
import sys
import os.path as p
sys.path.insert(0, p.join(p.dirname(p.abspath(__file__)), '..', 'src'))
import ope_manip as om
# javap-style disassembly fixtures for the ope_manip parsing tests below.
# Each triple-quoted listing is trimmed of its surrounding newlines with
# [1:-1] and split into individual lines.

# Method with a single conditional branch (ifle).
body_if_branch = """
0: iload_1
1: ifle 12
4: getstatic #2 // Field java/lang/System.out:Ljava/io/PrintStream;
7: ldc #3 // String x > 0
9: invokevirtual #4 // Method java/io/PrintStream.println:(Ljava/lang/String;)V
12: return
"""[1:-1].split('\n')
# Counting for-loop with a backward goto.
body_for_loop = """
0: iconst_0
1: istore_1
2: iload_1
3: bipush 10
5: if_icmpge 34
8: getstatic #2 // Field java/lang/System.out:Ljava/io/PrintStream;
11: ldc #5 // String i = %d
13: iconst_1
14: anewarray #6 // class java/lang/Object
17: dup
18: iconst_0
19: iload_1
20: invokestatic #7 // Method java/lang/Integer.valueOf:(I)Ljava/lang/Integer;
23: aastore
24: invokevirtual #8 // Method java/io/PrintStream.printf:(Ljava/lang/String;[Ljava/lang/Object;)Ljava/io/PrintStream;
27: pop
28: iinc 1, 1
31: goto 2
34: return
"""[1:-1].split('\n')
# Multi-line tableswitch operand block (case -> target pairs plus default).
body_table_switch = """
0: iload_1
1: tableswitch { // 1 to 2
1: 24
2: 35
default: 46
}
24: getstatic #16 // Field java/lang/System.out:Ljava/io/PrintStream;
27: ldc #22 // String v = 1
29: invokevirtual #24 // Method java/io/PrintStream.println:(Ljava/lang/String;)V
32: goto 54
35: getstatic #16 // Field java/lang/System.out:Ljava/io/PrintStream;
38: ldc #30 // String v = 2
40: invokevirtual #24 // Method java/io/PrintStream.println:(Ljava/lang/String;)V
43: goto 54
46: getstatic #16 // Field java/lang/System.out:Ljava/io/PrintStream;
49: ldc #32 // String others
51: invokevirtual #24 // Method java/io/PrintStream.println:(Ljava/lang/String;)V
54: return
"""[1:-1].split('\n')
# Multi-line lookupswitch operand block with sparse keys.
body_lookup_switch = """
0: iload_1
1: lookupswitch { // 3
1: 36
17: 47
20: 58
default: 69
}
36: getstatic #16 // Field java/lang/System.out:Ljava/io/PrintStream;
39: ldc #22 // String v = 1
41: invokevirtual #24 // Method java/io/PrintStream.println:(Ljava/lang/String;)V
44: goto 77
47: getstatic #16 // Field java/lang/System.out:Ljava/io/PrintStream;
50: ldc #38 // String v = 17
52: invokevirtual #24 // Method java/io/PrintStream.println:(Ljava/lang/String;)V
55: goto 77
58: getstatic #16 // Field java/lang/System.out:Ljava/io/PrintStream;
61: ldc #40 // String v = 20
63: invokevirtual #24 // Method java/io/PrintStream.println:(Ljava/lang/String;)V
66: goto 77
69: getstatic #16 // Field java/lang/System.out:Ljava/io/PrintStream;
72: ldc #32 // String others
74: invokevirtual #24 // Method java/io/PrintStream.println:(Ljava/lang/String;)V
77: return
"""[1:-1].split('\n')
class TestOpeManip(unittest.TestCase):
    """Tests for ope_manip's javap-disassembly parsing and formatting.

    The parsed "ope list" holds one slot per bytecode offset: an
    (opcode, operands, comment) tuple at the instruction's offset and
    None at the offsets covered by the preceding instruction.
    """
    def testBodyTextToOpeListBranch(self):
        """Parse the if-branch body and verify every slot exactly."""
        ope_list = om.body_text_to_ope_list(body_if_branch)
        self.assertEqual(len(ope_list), 13)
        self.assertSequenceEqual(ope_list, [
            ('iload_1', (), None),
            ('ifle', ('12',), None),
            None,
            None,
            ('getstatic', ('#2',), '// Field java/lang/System.out:Ljava/io/PrintStream;'),
            None,
            None,
            ('ldc', ('#3',), '// String x > 0'),
            None,
            ('invokevirtual', ('#4',), '// Method java/io/PrintStream.println:(Ljava/lang/String;)V'),
            None,
            None,
            ('return', (), None)
        ])
        om.verify_branch_ope(ope_list)
    def testBodyTextToOpeListLoop(self):
        """Parse the for-loop body and verify every slot exactly."""
        ope_list = om.body_text_to_ope_list(body_for_loop)
        self.assertEqual(len(ope_list), 35)
        self.assertSequenceEqual(ope_list, [
            ('iconst_0', (), None),
            ('istore_1', (), None),
            ('iload_1', (), None),
            ('bipush', ('10',), None),
            None,
            ('if_icmpge', ('34',), None),
            None,
            None,
            ('getstatic', ('#2',), '// Field java/lang/System.out:Ljava/io/PrintStream;'),
            None,
            None,
            ('ldc', ('#5',), '// String i = %d'),
            None,
            ('iconst_1', (), None),
            ('anewarray', ('#6',), '// class java/lang/Object'),
            None,
            None,
            ('dup', (), None),
            ('iconst_0', (), None),
            ('iload_1', (), None),
            ('invokestatic', ('#7',), '// Method java/lang/Integer.valueOf:(I)Ljava/lang/Integer;'),
            None,
            None,
            ('aastore', (), None),
            ('invokevirtual', ('#8',), '// Method java/io/PrintStream.printf:(Ljava/lang/String;[Ljava/lang/Object;)Ljava/io/PrintStream;'),
            None,
            None,
            ('pop', (), None),
            ('iinc', ('1', '1'), None),
            None,
            None,
            ('goto', ('2',), None),
            None,
            None,
            ('return', (), None)
        ])
        om.verify_branch_ope(ope_list)
    def testBodyTableSwitch(self):
        """tableswitch collapses to one op holding (case, target) pairs."""
        ope_list = om.body_text_to_ope_list(body_table_switch)
        self.assertEqual(len(ope_list), 55)
        self.assertSequenceEqual(ope_list[:25], [
            ('iload_1', (), None),
            ('tableswitch', [('1', '24'), ('2', '35'), ('default', '46')], '// 1 to 2'),
            None, None, None, None, None, None, None, None, None, None,
            None, None, None, None, None, None, None, None, None, None,
            None, None,
            ('getstatic', ('#16',), '// Field java/lang/System.out:Ljava/io/PrintStream;')
        ])
        om.verify_branch_ope(ope_list)
    def testBodyLookupSwitch(self):
        """lookupswitch collapses to one op holding (key, target) pairs."""
        ope_list = om.body_text_to_ope_list(body_lookup_switch)
        self.assertEqual(len(ope_list), 78)
        self.assertSequenceEqual(ope_list[:37], [
            ('iload_1', (), None),
            ('lookupswitch', [('1', '36'), ('17', '47'), ('20', '58'), ('default', '69')], '// 3'),
            None, None, None, None, None, None, None, None, None, None,
            None, None, None, None, None, None, None, None, None, None,
            None, None, None, None, None, None, None, None, None, None,
            None, None, None, None,
            ('getstatic', ('#16',), '// Field java/lang/System.out:Ljava/io/PrintStream;')
        ])
        om.verify_branch_ope(ope_list)
    def testVerifyBranchOpe(self):
        """Branch targets outside the list or landing on None must raise."""
        ope_list = [
            ('if_icmpge', ('1',), None),
            ('iload_1', (), None),
        ]
        om.verify_branch_ope(ope_list)
        # truncate so the branch target falls outside the list
        ope_list = ope_list[:1]
        with self.assertRaises(om.InvalidOpe):
            om.verify_branch_ope(ope_list)
        # branch target points at a None (mid-instruction) slot
        ope_list = [
            ('if_icmpge', ('1',), None),
            None
        ]
        with self.assertRaises(om.InvalidOpe):
            om.verify_branch_ope(ope_list)
    def testFormatOpeList(self):
        """Formatting a parsed list reproduces the source modulo whitespace."""
        ope_list = om.body_text_to_ope_list(body_lookup_switch)
        lines = om.format_ope_list(ope_list)
        # normalize runs of whitespace before comparing
        original_seq = [re.sub(r'\s+', ' ', L.rstrip()) for L in body_lookup_switch]
        formatted_seq = [re.sub(r'\s+', ' ', L.rstrip()) for L in lines]
        self.assertSequenceEqual(formatted_seq, original_seq)
if __name__ == "__main__":
    # Run the full suite; uncomment below to run a single named test.
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| |
"""
Tests for `bx.align.maf`.
"""
from io import StringIO
import bx.align as align
import bx.align.maf as maf
# A simple MAF from the rat paper days
test_maf = """##maf version=1 scoring=humor.v4
# humor.v4 R=30 M=10 /cluster/data/hg15/bed/blastz.mm3/axtNet300/chr1.maf
# /cluster/data/hg15/bed/blastz.rn3/axtNet300/chr1.maf
a score=0.128
s human_hoxa 100 8 + 100257 ACA-TTACT
s horse_hoxa 120 9 - 98892 ACAATTGCT
s fugu_hoxa 88 7 + 90788 ACA--TGCT
a score=0.071
s human_unc 9077 8 + 10998 ACAGTATT
# Comment
s horse_unc 4555 6 - 5099 ACA--ATT
s fugu_unc 4000 4 + 4038 AC----TT
"""
# A more complicated MAF with synteny annotation and such
# ('i' lines carry left/right synteny, 'e' lines are empty components)
test_maf_2 = """##maf version=1 scoring=autoMZ.v1
a score=3656.000000
s hg17.chr1 2005 34 + 245522847 TGTAACTTAATACCACAACCAGGCATAGGGG--AAA-------------
s rheMac2.chr11 9625228 31 + 134511895 TGTAACCTCTTACTGCAACAAGGCACAGGGG------------------
i rheMac2.chr11 C 0 I 1678
s panTro1.chr1 2014 34 + 229575298 TGTAACTTAATACCACAACCAGGCATGGGGG--AAA-------------
i panTro1.chr1 C 0 C 0
s bosTau2.chr5 64972365 47 + 76426644 TCCAGCCATGTGTTGTGATCAG--CCAGGGGCTAAAGCCATGGCGGTAG
i bosTau2.chr5 C 0 I 1462
s canFam2.chr27 45129665 31 + 48908698 TTTGACTCTGTGCTCTTATCAGGCCCAAGGG------------------
i canFam2.chr27 C 0 I 1664
e danRer3.chr18 2360867 428 + 50308305 I
e oryCun1.scaffold_139397 643 1271 - 4771 I
e loxAfr1.scaffold_5603 58454 1915 + 68791 I
e echTel1.scaffold_212365 4641 1430 + 9822 I
e echTel1.scaffold_212365 4641 1430 + 9822 I
e rn3.chr4 29161032 1524 - 187371129 I
e mm7.chr6 28091695 3290 - 149646834 I
"""
# A MAF to test slicing upon
test_maf_3 = """##maf version=1 scoring=none
a score=0
s apple 34 64 + 110 AGGGA---GTTCGTCACT------GTCGTAAGGGTTCAGA--CTGTCTATGTATACACAAGTTGTGTTGCA--ACCG
s orange 19 61 - 100 AGGGATGCGTT--TCACTGCTATCGTCGTA----TTCAGACTTCG-CTATCT------GAGTTGT---GCATTACCG
"""
# Hand-built alignment shared by the slice / reverse-complement / column
# tests: two full rows, one short gap-padded row, and one "empty" component
# (text=None) carrying synteny status instead of sequence.
complex_maf = align.Alignment()
complex_maf.score = "7009"
complex_maf.components.append(align.Component(src="human_hoxa", start=100, size=8, strand="+", src_size=100257, text="ACA-TTACT"))
complex_maf.components.append(align.Component(src="horse_hoxa", start=120, size=9, strand="-", src_size=98892, text="ACAATTGCT"))
complex_maf.components[-1].synteny_left = (maf.MAF_NEW_STATUS, 0)
complex_maf.components[-1].synteny_right = (maf.MAF_CONTIG_STATUS, 0)
complex_maf.components.append(align.Component(src="unknown_1", start=150, size=3, strand="-", src_size=98892, text="---ATT---"))
complex_maf.components.append(align.Component(src="unknown_2", start=12, size=1000, strand="+", src_size=1200, text=None))
complex_maf.components[-1].empty = True
complex_maf.components[-1].synteny_empty = maf.MAF_INSERT_STATUS
complex_maf.text_size = 9
def test_reader():
    """Parse ``test_maf`` and verify attributes, scores and components."""
    reader = maf.Reader(StringIO(test_maf))
    assert reader.attributes["version"] == "1"
    assert reader.attributes["scoring"] == "humor.v4"
    # First block: three components, comment line ignored by the parser.
    block = next(reader)
    assert block.score == 0.128
    expected = [
        ("human_hoxa", 100, 8, "+", 100257, "ACA-TTACT"),
        ("horse_hoxa", 120, 9, "-", 98892, "ACAATTGCT"),
        ("fugu_hoxa", 88, 7, "+", 90788, "ACA--TGCT"),
    ]
    assert len(block.components) == len(expected)
    for component, fields in zip(block.components, expected):
        check_component(component, *fields)
    # Second block.
    block = next(reader)
    assert block.score == 0.071
    expected = [
        ("human_unc", 9077, 8, "+", 10998, "ACAGTATT"),
        ("horse_unc", 4555, 6, "-", 5099, "ACA--ATT"),
        ("fugu_unc", 4000, 4, "+", 4038, "AC----TT"),
    ]
    assert len(block.components) == len(expected)
    for component, fields in zip(block.components, expected):
        check_component(component, *fields)
    # Exhausted reader yields None.
    assert next(reader) is None
    reader.close()
def test_writer():
    """Write an Alignment and compare the emitted MAF text exactly."""
    val = StringIO()
    writer = maf.Writer(val, {'scoring': 'foobar'})
    a = align.Alignment()
    a.score = 7009
    a.components.append(align.Component(src="human_hoxa", start=100, size=9, strand="+", src_size=1000257, text="ACA-TTACT"))
    a.components.append(align.Component(src="horse_hoxa", start=120, size=10, strand="-", src_size=98892, text="ACAATTGCT"))
    # sanity-check the fixture before writing
    check_component(a.components[0], "human_hoxa", 100, 9, "+", 1000257, "ACA-TTACT")
    check_component(a.components[1], "horse_hoxa", 120, 10, "-", 98892, "ACAATTGCT")
    writer.write(a)
    # NOTE: the expected literal intentionally contains trailing whitespace
    assert val.getvalue() == """##maf version=1 scoring=foobar
a score=7009
s human_hoxa 100 9 + 1000257 ACA-TTACT
s horse_hoxa 120 10 - 98892 ACAATTGCT
""" # noqa: W291
def test_slice():
    """Slice alignments by component coordinates on + and - strand sources."""
    # slice the hand-built alignment on its first (+ strand) component
    b = complex_maf.slice_by_component(0, 101, 105)
    check_component(b.components[0], src="human_hoxa", start=101, size=4, strand="+", src_size=100257, text="CA-TT")
    check_component(b.components[1], src="horse_hoxa", start=121, size=5, strand="-", src_size=98892, text="CAATT")
    check_component(b.components[2], src="unknown_1", start=150, size=3, strand="-", src_size=98892, text="--ATT")
    # empty component passes through unchanged, keeping its synteny status
    check_component(b.components[3], src="unknown_2", start=12, size=1000, strand="+", src_size=1200, text=None)
    assert b.components[3].empty
    assert b.components[3].synteny_empty == maf.MAF_INSERT_STATUS
    # test slicing with + strand src
    reader = maf.Reader(StringIO(test_maf_3))
    a = next(reader)
    b = a.slice_by_component(0, 40, 62)
    check_component(b.components[0], src="apple", start=40, size=22, strand="+", src_size=110, text="TTCGTCACT------GTCGTAAGGGTTC")
    check_component(b.components[1], src="orange", start=28, size=22, strand="-", src_size=100, text="TT--TCACTGCTATCGTCGTA----TTC")
    # test slicing with - strand src
    b = a.slice_by_component(1, 30, 68)
    check_component(b.components[0], src="apple", start=46, size=41, strand="+", src_size=110, text="ACT------GTCGTAAGGGTTCAGA--CTGTCTATGTATACACAAGTTG")
    check_component(b.components[1], src="orange", start=32, size=38, strand="-", src_size=100, text="ACTGCTATCGTCGTA----TTCAGACTTCG-CTATCT------GAGTTG")
    a = next(reader)
    assert a is None
def test_reverse_complement():
    """Reverse complement flips strand, complements text, maps each start
    to src_size - start - size, and swaps left/right synteny."""
    b = complex_maf.reverse_complement()
    check_component(b.components[0], src="human_hoxa", start=100257-100-8, size=8, strand="-", src_size=100257, text="AGTAA-TGT")
    check_component(b.components[1], src="horse_hoxa", start=98892-120-9, size=9, strand="+", src_size=98892, text="AGCAATTGT")
    # synteny annotations swap sides under reverse complement
    assert b.components[1].synteny_right == (maf.MAF_NEW_STATUS, 0)
    assert b.components[1].synteny_left == (maf.MAF_CONTIG_STATUS, 0)
    check_component(b.components[2], src="unknown_1", start=98892-150-3, size=3, strand="+", src_size=98892, text="---AAT---")
    # empty component: coordinates flip, text stays None, status preserved
    check_component(b.components[3], src="unknown_2", start=1200-12-1000, size=1000, strand="-", src_size=1200, text=None)
    assert b.components[3].empty
    assert b.components[3].synteny_empty == maf.MAF_INSERT_STATUS
def test_column_iter():
    """column_iter yields one list per alignment column; rows without text
    at a position (the short third component) appear as '-'."""
    expected_columns = [["A", "A", "-"],
                        ["C", "C", "-"],
                        ["A", "A", "-"],
                        ["-", "A", "A"],
                        ["T", "T", "T"],
                        ["T", "T", "T"],
                        ["A", "G", "-"],
                        ["C", "C", "-"],
                        ["T", "T", "-"]]
    for position, column in enumerate(complex_maf.column_iter()):
        assert column == expected_columns[position]
def test_remove_all_gap_column():
    """Build complex_maf with two extra all-gap columns (positions 0 and 4);
    remove_all_gap_columns must recover complex_maf exactly."""
    complex_maf_gap = align.Alignment()
    complex_maf_gap.score = "7009"
    complex_maf_gap.components.append(align.Component(src="human_hoxa", start=100, size=8, strand="+", src_size=100257, text="-ACA--TTACT"))
    complex_maf_gap.components.append(align.Component(src="horse_hoxa", start=120, size=9, strand="-", src_size=98892, text="-ACA-ATTGCT"))
    complex_maf_gap.components[-1].synteny_left = (maf.MAF_NEW_STATUS, 0)
    complex_maf_gap.components[-1].synteny_right = (maf.MAF_CONTIG_STATUS, 0)
    complex_maf_gap.components.append(align.Component(src="unknown_1", start=150, size=3, strand="-", src_size=98892, text="-----ATT---"))
    complex_maf_gap.components.append(align.Component(src="unknown_2", start=12, size=1000, strand="+", src_size=1200, text=None))
    complex_maf_gap.components[-1].empty = True
    complex_maf_gap.components[-1].synteny_empty = maf.MAF_INSERT_STATUS
    complex_maf_gap.text_size = 11
    complex_maf_gap.remove_all_gap_columns()
    assert complex_maf_gap == complex_maf
def test_read_with_synteny():
    """With parse_e_rows=True, 'i' lines become synteny_left/right tuples
    and 'e' lines become empty components with synteny_empty set."""
    reader = maf.Reader(StringIO(test_maf_2), parse_e_rows=True)
    a = next(reader)
    check_component(a.components[0], "hg17.chr1", 2005, 34, "+", 245522847, "TGTAACTTAATACCACAACCAGGCATAGGGG--AAA-------------")
    check_component(a.components[1], "rheMac2.chr11", 9625228, 31, "+", 134511895, "TGTAACCTCTTACTGCAACAAGGCACAGGGG------------------")
    print(a.components[1].synteny_left)
    assert a.components[1].synteny_left == (maf.MAF_CONTIG_STATUS, 0)
    assert a.components[1].synteny_right == (maf.MAF_INSERT_STATUS, 1678)
    # e-line components are retrievable by src prefix and carry no text
    rat = a.get_component_by_src_start("rn3.")
    check_component(rat, "rn3.chr4", 29161032, 1524, "-", 187371129, None)
    assert rat.synteny_empty == maf.MAF_INSERT_STATUS
def test_write_with_synteny():
    """Round-trip a MAF with synteny rows; output matches the input except
    for score formatting (3656.000000 is re-emitted as 3656.0)."""
    reader = maf.Reader(StringIO(test_maf_2), parse_e_rows=True)
    a = next(reader)
    val = StringIO()
    writer = maf.Writer(val, {'scoring': 'foobar'})
    writer.write(a)
    actual = val.getvalue()
    # NOTE: the expected literal intentionally contains trailing whitespace
    expected = """##maf version=1 scoring=foobar
a score=3656.0
s hg17.chr1 2005 34 + 245522847 TGTAACTTAATACCACAACCAGGCATAGGGG--AAA-------------
s rheMac2.chr11 9625228 31 + 134511895 TGTAACCTCTTACTGCAACAAGGCACAGGGG------------------
i rheMac2.chr11 C 0 I 1678
s panTro1.chr1 2014 34 + 229575298 TGTAACTTAATACCACAACCAGGCATGGGGG--AAA-------------
i panTro1.chr1 C 0 C 0
s bosTau2.chr5 64972365 47 + 76426644 TCCAGCCATGTGTTGTGATCAG--CCAGGGGCTAAAGCCATGGCGGTAG
i bosTau2.chr5 C 0 I 1462
s canFam2.chr27 45129665 31 + 48908698 TTTGACTCTGTGCTCTTATCAGGCCCAAGGG------------------
i canFam2.chr27 C 0 I 1664
e danRer3.chr18 2360867 428 + 50308305 I
e oryCun1.scaffold_139397 643 1271 - 4771 I
e loxAfr1.scaffold_5603 58454 1915 + 68791 I
e echTel1.scaffold_212365 4641 1430 + 9822 I
e echTel1.scaffold_212365 4641 1430 + 9822 I
e rn3.chr4 29161032 1524 - 187371129 I
e mm7.chr6 28091695 3290 - 149646834 I
""" # noqa: W291
    print(actual)
    print("---")
    print(expected)
    assert actual == expected
def check_component(c, src, start, size, strand, src_size, text):
    """Assert that alignment component ``c`` has exactly the given fields."""
    expectations = (
        ("src", src),
        ("start", start),
        ("size", size),
        ("strand", strand),
        ("src_size", src_size),
        ("text", text),
    )
    # Check attributes in the same order as the original assert chain.
    for attr, expected in expectations:
        assert getattr(c, attr) == expected
| |
import unittest
from datetime import date
from enum import Enum
from pypika import (
AliasedQuery,
Case,
ClickHouseQuery,
EmptyCriterion,
Field as F,
Index,
MSSQLQuery,
MySQLQuery,
NullValue,
OracleQuery,
Order,
PostgreSQLQuery,
Query,
QueryException,
RedshiftQuery,
SQLLiteQuery,
Table,
Tables,
VerticaQuery,
functions as fn,
SYSTEM_TIME,
)
from pypika.terms import ValueWrapper
# Authorship metadata carried over from the original pypika test module.
__author__ = "Timothy Heys"
__email__ = "theys@kayak.com"
class SelectTests(unittest.TestCase):
table_abc, table_efg = Tables("abc", "efg")
def test_empty_query(self):
q = Query.from_("abc")
self.assertEqual("", str(q))
def test_select_no_from(self):
q = Query.select(1)
self.assertEqual("SELECT 1", str(q))
def test_select_no_with_alias_from(self):
q = Query.select(ValueWrapper(1, "test"))
self.assertEqual('SELECT 1 "test"', str(q))
def test_select_no_from_with_field_raises_exception(self):
with self.assertRaises(QueryException):
Query.select("asdf")
def test_select__star(self):
q = Query.from_("abc").select("*")
self.assertEqual('SELECT * FROM "abc"', str(q))
def test_select__table_schema(self):
q = Query.from_(Table("abc", "schema1")).select("*")
self.assertEqual('SELECT * FROM "schema1"."abc"', str(q))
def test_select__table_schema_with_multiple_levels_as_tuple(self):
q = Query.from_(Table("abc", ("schema1", "schema2"))).select("*")
self.assertEqual('SELECT * FROM "schema1"."schema2"."abc"', str(q))
def test_select__table_schema_with_multiple_levels_as_list(self):
q = Query.from_(Table("abc", ["schema1", "schema2"])).select("*")
self.assertEqual('SELECT * FROM "schema1"."schema2"."abc"', str(q))
def test_select__star__replacement(self):
q = Query.from_("abc").select("foo").select("*")
self.assertEqual('SELECT * FROM "abc"', str(q))
def test_select__distinct__single(self):
q = Query.from_("abc").select("foo").distinct()
self.assertEqual('SELECT DISTINCT "foo" FROM "abc"', str(q))
def test_select__distinct__multi(self):
q = Query.from_("abc").select("foo", "bar").distinct()
self.assertEqual('SELECT DISTINCT "foo","bar" FROM "abc"', str(q))
def test_select__column__single__str(self):
q = Query.from_("abc").select("foo")
self.assertEqual('SELECT "foo" FROM "abc"', str(q))
def test_select__column__single__alias__str(self):
q = Query.from_(self.table_abc).select(self.table_abc.foo.as_("bar"))
self.assertEqual('SELECT "foo" "bar" FROM "abc"', str(q))
def test_select__column__single__table_alias__str(self):
q = Query.from_(self.table_abc.as_("fizzbuzz")).select(self.table_abc.foo.as_("bar"))
self.assertEqual('SELECT "foo" "bar" FROM "abc" "fizzbuzz"', str(q))
def test_select__column__single__field(self):
t = Table("abc")
q = Query.from_(t).select(t.foo)
self.assertEqual('SELECT "foo" FROM "abc"', str(q))
def test_select__columns__multi__str(self):
q1 = Query.from_("abc").select("foo", "bar")
q2 = Query.from_("abc").select("foo").select("bar")
self.assertEqual('SELECT "foo","bar" FROM "abc"', str(q1))
self.assertEqual('SELECT "foo","bar" FROM "abc"', str(q2))
def test_select__columns__multi__field(self):
q1 = Query.from_(self.table_abc).select(self.table_abc.foo, self.table_abc.bar)
q2 = Query.from_(self.table_abc).select(self.table_abc.foo).select(self.table_abc.bar)
self.assertEqual('SELECT "foo","bar" FROM "abc"', str(q1))
self.assertEqual('SELECT "foo","bar" FROM "abc"', str(q2))
def test_select__multiple_tables(self):
q = Query.from_(self.table_abc).select(self.table_abc.foo).from_(self.table_efg).select(self.table_efg.bar)
self.assertEqual('SELECT "abc"."foo","efg"."bar" FROM "abc","efg"', str(q))
def test_select__subquery(self):
subquery = Query.from_(self.table_abc).select("*")
q = Query.from_(subquery).select(subquery.foo, subquery.bar)
self.assertEqual('SELECT "sq0"."foo","sq0"."bar" ' 'FROM (SELECT * FROM "abc") "sq0"', str(q))
def test_select__multiple_subqueries(self):
subquery0 = Query.from_(self.table_abc).select("foo")
subquery1 = Query.from_(self.table_efg).select("bar")
q = Query.from_(subquery0).from_(subquery1).select(subquery0.foo, subquery1.bar)
self.assertEqual(
'SELECT "sq0"."foo","sq1"."bar" ' 'FROM (SELECT "foo" FROM "abc") "sq0",' '(SELECT "bar" FROM "efg") "sq1"',
str(q),
)
def test_select__nested_subquery(self):
subquery0 = Query.from_(self.table_abc).select("*")
subquery1 = Query.from_(subquery0).select(subquery0.foo, subquery0.bar)
subquery2 = Query.from_(subquery1).select(subquery1.foo)
q = Query.from_(subquery2).select(subquery2.foo)
self.assertEqual(
'SELECT "sq2"."foo" '
'FROM (SELECT "sq1"."foo" '
'FROM (SELECT "sq0"."foo","sq0"."bar" '
'FROM (SELECT * FROM "abc") "sq0") "sq1") "sq2"',
str(q),
)
def test_select__no_table(self):
q = Query.select(1, 2, 3)
self.assertEqual("SELECT 1,2,3", str(q))
def test_select_then_add_table(self):
q = Query.select(1).select(2, 3).from_("abc").select("foo")
self.assertEqual('SELECT 1,2,3,"foo" FROM "abc"', str(q))
def test_select_with_limit(self):
q1 = Query.from_("abc").select("foo")[:10]
self.assertEqual('SELECT "foo" FROM "abc" LIMIT 10', str(q1))
def test_select_with_limit_zero(self):
with self.subTest('using python slice'):
q1 = Query.from_("abc").select("foo")[:0]
self.assertEqual('SELECT "foo" FROM "abc" LIMIT 0', str(q1))
with self.subTest('using limit method'):
q2 = Query.from_("abc").select("foo").limit(0)
self.assertEqual('SELECT "foo" FROM "abc" LIMIT 0', str(q2))
def test_select_with_limit__func(self):
q1 = Query.from_("abc").select("foo").limit(10)
self.assertEqual('SELECT "foo" FROM "abc" LIMIT 10', str(q1))
def test_select_with_offset(self):
q1 = Query.from_("abc").select("foo")[10:]
self.assertEqual('SELECT "foo" FROM "abc" OFFSET 10', str(q1))
def test_select_with_offset__func(self):
q1 = Query.from_("abc").select("foo").offset(10)
self.assertEqual('SELECT "foo" FROM "abc" OFFSET 10', str(q1))
def test_select_with_limit_and_offset(self):
q1 = Query.from_("abc").select("foo")[10:10]
self.assertEqual('SELECT "foo" FROM "abc" LIMIT 10 OFFSET 10', str(q1))
def test_select_with_force_index(self):
q = Query.from_("abc").select("foo").force_index("egg")
self.assertEqual('SELECT "foo" FROM "abc" FORCE INDEX ("egg")', str(q))
def test_select_with_force_index_with_index_object(self):
index = Index("egg")
q = Query.from_("abc").select("foo").force_index(index)
self.assertEqual('SELECT "foo" FROM "abc" FORCE INDEX ("egg")', str(q))
def test_select_with_force_index_multiple_indexes(self):
q = Query.from_("abc").select("foo").force_index("egg", "bacon")
self.assertEqual('SELECT "foo" FROM "abc" FORCE INDEX ("egg","bacon")', str(q))
def test_select_with_force_index_multiple_calls(self):
q = (
Query.from_("abc")
.select("foo")
.force_index(
"egg",
)
.force_index("spam")
)
self.assertEqual('SELECT "foo" FROM "abc" FORCE INDEX ("egg","spam")', str(q))
def test_select_with_use_index(self):
q = Query.from_("abc").select("foo").use_index("egg")
self.assertEqual('SELECT "foo" FROM "abc" USE INDEX ("egg")', str(q))
def test_select_with_use_index_with_index_object(self):
index = Index("egg")
q = Query.from_("abc").select("foo").use_index(index)
self.assertEqual('SELECT "foo" FROM "abc" USE INDEX ("egg")', str(q))
def test_select_with_use_index_multiple_indexes(self):
q = Query.from_("abc").select("foo").use_index("egg", "bacon")
self.assertEqual('SELECT "foo" FROM "abc" USE INDEX ("egg","bacon")', str(q))
def test_select_with_use_index_multiple_calls(self):
q = (
Query.from_("abc")
.select("foo")
.use_index(
"egg",
)
.use_index("spam")
)
self.assertEqual('SELECT "foo" FROM "abc" USE INDEX ("egg","spam")', str(q))
def test_mysql_query_uses_backtick_quote_chars(self):
q = MySQLQuery.from_("abc").select("foo", "bar")
self.assertEqual("SELECT `foo`,`bar` FROM `abc`", str(q))
def test_vertica_query_uses_double_quote_chars(self):
q = VerticaQuery.from_("abc").select("foo", "bar")
self.assertEqual('SELECT "foo","bar" FROM "abc"', str(q))
def test_mssql_query_uses_double_quote_chars(self):
q = MSSQLQuery.from_("abc").select("foo", "bar")
self.assertEqual('SELECT "foo","bar" FROM "abc"', str(q))
def test_oracle_query_uses_no_quote_chars(self):
q = OracleQuery.from_("abc").select("foo", "bar")
self.assertEqual('SELECT foo,bar FROM abc', str(q))
def test_postgresql_query_uses_double_quote_chars(self):
q = PostgreSQLQuery.from_("abc").select("foo", "bar")
self.assertEqual('SELECT "foo","bar" FROM "abc"', str(q))
def test_redshift_query_uses_double_quote_chars(self):
q = RedshiftQuery.from_("abc").select("foo", "bar")
self.assertEqual('SELECT "foo","bar" FROM "abc"', str(q))
def test_table_select_alias(self):
q = self.table_abc.select(1)
self.assertEqual('SELECT 1 FROM "abc"', str(q))
self.assertEqual(q, Query.from_("abc").select(1))
def test_table_select_alias_with_offset_and_limit(self):
self.assertEqual(self.table_abc.select("foo")[10:10], Query.from_("abc").select("foo")[10:10])
self.assertEqual(
self.table_abc.select(self.table_abc.foo)[10:10],
Query.from_("abc").select("foo")[10:10],
)
def test_temporal_select(self):
t = Table("abc")
with self.subTest("with system time as of"):
q = Query.from_(t.for_(SYSTEM_TIME.as_of('2020-01-01'))).select("*")
self.assertEqual('SELECT * FROM "abc" FOR SYSTEM_TIME AS OF \'2020-01-01\'', str(q))
with self.subTest("with system time between"):
q = Query.from_(t.for_(SYSTEM_TIME.between('2020-01-01', '2020-02-01'))).select("*")
self.assertEqual('SELECT * FROM "abc" FOR SYSTEM_TIME BETWEEN \'2020-01-01\' AND \'2020-02-01\'', str(q))
with self.subTest("with system time from to"):
q = Query.from_(t.for_(SYSTEM_TIME.from_to('2020-01-01', '2020-02-01'))).select("*")
self.assertEqual('SELECT * FROM "abc" FOR SYSTEM_TIME FROM \'2020-01-01\' TO \'2020-02-01\'', str(q))
with self.subTest("with ALL"):
q = Query.from_(t.for_(SYSTEM_TIME.all_())).select("*")
self.assertEqual('SELECT * FROM "abc" FOR SYSTEM_TIME ALL', str(q))
with self.subTest("with period between"):
q = Query.from_(t.for_(t.valid_period.between('2020-01-01', '2020-02-01'))).select("*")
self.assertEqual('SELECT * FROM "abc" FOR "valid_period" BETWEEN \'2020-01-01\' AND \'2020-02-01\'', str(q))
with self.subTest("with period from to"):
q = Query.from_(t.for_(t.valid_period.from_to('2020-01-01', '2020-02-01'))).select("*")
self.assertEqual('SELECT * FROM "abc" FOR "valid_period" FROM \'2020-01-01\' TO \'2020-02-01\'', str(q))
with self.subTest("with ALL"):
q = Query.from_(t.for_(t.valid_period.all_())).select("*")
self.assertEqual('SELECT * FROM "abc" FOR "valid_period" ALL', str(q))
class MyEnum(Enum):
    # Fixture enum covering each literal kind the SQL renderer must handle when
    # an Enum member appears in a criterion (str, int, bool, date, and None).
    STR = "foo"
    INT = 0
    BOOL = True  # distinct from INT because True != 0, so no enum aliasing occurs
    DATE = date(2020, 2, 2)
    NONE = None
class WhereTests(unittest.TestCase):
    """WHERE clause rendering: enum values, comparison operators, boolean
    combinators, LIKE/REGEX variants, locking clauses (FOR UPDATE ...),
    and empty-criterion handling."""
    t = Table("abc")
    t2 = Table("cba")
    def test_where_enum(self):
        # Enum members are rendered as their underlying values.
        q1 = Query.from_(self.t).select("*").where(self.t.foo == MyEnum.STR)
        q2 = Query.from_(self.t).select("*").where(self.t.foo == MyEnum.INT)
        q3 = Query.from_(self.t).select("*").where(self.t.foo == MyEnum.BOOL)
        q4 = Query.from_(self.t).select("*").where(self.t.foo == MyEnum.DATE)
        q5 = Query.from_(self.t).select("*").where(self.t.foo == MyEnum.NONE)
        self.assertEqual('SELECT * FROM "abc" WHERE "foo"=\'foo\'', str(q1))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo"=0', str(q2))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo"=true', str(q3))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo"=\'2020-02-02\'', str(q4))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo"=null', str(q5))
    def test_where_field_equals(self):
        # The == operator and the explicit .eq() method produce identical SQL.
        q1 = Query.from_(self.t).select("*").where(self.t.foo == self.t.bar)
        q2 = Query.from_(self.t).select("*").where(self.t.foo.eq(self.t.bar))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo"="bar"', str(q1))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo"="bar"', str(q2))
        q = self.t.select("*").where(self.t.foo == self.t.bar)
        self.assertEqual(q, q1)
    def test_where_field_equals_for_update(self):
        q = Query.from_(self.t).select("*").where(self.t.foo == self.t.bar).for_update()
        self.assertEqual('SELECT * FROM "abc" WHERE "foo"="bar" FOR UPDATE', str(q))
    def test_where_field_equals_for_update_nowait(self):
        for query_cls in [
            MySQLQuery,
            PostgreSQLQuery,
        ]:
            # Fall back to '"' when the dialect's QUOTE_CHAR is not a plain str.
            quote_char = query_cls._builder().QUOTE_CHAR if isinstance(query_cls._builder().QUOTE_CHAR, str) else '"'
            q = query_cls.from_(self.t).select("*").where(self.t.foo == self.t.bar).for_update(nowait=True)
            self.assertEqual(
                'SELECT * '
                'FROM {quote_char}abc{quote_char} '
                'WHERE {quote_char}foo{quote_char}={quote_char}bar{quote_char} '
                'FOR UPDATE NOWAIT'.format(
                    quote_char=quote_char,
                ),
                str(q),
            )
    def test_where_field_equals_for_update_skip_locked(self):
        for query_cls in [
            MySQLQuery,
            PostgreSQLQuery,
        ]:
            quote_char = query_cls._builder().QUOTE_CHAR if isinstance(query_cls._builder().QUOTE_CHAR, str) else '"'
            q = query_cls.from_(self.t).select("*").where(self.t.foo == self.t.bar).for_update(skip_locked=True)
            self.assertEqual(
                'SELECT * '
                'FROM {quote_char}abc{quote_char} '
                'WHERE {quote_char}foo{quote_char}={quote_char}bar{quote_char} '
                'FOR UPDATE SKIP LOCKED'.format(
                    quote_char=quote_char,
                ),
                str(q),
            )
    def test_where_field_equals_for_update_of(self):
        for query_cls in [
            MySQLQuery,
            PostgreSQLQuery,
        ]:
            quote_char = query_cls._builder().QUOTE_CHAR if isinstance(query_cls._builder().QUOTE_CHAR, str) else '"'
            q = query_cls.from_(self.t).select("*").where(self.t.foo == self.t.bar).for_update(of=("abc",))
            self.assertEqual(
                'SELECT * '
                'FROM {quote_char}abc{quote_char} '
                'WHERE {quote_char}foo{quote_char}={quote_char}bar{quote_char} '
                'FOR UPDATE OF {quote_char}abc{quote_char}'.format(
                    quote_char=quote_char,
                ),
                str(q),
            )
    def test_where_field_equals_for_update_of_multiple_tables(self):
        for query_cls in [
            MySQLQuery,
            PostgreSQLQuery,
        ]:
            q = (
                query_cls.from_(self.t)
                .join(self.t2)
                .on(self.t.id == self.t2.abc_id)
                .select("*")
                .where(self.t.foo == self.t.bar)
                .for_update(of=("abc", "cba"))
            )
            quote_char = query_cls._builder().QUOTE_CHAR if isinstance(query_cls._builder().QUOTE_CHAR, str) else '"'
            # The table order in the OF clause is not deterministic, so accept
            # either rendering via assertIn.
            self.assertIn(
                str(q),
                [
                    'SELECT * '
                    'FROM {quote_char}abc{quote_char} '
                    'JOIN {quote_char}cba{quote_char} '
                    'ON {quote_char}abc{quote_char}.{quote_char}id{quote_char}='
                    '{quote_char}cba{quote_char}.{quote_char}abc_id{quote_char} '
                    'WHERE {quote_char}abc{quote_char}.{quote_char}foo{quote_char}='
                    '{quote_char}abc{quote_char}.{quote_char}bar{quote_char} '
                    'FOR UPDATE OF {quote_char}cba{quote_char}, {quote_char}abc{quote_char}'.format(
                        quote_char=quote_char,
                    ),
                    'SELECT * '
                    'FROM {quote_char}abc{quote_char} '
                    'JOIN {quote_char}cba{quote_char} '
                    'ON {quote_char}abc{quote_char}.{quote_char}id{quote_char}='
                    '{quote_char}cba{quote_char}.{quote_char}abc_id{quote_char} '
                    'WHERE {quote_char}abc{quote_char}.{quote_char}foo{quote_char}='
                    '{quote_char}abc{quote_char}.{quote_char}bar{quote_char} '
                    'FOR UPDATE OF {quote_char}abc{quote_char}, {quote_char}cba{quote_char}'.format(
                        quote_char=quote_char,
                    ),
                ],
            )
    def test_where_field_equals_for_update_of_nowait(self):
        for query_cls in [
            MySQLQuery,
            PostgreSQLQuery,
        ]:
            q = query_cls.from_(self.t).select("*").where(self.t.foo == self.t.bar).for_update(of=("abc",), nowait=True)
            quote_char = query_cls._builder().QUOTE_CHAR if isinstance(query_cls._builder().QUOTE_CHAR, str) else '"'
            self.assertEqual(
                'SELECT * '
                'FROM {quote_char}abc{quote_char} '
                'WHERE {quote_char}foo{quote_char}={quote_char}bar{quote_char} '
                'FOR UPDATE OF {quote_char}abc{quote_char} NOWAIT'.format(
                    quote_char=quote_char,
                ),
                str(q),
            )
    def test_where_field_equals_for_update_of_skip_locked(self):
        for query_cls in [
            MySQLQuery,
            PostgreSQLQuery,
        ]:
            q = (
                query_cls.from_(self.t)
                .select("*")
                .where(self.t.foo == self.t.bar)
                .for_update(of=("abc",), skip_locked=True)
            )
            quote_char = query_cls._builder().QUOTE_CHAR if isinstance(query_cls._builder().QUOTE_CHAR, str) else '"'
            self.assertEqual(
                'SELECT * '
                'FROM {quote_char}abc{quote_char} '
                'WHERE {quote_char}foo{quote_char}={quote_char}bar{quote_char} '
                'FOR UPDATE OF {quote_char}abc{quote_char} SKIP LOCKED'.format(
                    quote_char=quote_char,
                ),
                str(q),
            )
    def test_where_field_equals_for_update_skip_locked_and_of(self):
        for query_cls in [
            MySQLQuery,
            PostgreSQLQuery,
        ]:
            # nowait=False is explicitly passed and must not appear in the SQL.
            q = (
                query_cls.from_(self.t)
                .select("*")
                .where(self.t.foo == self.t.bar)
                .for_update(nowait=False, skip_locked=True, of=("abc",))
            )
            quote_char = query_cls._builder().QUOTE_CHAR if isinstance(query_cls._builder().QUOTE_CHAR, str) else '"'
            self.assertEqual(
                'SELECT * '
                'FROM {quote_char}abc{quote_char} '
                'WHERE {quote_char}foo{quote_char}={quote_char}bar{quote_char} '
                'FOR UPDATE OF {quote_char}abc{quote_char} SKIP LOCKED'.format(
                    quote_char=quote_char,
                ),
                str(q),
            )
    def test_where_field_equals_where(self):
        # Chained where() calls are ANDed together.
        q = Query.from_(self.t).select("*").where(self.t.foo == 1).where(self.t.bar == self.t.baz)
        self.assertEqual('SELECT * FROM "abc" WHERE "foo"=1 AND "bar"="baz"', str(q))
    def test_where_field_equals_where_not(self):
        q = Query.from_(self.t).select("*").where((self.t.foo == 1).negate()).where(self.t.bar == self.t.baz)
        self.assertEqual('SELECT * FROM "abc" WHERE NOT "foo"=1 AND "bar"="baz"', str(q))
    def test_where_field_equals_where_two_not(self):
        q = Query.from_(self.t).select("*").where((self.t.foo == 1).negate()).where((self.t.bar == self.t.baz).negate())
        self.assertEqual('SELECT * FROM "abc" WHERE NOT "foo"=1 AND NOT "bar"="baz"', str(q))
    def test_where_single_quote(self):
        # Embedded single quotes are escaped by doubling them.
        q1 = Query.from_(self.t).select("*").where(self.t.foo == "bar'foo")
        self.assertEqual("SELECT * FROM \"abc\" WHERE \"foo\"='bar''foo'", str(q1))
    def test_where_field_equals_and(self):
        q = Query.from_(self.t).select("*").where((self.t.foo == 1) & (self.t.bar == self.t.baz))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo"=1 AND "bar"="baz"', str(q))
    def test_where_field_equals_or(self):
        q = Query.from_(self.t).select("*").where((self.t.foo == 1) | (self.t.bar == self.t.baz))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo"=1 OR "bar"="baz"', str(q))
    def test_where_nested_conditions(self):
        # An OR criterion followed by a chained where() is parenthesized.
        q = Query.from_(self.t).select("*").where((self.t.foo == 1) | (self.t.bar == self.t.baz)).where(self.t.baz == 0)
        self.assertEqual('SELECT * FROM "abc" WHERE ("foo"=1 OR "bar"="baz") AND "baz"=0', str(q))
    def test_where_field_starts_with(self):
        q = Query.from_(self.t).select(self.t.star).where(self.t.foo.like("ab%"))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo" LIKE \'ab%\'', str(q))
    def test_where_field_contains(self):
        q = Query.from_(self.t).select(self.t.star).where(self.t.foo.like("%fg%"))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo" LIKE \'%fg%\'', str(q))
    def test_where_field_ends_with(self):
        q = Query.from_(self.t).select(self.t.star).where(self.t.foo.like("%yz"))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo" LIKE \'%yz\'', str(q))
    def test_where_field_is_n_chars_long(self):
        q = Query.from_(self.t).select(self.t.star).where(self.t.foo.like("___"))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo" LIKE \'___\'', str(q))
    def test_where_field_does_not_start_with(self):
        q = Query.from_(self.t).select(self.t.star).where(self.t.foo.not_like("ab%"))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo" NOT LIKE \'ab%\'', str(q))
    def test_where_field_does_not_contain(self):
        q = Query.from_(self.t).select(self.t.star).where(self.t.foo.not_like("%fg%"))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo" NOT LIKE \'%fg%\'', str(q))
    def test_where_field_does_not_end_with(self):
        q = Query.from_(self.t).select(self.t.star).where(self.t.foo.not_like("%yz"))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo" NOT LIKE \'%yz\'', str(q))
    def test_where_field_is_not_n_chars_long(self):
        q = Query.from_(self.t).select(self.t.star).where(self.t.foo.not_like("___"))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo" NOT LIKE \'___\'', str(q))
    def test_where_field_matches_regex(self):
        q = Query.from_(self.t).select(self.t.star).where(self.t.foo.regex(r"^b"))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo" REGEX \'^b\'', str(q))
    def test_where_field_matches_regexp(self):
        q = Query.from_(self.t).select(self.t.star).where(self.t.foo.regexp(r"^b"))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo" REGEXP \'^b\'', str(q))
    def test_where_field_matches_rlike(self):
        q = Query.from_(self.t).select(self.t.star).where(self.t.foo.rlike(r"^b"))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo" RLIKE \'^b\'', str(q))
    def test_ignore_empty_criterion_where(self):
        # An EmptyCriterion contributes no WHERE clause at all.
        q1 = Query.from_(self.t).select("*").where(EmptyCriterion())
        self.assertEqual('SELECT * FROM "abc"', str(q1))
    def test_ignore_empty_criterion_having(self):
        q1 = Query.from_(self.t).select("*").having(EmptyCriterion())
        self.assertEqual('SELECT * FROM "abc"', str(q1))
    def test_select_with_force_index_and_where(self):
        q = Query.from_("abc").select("foo").where(self.t.foo == self.t.bar).force_index("egg")
        self.assertEqual('SELECT "foo" FROM "abc" FORCE INDEX ("egg") WHERE "foo"="bar"', str(q))
    def test_where_with_multiple_wheres_using_and_case(self):
        case_stmt = Case().when(self.t.foo == 'bar', 1).else_(0)
        query = Query.from_(self.t).select(case_stmt).where(case_stmt & self.t.blah.isin(['test']))
        self.assertEqual(
            'SELECT CASE WHEN "foo"=\'bar\' THEN 1 ELSE 0 END FROM "abc" WHERE CASE WHEN "foo"=\'bar\' THEN 1 ELSE 0 '
            'END AND "blah" IN (\'test\')',
            str(query),
        )
    def test_where_with_multiple_wheres_using_or_case(self):
        case_stmt = Case().when(self.t.foo == 'bar', 1).else_(0)
        query = Query.from_(self.t).select(case_stmt).where(case_stmt | self.t.blah.isin(['test']))
        self.assertEqual(
            'SELECT CASE WHEN "foo"=\'bar\' THEN 1 ELSE 0 END FROM "abc" WHERE CASE WHEN "foo"=\'bar\' THEN 1 ELSE 0 '
            'END OR "blah" IN (\'test\')',
            str(query),
        )
class PreWhereTests(WhereTests):
    """PREWHERE clause rendering; also re-runs every inherited WhereTests case."""

    t = Table("abc")

    def test_prewhere_field_equals(self):
        query_op = Query.from_(self.t).select("*").prewhere(self.t.foo == self.t.bar)
        query_method = Query.from_(self.t).select("*").prewhere(self.t.foo.eq(self.t.bar))
        self.assertEqual('SELECT * FROM "abc" PREWHERE "foo"="bar"', str(query_op))
        self.assertEqual('SELECT * FROM "abc" PREWHERE "foo"="bar"', str(query_method))

    def test_where_and_prewhere(self):
        query = Query.from_(self.t).select("*").prewhere(self.t.foo == self.t.bar).where(self.t.foo == self.t.bar)
        self.assertEqual('SELECT * FROM "abc" PREWHERE "foo"="bar" WHERE "foo"="bar"', str(query))
class GroupByTests(unittest.TestCase):
    """GROUP BY rendering: fields, aggregates, DISTINCT/FILTER modifiers,
    alias handling per dialect, and WITH TOTALS."""
    t = Table("abc")
    maxDiff = None
    def test_groupby__single(self):
        q = Query.from_(self.t).groupby(self.t.foo).select(self.t.foo)
        self.assertEqual('SELECT "foo" FROM "abc" GROUP BY "foo"', str(q))
    def test_groupby__multi(self):
        q = Query.from_(self.t).groupby(self.t.foo, self.t.bar).select(self.t.foo, self.t.bar)
        self.assertEqual('SELECT "foo","bar" FROM "abc" GROUP BY "foo","bar"', str(q))
    def test_groupby__count_star(self):
        q = Query.from_(self.t).groupby(self.t.foo).select(self.t.foo, fn.Count("*"))
        self.assertEqual('SELECT "foo",COUNT(*) FROM "abc" GROUP BY "foo"', str(q))
    def test_groupby__count_field(self):
        q = Query.from_(self.t).groupby(self.t.foo).select(self.t.foo, fn.Count(self.t.bar))
        self.assertEqual('SELECT "foo",COUNT("bar") FROM "abc" GROUP BY "foo"', str(q))
    def test_groupby__count_distinct(self):
        q = Query.from_(self.t).groupby(self.t.foo).select(self.t.foo, fn.Count("*").distinct())
        self.assertEqual('SELECT "foo",COUNT(DISTINCT *) FROM "abc" GROUP BY "foo"', str(q))
    def test_groupby__sum_distinct(self):
        q = Query.from_(self.t).groupby(self.t.foo).select(self.t.foo, fn.Sum(self.t.bar).distinct())
        self.assertEqual('SELECT "foo",SUM(DISTINCT "bar") FROM "abc" GROUP BY "foo"', str(q))
    def test_groupby__sum_filter(self):
        # Aggregate FILTER(WHERE ...) modifier.
        q = (
            Query.from_(self.t)
            .groupby(self.t.foo)
            .select(self.t.foo, fn.Sum(self.t.bar).filter(self.t.id.eq(1) & self.t.cid.gt(2)))
        )
        self.assertEqual('SELECT "foo",SUM("bar") FILTER(WHERE "id"=1 AND "cid">2) FROM "abc" GROUP BY "foo"', str(q))
    def test_groupby__str(self):
        q = Query.from_("abc").groupby("foo").select("foo", fn.Count("*").distinct())
        self.assertEqual('SELECT "foo",COUNT(DISTINCT *) FROM "abc" GROUP BY "foo"', str(q))
    def test_groupby__int(self):
        # Grouping by ordinal position (GROUP BY 1).
        q = Query.from_("abc").groupby(1).select("foo", fn.Count("*").distinct())
        self.assertEqual('SELECT "foo",COUNT(DISTINCT *) FROM "abc" GROUP BY 1', str(q))
    def test_groupby__alias(self):
        # By default the select alias is reused in the GROUP BY clause.
        bar = self.t.bar.as_("bar01")
        q = Query.from_(self.t).select(fn.Sum(self.t.foo), bar).groupby(bar)
        self.assertEqual('SELECT SUM("foo"),"bar" "bar01" FROM "abc" GROUP BY "bar01"', str(q))
    def test_groupby__no_alias(self):
        # groupby_alias=False forces the underlying column name instead.
        bar = self.t.bar.as_("bar01")
        q = Query.from_(self.t).select(fn.Sum(self.t.foo), bar).groupby(bar)
        self.assertEqual(
            'SELECT SUM("foo"),"bar" "bar01" FROM "abc" GROUP BY "bar"',
            q.get_sql(groupby_alias=False),
        )
    def test_groupby__no_alias_mssql(self):
        # MSSQL never groups by the alias.
        bar = self.t.bar.as_("bar01")
        q = MSSQLQuery.from_(self.t).select(fn.Sum(self.t.foo), bar).groupby(bar)
        self.assertEqual('SELECT SUM("foo"),"bar" "bar01" FROM "abc" GROUP BY "bar"', str(q))
    def test_groupby__no_alias_oracle(self):
        bar = self.t.bar.as_("bar01")
        q = OracleQuery.from_(self.t).select(fn.Sum(self.t.foo), bar).groupby(bar)
        self.assertEqual('SELECT SUM(foo),bar bar01 FROM abc GROUP BY bar', str(q))
    def test_groupby__alias_platforms(self):
        bar = self.t.bar.as_("bar01")
        for query_cls in [
            MySQLQuery,
            VerticaQuery,
            PostgreSQLQuery,
            RedshiftQuery,
            ClickHouseQuery,
            SQLLiteQuery,
        ]:
            q = query_cls.from_(self.t).select(fn.Sum(self.t.foo), bar).groupby(bar)
            # Fall back to '"' when the dialect's QUOTE_CHAR is not a plain str.
            quote_char = query_cls._builder().QUOTE_CHAR if isinstance(query_cls._builder().QUOTE_CHAR, str) else '"'
            self.assertEqual(
                "SELECT "
                "SUM({quote_char}foo{quote_char}),"
                "{quote_char}bar{quote_char}{as_keyword}{quote_char}bar01{quote_char} "
                "FROM {quote_char}abc{quote_char} "
                "GROUP BY {quote_char}bar01{quote_char}".format(
                    as_keyword=' AS ' if query_cls is ClickHouseQuery else ' ', quote_char=quote_char
                ),
                str(q),
            )
    def test_groupby__alias_with_join(self):
        table1 = Table("table1", alias="t1")
        bar = table1.bar.as_("bar01")
        q = Query.from_(self.t).join(table1).on(self.t.id == table1.t_ref).select(fn.Sum(self.t.foo), bar).groupby(bar)
        self.assertEqual(
            'SELECT SUM("abc"."foo"),"t1"."bar" "bar01" FROM "abc" '
            'JOIN "table1" "t1" ON "abc"."id"="t1"."t_ref" '
            'GROUP BY "bar01"',
            str(q),
        )
    def test_groupby_with_case_uses_the_alias(self):
        q = (
            Query.from_(self.t)
            .select(
                fn.Sum(self.t.foo).as_("bar"),
                Case().when(self.t.fname == "Tom", "It was Tom").else_("It was someone else.").as_("who_was_it"),
            )
            .groupby(Case().when(self.t.fname == "Tom", "It was Tom").else_("It was someone else.").as_("who_was_it"))
        )
        self.assertEqual(
            'SELECT SUM("foo") "bar",'
            "CASE WHEN \"fname\"='Tom' THEN 'It was Tom' "
            "ELSE 'It was someone else.' END \"who_was_it\" "
            'FROM "abc" '
            'GROUP BY "who_was_it"',
            str(q),
        )
    def test_mysql_query_uses_backtick_quote_chars(self):
        q = MySQLQuery.from_(self.t).groupby(self.t.foo).select(self.t.foo)
        self.assertEqual("SELECT `foo` FROM `abc` GROUP BY `foo`", str(q))
    def test_vertica_query_uses_double_quote_chars(self):
        q = VerticaQuery.from_(self.t).groupby(self.t.foo).select(self.t.foo)
        self.assertEqual('SELECT "foo" FROM "abc" GROUP BY "foo"', str(q))
    def test_mssql_query_uses_double_quote_chars(self):
        q = MSSQLQuery.from_(self.t).groupby(self.t.foo).select(self.t.foo)
        self.assertEqual('SELECT "foo" FROM "abc" GROUP BY "foo"', str(q))
    def test_oracle_query_uses_no_quote_chars(self):
        q = OracleQuery.from_(self.t).groupby(self.t.foo).select(self.t.foo)
        self.assertEqual('SELECT foo FROM abc GROUP BY foo', str(q))
    def test_postgres_query_uses_double_quote_chars(self):
        q = PostgreSQLQuery.from_(self.t).groupby(self.t.foo).select(self.t.foo)
        self.assertEqual('SELECT "foo" FROM "abc" GROUP BY "foo"', str(q))
    def test_redshift_query_uses_double_quote_chars(self):
        q = RedshiftQuery.from_(self.t).groupby(self.t.foo).select(self.t.foo)
        self.assertEqual('SELECT "foo" FROM "abc" GROUP BY "foo"', str(q))
    def test_group_by__single_with_totals(self):
        # WITH TOTALS is a ClickHouse-style GROUP BY modifier.
        q = Query.from_(self.t).groupby(self.t.foo).select(self.t.foo).with_totals()
        self.assertEqual('SELECT "foo" FROM "abc" GROUP BY "foo" WITH TOTALS', str(q))
    def test_groupby__multi_with_totals(self):
        q = Query.from_(self.t).groupby(self.t.foo, self.t.bar).select(self.t.foo, self.t.bar).with_totals()
        self.assertEqual('SELECT "foo","bar" FROM "abc" GROUP BY "foo","bar" WITH TOTALS', str(q))
class HavingTests(unittest.TestCase):
    """HAVING clause rendering, including chained conditions, joins, and
    per-dialect identifier quoting."""
    table_abc, table_efg = Tables("abc", "efg")
    def test_having_greater_than(self):
        q = (
            Query.from_(self.table_abc)
            .select(self.table_abc.foo, fn.Sum(self.table_abc.bar))
            .groupby(self.table_abc.foo)
            .having(fn.Sum(self.table_abc.bar) > 1)
        )
        self.assertEqual(
            'SELECT "foo",SUM("bar") FROM "abc" GROUP BY "foo" HAVING SUM("bar")>1',
            str(q),
        )
    def test_having_and(self):
        q = (
            Query.from_(self.table_abc)
            .select(self.table_abc.foo, fn.Sum(self.table_abc.bar))
            .groupby(self.table_abc.foo)
            .having((fn.Sum(self.table_abc.bar) > 1) & (fn.Sum(self.table_abc.bar) < 100))
        )
        self.assertEqual(
            'SELECT "foo",SUM("bar") FROM "abc" GROUP BY "foo" HAVING SUM("bar")>1 AND SUM("bar")<100',
            str(q),
        )
    def test_having_join_and_equality(self):
        # Chained having() calls are ANDed, like chained where() calls.
        q = (
            Query.from_(self.table_abc)
            .join(self.table_efg)
            .on(self.table_abc.foo == self.table_efg.foo)
            .select(self.table_abc.foo, fn.Sum(self.table_efg.bar), self.table_abc.buz)
            .groupby(self.table_abc.foo)
            .having(self.table_abc.buz == "fiz")
            .having(fn.Sum(self.table_efg.bar) > 100)
        )
        self.assertEqual(
            'SELECT "abc"."foo",SUM("efg"."bar"),"abc"."buz" FROM "abc" '
            'JOIN "efg" ON "abc"."foo"="efg"."foo" '
            'GROUP BY "abc"."foo" '
            'HAVING "abc"."buz"=\'fiz\' AND SUM("efg"."bar")>100',
            str(q),
        )
    def test_mysql_query_uses_backtick_quote_chars(self):
        q = (
            MySQLQuery.from_(self.table_abc)
            .select(self.table_abc.foo)
            .groupby(self.table_abc.foo)
            .having(self.table_abc.buz == "fiz")
        )
        self.assertEqual("SELECT `foo` FROM `abc` GROUP BY `foo` HAVING `buz`='fiz'", str(q))
    def test_vertica_query_uses_double_quote_chars(self):
        q = (
            VerticaQuery.from_(self.table_abc)
            .select(self.table_abc.foo)
            .groupby(self.table_abc.foo)
            .having(self.table_abc.buz == "fiz")
        )
        self.assertEqual('SELECT "foo" FROM "abc" GROUP BY "foo" HAVING "buz"=\'fiz\'', str(q))
    def test_mssql_query_uses_double_quote_chars(self):
        q = (
            MSSQLQuery.from_(self.table_abc)
            .select(self.table_abc.foo)
            .groupby(self.table_abc.foo)
            .having(self.table_abc.buz == "fiz")
        )
        self.assertEqual('SELECT "foo" FROM "abc" GROUP BY "foo" HAVING "buz"=\'fiz\'', str(q))
    def test_oracle_query_uses_no_quote_chars(self):
        q = (
            OracleQuery.from_(self.table_abc)
            .select(self.table_abc.foo)
            .groupby(self.table_abc.foo)
            .having(self.table_abc.buz == "fiz")
        )
        self.assertEqual('SELECT foo FROM abc GROUP BY foo HAVING buz=\'fiz\'', str(q))
    def test_postgres_query_uses_double_quote_chars(self):
        q = (
            PostgreSQLQuery.from_(self.table_abc)
            .select(self.table_abc.foo)
            .groupby(self.table_abc.foo)
            .having(self.table_abc.buz == "fiz")
        )
        self.assertEqual('SELECT "foo" FROM "abc" GROUP BY "foo" HAVING "buz"=\'fiz\'', str(q))
    def test_redshift_query_uses_double_quote_chars(self):
        q = (
            RedshiftQuery.from_(self.table_abc)
            .select(self.table_abc.foo)
            .groupby(self.table_abc.foo)
            .having(self.table_abc.buz == "fiz")
        )
        self.assertEqual('SELECT "foo" FROM "abc" GROUP BY "foo" HAVING "buz"=\'fiz\'', str(q))
class OrderByTests(unittest.TestCase):
    """ORDER BY rendering: field/string inputs, explicit sort direction,
    and alias handling."""

    t = Table("abc")

    def test_orderby_single_field(self):
        query = Query.from_(self.t).orderby(self.t.foo).select(self.t.foo)
        self.assertEqual('SELECT "foo" FROM "abc" ORDER BY "foo"', str(query))

    def test_orderby_multi_fields(self):
        query = Query.from_(self.t).orderby(self.t.foo, self.t.bar).select(self.t.foo, self.t.bar)
        self.assertEqual('SELECT "foo","bar" FROM "abc" ORDER BY "foo","bar"', str(query))

    def test_orderby_single_str(self):
        query = Query.from_("abc").orderby("foo").select("foo")
        self.assertEqual('SELECT "foo" FROM "abc" ORDER BY "foo"', str(query))

    def test_orderby_asc(self):
        query = Query.from_(self.t).orderby(self.t.foo, order=Order.asc).select(self.t.foo)
        self.assertEqual('SELECT "foo" FROM "abc" ORDER BY "foo" ASC', str(query))

    def test_orderby_desc(self):
        query = Query.from_(self.t).orderby(self.t.foo, order=Order.desc).select(self.t.foo)
        self.assertEqual('SELECT "foo" FROM "abc" ORDER BY "foo" DESC', str(query))

    def test_orderby_no_alias(self):
        aliased_bar = self.t.bar.as_("bar01")
        query = Query.from_(self.t).select(fn.Sum(self.t.foo), aliased_bar).orderby(aliased_bar)
        # With orderby_alias disabled, the underlying column name is used.
        self.assertEqual(
            'SELECT SUM("foo"),"bar" "bar01" FROM "abc" ORDER BY "bar"',
            query.get_sql(orderby_alias=False),
        )

    def test_orderby_alias(self):
        aliased_bar = self.t.bar.as_("bar01")
        query = Query.from_(self.t).select(fn.Sum(self.t.foo), aliased_bar).orderby(aliased_bar)
        self.assertEqual('SELECT SUM("foo"),"bar" "bar01" FROM "abc" ORDER BY "bar01"', query.get_sql())
class AliasTests(unittest.TestCase):
    """Alias rendering: where aliases are emitted (select list) and where
    they are ignored (criteria, nested function arguments, group/order by)."""
    t = Table("abc")
    def test_table_field(self):
        q = Query.from_(self.t).select(self.t.foo.as_("bar"))
        self.assertEqual('SELECT "foo" "bar" FROM "abc"', str(q))
    def test_table_field__multi(self):
        q = Query.from_(self.t).select(self.t.foo.as_("bar"), self.t.fiz.as_("buz"))
        self.assertEqual('SELECT "foo" "bar","fiz" "buz" FROM "abc"', str(q))
    def test_arithmetic_function(self):
        q = Query.from_(self.t).select((self.t.foo + self.t.bar).as_("biz"))
        self.assertEqual('SELECT "foo"+"bar" "biz" FROM "abc"', str(q))
    def test_functions_using_as(self):
        q = Query.from_(self.t).select(fn.Count("*").as_("foo"))
        self.assertEqual('SELECT COUNT(*) "foo" FROM "abc"', str(q))
    def test_functions_using_constructor_param(self):
        q = Query.from_(self.t).select(fn.Count("*", alias="foo"))
        self.assertEqual('SELECT COUNT(*) "foo" FROM "abc"', str(q))
    def test_function_using_as_nested(self):
        """
        We don't show aliases of fields that are arguments of a function.
        """
        q = Query.from_(self.t).select(fn.Sqrt(fn.Count("*").as_("foo")).as_("bar"))
        self.assertEqual('SELECT SQRT(COUNT(*)) "bar" FROM "abc"', str(q))
    def test_functions_using_constructor_param_nested(self):
        """
        We don't show aliases of fields that are arguments of a function.
        """
        q = Query.from_(self.t).select(fn.Sqrt(fn.Count("*", alias="foo"), alias="bar"))
        self.assertEqual('SELECT SQRT(COUNT(*)) "bar" FROM "abc"', str(q))
    def test_ignored_in_where(self):
        q = Query.from_(self.t).select(self.t.foo).where(self.t.foo.as_("bar") == 1)
        self.assertEqual('SELECT "foo" FROM "abc" WHERE "foo"=1', str(q))
    def test_ignored_in_groupby(self):
        q = Query.from_(self.t).select(self.t.foo).groupby(self.t.foo.as_("bar"))
        self.assertEqual('SELECT "foo" FROM "abc" GROUP BY "foo"', str(q))
    def test_ignored_in_orderby(self):
        q = Query.from_(self.t).select(self.t.foo).orderby(self.t.foo.as_("bar"))
        self.assertEqual('SELECT "foo" FROM "abc" ORDER BY "foo"', str(q))
    def test_ignored_in_criterion(self):
        c = self.t.foo.as_("bar") == 1
        self.assertEqual('"foo"=1', str(c))
    def test_ignored_in_criterion_comparison(self):
        c = self.t.foo.as_("bar") == self.t.fiz.as_("buz")
        self.assertEqual('"foo"="fiz"', str(c))
    def test_ignored_in_field_inside_case(self):
        # Aliases on fields inside a CASE expression are suppressed too.
        q = Query.from_(self.t).select(Case().when(self.t.foo == 1, "a").else_(self.t.bar.as_('"buz"')))
        self.assertEqual('SELECT CASE WHEN "foo"=1 THEN \'a\' ELSE "bar" END FROM "abc"', str(q))
    def test_case_using_as(self):
        q = Query.from_(self.t).select(Case().when(self.t.foo == 1, "a").else_("b").as_("bar"))
        self.assertEqual(
            'SELECT CASE WHEN "foo"=1 THEN \'a\' ELSE \'b\' END "bar" FROM "abc"',
            str(q),
        )
    def test_case_using_constructor_param(self):
        q = Query.from_(self.t).select(Case(alias="bar").when(self.t.foo == 1, "a").else_("b"))
        self.assertEqual(
            'SELECT CASE WHEN "foo"=1 THEN \'a\' ELSE \'b\' END "bar" FROM "abc"',
            str(q),
        )
    def test_select__multiple_tables(self):
        table_abc, table_efg = Table("abc", alias="q0"), Table("efg", alias="q1")
        q = Query.from_(table_abc).select(table_abc.foo).from_(table_efg).select(table_efg.bar)
        self.assertEqual('SELECT "q0"."foo","q1"."bar" FROM "abc" "q0","efg" "q1"', str(q))
    def test_use_aliases_in_groupby_and_orderby(self):
        table_abc = Table("abc", alias="q0")
        my_foo = table_abc.foo.as_("my_foo")
        q = Query.from_(table_abc).select(my_foo, table_abc.bar).groupby(my_foo).orderby(my_foo)
        self.assertEqual(
            'SELECT "q0"."foo" "my_foo","q0"."bar" ' 'FROM "abc" "q0" ' 'GROUP BY "my_foo" ' 'ORDER BY "my_foo"',
            str(q),
        )
    def test_table_with_schema_and_alias(self):
        table = Table("abc", schema="schema", alias="alias")
        self.assertEqual('"schema"."abc" "alias"', str(table))
    def test_null_value_with_alias(self):
        q = Query.select(NullValue().as_("abcdef"))
        self.assertEqual('SELECT NULL "abcdef"', str(q))
class SubqueryTests(unittest.TestCase):
    """Subquery rendering: IN-subqueries, joined subqueries, scalar
    subselects, nested FROM subqueries (auto-aliased "sq0", "sq1", ...),
    and WITH (common table expression) clauses."""
    maxDiff = None
    table_abc, table_efg, table_hij = Tables("abc", "efg", "hij")
    def test_where__in(self):
        q = (
            Query.from_(self.table_abc)
            .select("*")
            .where(
                self.table_abc.foo.isin(
                    Query.from_(self.table_efg).select(self.table_efg.foo).where(self.table_efg.bar == 0)
                )
            )
        )
        self.assertEqual(
            'SELECT * FROM "abc" WHERE "foo" IN (SELECT "foo" FROM "efg" WHERE "bar"=0)',
            str(q),
        )
    def test_where__in_nested(self):
        q = Query.from_(self.table_abc).select("*").where(self.table_abc.foo).isin(self.table_efg.select("*"))
        self.assertEqual('SELECT * FROM "abc" WHERE "foo" IN (SELECT * FROM "efg")', str(q))
    def test_join(self):
        # Joined subqueries are auto-aliased "sq0", "sq1", ... in order.
        subquery = Query.from_("efg").select("fiz", "buz").where(F("buz") == 0)
        q = (
            Query.from_(self.table_abc)
            .join(subquery)
            .on(self.table_abc.bar == subquery.buz)
            .select(self.table_abc.foo, subquery.fiz)
        )
        self.assertEqual(
            'SELECT "abc"."foo","sq0"."fiz" FROM "abc" '
            'JOIN (SELECT "fiz","buz" FROM "efg" WHERE "buz"=0) "sq0" '
            'ON "abc"."bar"="sq0"."buz"',
            str(q),
        )
    def test_select_subquery(self):
        subq = Query.from_(self.table_efg).select("fizzbuzz").where(self.table_efg.id == 1)
        q = Query.from_(self.table_abc).select("foo", "bar").select(subq)
        self.assertEqual(
            'SELECT "foo","bar",(SELECT "fizzbuzz" FROM "efg" WHERE "id"=1) ' 'FROM "abc"',
            str(q),
        )
    def test_select_subquery_with_alias(self):
        subq = Query.from_(self.table_efg).select("fizzbuzz").where(self.table_efg.id == 1)
        q = Query.from_(self.table_abc).select("foo", "bar").select(subq.as_("sq"))
        self.assertEqual(
            'SELECT "foo","bar",(SELECT "fizzbuzz" FROM "efg" WHERE "id"=1) "sq" ' 'FROM "abc"',
            str(q),
        )
    def test_where__equality(self):
        subquery = Query.from_("efg").select("fiz").where(F("buz") == 0)
        query = (
            Query.from_(self.table_abc)
            .select(self.table_abc.foo, self.table_abc.bar)
            .where(self.table_abc.bar == subquery)
        )
        self.assertEqual(
            'SELECT "foo","bar" FROM "abc" ' 'WHERE "bar"=(SELECT "fiz" FROM "efg" WHERE "buz"=0)',
            str(query),
        )
    def test_select_from_nested_query(self):
        subquery = Query.from_(self.table_abc).select(
            self.table_abc.foo,
            self.table_abc.bar,
            (self.table_abc.fizz + self.table_abc.buzz).as_("fizzbuzz"),
        )
        query = Query.from_(subquery).select(subquery.foo, subquery.bar, subquery.fizzbuzz)
        self.assertEqual(
            'SELECT "sq0"."foo","sq0"."bar","sq0"."fizzbuzz" '
            "FROM ("
            'SELECT "foo","bar","fizz"+"buzz" "fizzbuzz" '
            'FROM "abc"'
            ') "sq0"',
            str(query),
        )
    def test_select_from_nested_query_with_join(self):
        subquery1 = (
            Query.from_(self.table_abc)
            .select(
                self.table_abc.foo,
                fn.Sum(self.table_abc.fizz + self.table_abc.buzz).as_("fizzbuzz"),
            )
            .groupby(self.table_abc.foo)
        )
        subquery2 = Query.from_(self.table_efg).select(
            self.table_efg.foo.as_("foo_two"),
            self.table_efg.bar,
        )
        query = (
            Query.from_(subquery1)
            .select(subquery1.foo, subquery1.fizzbuzz)
            .join(subquery2)
            .on(subquery1.foo == subquery2.foo_two)
            .select(subquery2.foo_two, subquery2.bar)
        )
        self.assertEqual(
            "SELECT "
            '"sq0"."foo","sq0"."fizzbuzz",'
            '"sq1"."foo_two","sq1"."bar" '
            "FROM ("
            "SELECT "
            '"foo",SUM("fizz"+"buzz") "fizzbuzz" '
            'FROM "abc" '
            'GROUP BY "foo"'
            ') "sq0" JOIN ('
            "SELECT "
            '"foo" "foo_two","bar" '
            'FROM "efg"'
            ') "sq1" ON "sq0"."foo"="sq1"."foo_two"',
            str(query),
        )
    def test_from_subquery_without_alias(self):
        subquery = Query.from_(self.table_efg).select(
            self.table_efg.base_id.as_("x"), self.table_efg.fizz, self.table_efg.buzz
        )
        test_query = Query.from_(subquery).select(subquery.x, subquery.fizz, subquery.buzz)
        self.assertEqual(
            'SELECT "sq0"."x","sq0"."fizz","sq0"."buzz" '
            "FROM ("
            'SELECT "base_id" "x","fizz","buzz" FROM "efg"'
            ') "sq0"',
            str(test_query),
        )
    def test_join_query_with_alias(self):
        # An explicit .as_() alias overrides the auto-generated "sqN" alias.
        subquery = (
            Query.from_(self.table_efg)
            .select(
                self.table_efg.base_id.as_("x"),
                self.table_efg.fizz,
                self.table_efg.buzz,
            )
            .as_("subq")
        )
        test_query = Query.from_(subquery).select(subquery.x, subquery.fizz, subquery.buzz)
        self.assertEqual(
            'SELECT "subq"."x","subq"."fizz","subq"."buzz" '
            "FROM ("
            'SELECT "base_id" "x","fizz","buzz" FROM "efg"'
            ') "subq"',
            str(test_query),
        )
    def test_with(self):
        sub_query = Query.from_(self.table_efg).select("fizz")
        test_query = Query.with_(sub_query, "an_alias").from_(AliasedQuery("an_alias")).select("*")
        self.assertEqual(
            'WITH an_alias AS (SELECT "fizz" FROM "efg") SELECT * FROM an_alias',
            str(test_query),
        )
    def test_join_with_with(self):
        sub_query = Query.from_(self.table_efg).select("fizz")
        test_query = (
            Query.with_(sub_query, "an_alias")
            .from_(self.table_abc)
            .join(AliasedQuery("an_alias"))
            .on(AliasedQuery("an_alias").fizz == self.table_abc.buzz)
            .select("*")
        )
        self.assertEqual(
            'WITH an_alias AS (SELECT "fizz" FROM "efg") '
            'SELECT * FROM "abc" JOIN an_alias ON "an_alias"."fizz"="abc"."buzz"',
            str(test_query),
        )
    def test_select_from_with_returning(self):
        # A DML statement with RETURNING can serve as the CTE body.
        sub_query = PostgreSQLQuery.into(self.table_abc).insert(1).returning('*')
        test_query = Query.with_(sub_query, "an_alias").from_(AliasedQuery("an_alias")).select("*")
        self.assertEqual(
            'WITH an_alias AS (INSERT INTO "abc" VALUES (1) RETURNING *) SELECT * FROM an_alias', str(test_query)
        )
class QuoteTests(unittest.TestCase):
    def test_extraneous_quotes(self):
        """With quote_char=None no identifier quoting is emitted."""
        left = Table("table1", alias="t1")
        right = Table("table2", alias="t2")
        query = (
            Query.from_(left)
            .join(right)
            .on(left.Value.between(right.start, right.end))
            .select(left.value)
        )
        expected = "SELECT t1.value FROM table1 t1 JOIN table2 t2 ON t1.Value BETWEEN t2.start AND t2.end"
        self.assertEqual(expected, query.get_sql(quote_char=None))
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Xcode 4 template generator
#
# Based on the original code by Ricardo Quesada.
# Modifications & Ugly Hacks by Nicolas Goles D.
#
# LICENSE: MIT
#
# Generates an Xcode4 template given several input parameters.
#
# Format taken from: http://blog.boreal-kiss.net/2011/03/11/a-minimal-project-template-for-xcode-4/
#
# NOTE: Not everything is automated, and some understanding about the Xcode4 template system
# is still needed to use this script properly (read the link above).
# ----------------------------------------------------------------------------
'''
Xcode 4 template generator
'''
__docformat__ = 'restructuredtext'
#Add here whatever you need before your Node
# Opening boilerplate of the generated TemplateInfo.plist (XML plist header).
_template_open_body = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>"""
# Module-level template settings; overwritten from command-line options in
# the __main__ block at the bottom of this file.
_template_description = None
_template_identifier = None
_template_concrete = "yes"
_template_ancestors = None
_template_kind = "Xcode.Xcode3.ProjectTemplateUnitKind"
_template_close_body = "\n</dict>\n</plist>"
_template_plist_name = "TemplateInfo.plist"
_template_shared_settings = None
_template_script = None
_template_script_shell_path = "/bin/bash"
# python imports
import sys
import os
import getopt
import glob
import shutil
class Xcode4Template(object):
    """Builds an Xcode 4 ``.xctemplate`` bundle (a TemplateInfo.plist plus the
    copied source directories) from one or more input directories.

    NOTE(review): this is Python 2 code — ``format_xml`` uses a ``print``
    statement — and will not run under Python 3.
    """
    def __init__( self, directories, group = None, in_output_path = None, ignored_files = None):
        self.currentRootDirectory = None
        self.directories = directories #directory list
        self.ignored_files = ignored_files
        self.files_to_include = []
        self.wildcard = '*'
        self.allowed_extensions = ['h', 'hpp', 'c', 'cpp', 'cc', 'm', 'mm', 'lua', 'png', 'fnt', 'pvr'] # Extensions of files to add to project.
        self.ignore_dir_extensions = ['xcodeproj']
        self.group = group # fixed group name
        self.group_index = 1 # automatic group name taken from path
        self.output = []    # accumulated plist text fragments
        self.output_path = in_output_path
    def scandirs(self, path):
        # Recursively walk `path`, queueing files with allowed extensions and
        # skipping explicitly ignored paths and bundle-like directories
        # (e.g. *.xcodeproj).
        for currentFile in glob.glob(os.path.join(path, self.wildcard)):
            if os.path.isdir(currentFile):
                name_extension = currentFile.split('.')
                extension = name_extension[-1]
                if extension not in self.ignore_dir_extensions and currentFile not in self.ignored_files:
                    self.scandirs(currentFile)
            else:
                self.include_file_to_append(currentFile)
    #
    # append file
    #
    def include_file_to_append(self, currentFile):
        # Queue the file only when its extension is in the allowed list.
        currentExtension = currentFile.split('.')
        if currentExtension[-1] in self.allowed_extensions:
            self.files_to_include.append( currentFile )
    #
    # Helper method to filter files by absolute path when using shutils.copytree
    #
    def ignore_files(self, directory_entered, directory_contents):
        # shutil.copytree `ignore` callback: return the entries of
        # `directory_contents` whose absolute path appears in self.ignored_files.
        should_ignore = []
        for file in self.ignored_files:
            for content in directory_contents:
                if os.path.join(os.path.abspath(directory_entered), content) == file:
                    should_ignore.append(content)
        return should_ignore
    #
    # Change the Absolute Path to a relative path ( starting from directory that scandirs is using )
    #
    def change_path_to_relative( self, absolute_path ):
        return os.path.relpath( absolute_path, os.path.split( os.path.relpath(self.currentRootDirectory) )[0])
    #
    # append the definitions
    #
    def append_definition( self, output_body, path, group ):
        # Emit one <key>/<dict> Definitions entry for a single file.
        # NOTE(review): the `group` parameter is never read — the for-loop
        # below shadows it with path components; looks vestigial.
        output_body.append("\n\t\t<key>%s</key>" % self.change_path_to_relative(path) )
        output_body.append("\t\t<dict>")
        #Fix the absolute path so that the Xcode Groups created for the .xctemplate directory are relative.
        path = self.change_path_to_relative(path)
        groups = path.split('/')
        output_body.append("\t\t\t<key>Group</key>\t\t\t")
        output_body.append("\t\t\t<array>")
        # Every path component except the filename becomes an Xcode group.
        for group in groups[:(len(groups)-1)]:
            output_body.append("\t\t\t\t<string>%s</string>" % group)
        output_body.append("\t\t\t</array>")
        output_body.append("\t\t\t<key>Path</key>\n\t\t\t<string>%s</string>" % path )
        output_body.append("\t\t</dict>")
    #
    # Generate the "Definitions" section
    #
    def generate_definitions( self ):
        output_banner = "\n\n\t<!-- Definitions section -->"
        output_header = "\n\t<key>Definitions</key>"
        output_dict_open = "\n\t<dict>"
        output_dict_close = "\n\t</dict>"
        output_body = []
        for path in self.files_to_include:
            # group name
            group = None
            if self.group is not None:
                group = self.group
            else:
                # obtain group name from directory
                dirs = os.path.dirname(path)
                subdirs = dirs.split('/')
                if self.group_index < len(subdirs):
                    group = subdirs[self.group_index]
                else:
                    # error
                    group = None
            # get the extension
            # NOTE(review): `extension` is computed but never used, and
            # append_definition ignores `group` — both appear vestigial.
            filename = os.path.basename(path)
            name_extension= filename.split('.')
            extension = None
            if len(name_extension) == 2:
                extension = name_extension[1]
            self.append_definition( output_body, path, group )
        self.output.append( output_banner )
        self.output.append( output_header )
        self.output.append( output_dict_open )
        self.output.append( "\n".join( output_body ) )
        self.output.append( output_dict_close )
    #
    # Generates the "Nodes" section
    #
    def generate_nodes( self ):
        # Nodes is a flat array listing every included file's relative path.
        output_banner = "\n\n\t<!-- Nodes section -->"
        output_header = "\n\t<key>Nodes</key>"
        output_open = "\n\t<array>\n"
        output_close = "\n\t</array>"
        output_body = []
        for path in self.files_to_include:
            output_body.append("\t\t<string>%s</string>" % self.change_path_to_relative(path) )
        self.output.append( output_banner )
        self.output.append( output_header )
        self.output.append( output_open )
        self.output.append( "\n".join( output_body ) )
        self.output.append( output_close )
    #
    # Format the output .plist string
    #
    def format_xml( self ):
        # Assemble the full TemplateInfo.plist contents into self.output,
        # driven by the module-level _template_* settings.
        self.output.append( _template_open_body )
        if _template_description or _template_identifier or _template_kind:
            self.output.append ("\n\t<!--Header Section-->")
        if _template_description != None:
            self.output.append( "\n\t<key>Description</key>\n\t<string>%s</string>" % _template_description )
        if _template_identifier:
            self.output.append( "\n\t<key>Identifier</key>\n\t<string>%s</string>" % _template_identifier )
        self.output.append( "\n\t<key>Concrete</key>")
        if _template_concrete.lower() == "yes":
            self.output.append( "\n\t<string>True</string>" )
        elif _template_concrete.lower() == "no":
            self.output.append( "\n\t<string>False</string>" )
        self.output.append( ("\n\t<key>Kind</key>\n\t<string>%s</string>" % _template_kind) )
        if _template_ancestors:
            self.output.append("\n\t<key>Ancestors</key>\n\t<array>")
            ancestors = _template_ancestors.split(" ")
            for ancestor in ancestors:
                self.output.append("\n\t\t<string>%s</string>" % str(ancestor))
            self.output.append("\n\t</array>")
        if _template_shared_settings:
            self.output.append("\n\t<key>Project</key>")
            self.output.append("\n\t<array>\n\t\t<dict>")
            self.output.append("\n\t\t\t<key>SharedSettings</key>")
            self.output.append("\n\t\t\t<dict>")
            shared_settings = _template_shared_settings.split(" ")
            if len(shared_settings) % 2 != 0:
                print "Shared Settings parameters should be an even number (use '*' if only key is needed)"
                sys.exit(-1)
            # NOTE(review): this walks every index (step 1), not key/value
            # pairs (step 2); a value also gets emitted as a key on the next
            # iteration unless it is "*" — confirm whether
            # range(0, len(shared_settings), 2) was intended.
            for i in range( len(shared_settings) - 1 ) :
                if( str(shared_settings[i]) != "*"):
                    self.output.append("\n\t\t\t\t<key>%s</key>" % str(shared_settings[i]))
                    if (shared_settings[i+1] == "*"):
                        self.output.append("\n\t\t\t\t<string></string>")
                    else:
                        self.output.append("\n\t\t\t\t<string>%s</string>" % str(shared_settings[i+1]))
            self.output.append("\n\t\t\t</dict>\n\t\t</dict>\n\t</array>")
        if _template_script:
            script_file = open(_template_script, 'r')
            # NOTE(review): open() raises IOError on failure rather than
            # returning a falsy object, so this guard can never trigger.
            if not script_file:
                sys.exit("Error reading " + _template_script)
            self.output.append("\n\t<key>Targets</key>")
            self.output.append("\n\t<array>\n\t\t<dict>")
            self.output.append("\n\t\t\t<key>BuildPhases</key>")
            self.output.append("\n\t\t\t<array>\n\t\t\t\t<dict>")
            self.output.append("\n\t\t\t\t\t<key>Class</key>")
            self.output.append("\n\t\t\t\t\t<string>ShellScript</string>")
            self.output.append("\n\t\t\t\t\t<key>ShellPath</key>")
            self.output.append("\n\t\t\t\t\t<string>"+ _template_script_shell_path +"</string>")
            self.output.append("\n\t\t\t\t\t<key>ShellScript</key>")
            self.output.append("\n\t\t\t\t\t<string>\n\t\t\t\t\t\t" + script_file.read() + "\n\t\t\t\t\t</string>")
            self.output.append("\n\t\t\t</dict>\n\t\t\t\t</array>")
            self.output.append("\n\t</dict>\n\t\t</array>")
        self.generate_definitions()
        self.generate_nodes()
        self.output.append( _template_close_body )
    #
    # Create "TemplateInfo.plist" file.
    #
    def write_xml( self ):
        # Write the accumulated fragments to TemplateInfo.plist in the cwd;
        # pack_template_dir moves it into the bundle afterwards.
        FILE = open( _template_plist_name, "w" )
        FILE.writelines( self.output )
        FILE.close()
    #
    # Generates the template directory.
    #
    def pack_template_dir ( self, full_output_path ):
        # Copy every input directory into <output>.xctemplate/ and move the
        # generated TemplateInfo.plist alongside them.
        if full_output_path is None:
            full_output_path = os.path.abspath("./UntitledTemplate")
        (template_path, template_name) = os.path.split( os.path.normpath(full_output_path) )
        for directory in self.directories:
            (_, base_dir) = os.path.split(directory)
            # Ensure the output directory carries the .xctemplate extension.
            if(os.path.splitext(template_name)[1] != ".xctemplate"):
                full_output_path = os.path.join(template_path, template_name + ".xctemplate")
            target_dir = os.path.normpath(full_output_path) + "/" + base_dir
            shutil.copytree(directory,
                            target_dir,
                            ignore = self.ignore_files)
        shutil.move("TemplateInfo.plist", os.path.normpath(full_output_path))
    #
    # Scan Dirs, format & write.
    #
    def generate( self ):
        # Scan every input directory, then emit TemplateInfo.plist.
        for aDirectory in self.directories:
            self.currentRootDirectory = aDirectory
            self.scandirs(aDirectory)
        self.format_xml()
        self.write_xml()
def help():
    # Print CLI usage (Python 2 print statements) and exit with a non-zero
    # status. NOTE(review): shadows the builtin help().
    print "%s v1.1 - Xcode 4 Template Generator v1.1" % sys.argv[0]
    print "Usage:"
    print "\t-c concrete (concrete or \"abstract\" Xcode template)"
    print "\t-d one or more space separated directories (e.g -d \"box2d/ someLib/ someOtherLib/\")"
    print "\t-g group (group name for Xcode template)"
    print "\t-o output (output path)"
    print "\t--script Specify Script to be run in the compile phases"
    print "\t--shell_path Specify script shell path (defaults to /bin/bash)"
    print "\t--description \"Xcode Template description\""
    print "\t--identifier (string to identify this template)"
    print "\t--ancestors (string separated by spaces containing all ancestor ids)"
    print "\t--settings Specify build settings for the project (experimental)"
    print "\t--ignore_files Specify a space separated list of files to ignore (e.g \"ignore/a/dir ignore/some/file.txt\")"
    print "\nExample:"
    print "\t%s -d cocos2d --description \"This is my template\" -i com.yoursite.template --ancestors com.yoursite.ancestor1 -c no --settings \"GCC_THUMB_SUPPORT[arch=armv6] *\" " % sys.argv[0]
    sys.exit(-1)
if __name__ == "__main__":
    # Command-line entry point (Python 2: `except E,e` and print statements).
    if len( sys.argv ) == 1:
        help()
    directories = []
    ignored_files = []
    group = None
    output = None
    argv = sys.argv[1:]
    try:
        opts, args = getopt.getopt(argv, "d:g:i:a:c:o:", ["directories=","group=", "identifier=", "ancestors=", "concrete=", "output=", "settings=", "description=", "ignore_files=", "script=", "shell_path="])
        for opt, arg in opts:
            # NOTE(review): getopt declares the long form "directories=" but
            # this branch tests "--directory", so --directories is silently
            # ignored; only -d actually works.
            if opt in ("-d","--directory"):
                for directory in arg.split(" "):
                    directory = os.path.abspath(directory)
                    directories.append(directory)
            elif opt in ("-g","--group"):
                group = arg
            elif opt in ("-o", "--output"):
                output = arg
            # NOTE(review): ("--description") is a plain string, not a tuple,
            # so `in` performs substring matching here and in the branches
            # below — e.g. "-i" matches because it is a substring of
            # "--identifier". It happens to work but is fragile.
            elif opt in ("--description"):
                _template_description = arg
            elif opt in ("--identifier"):
                _template_identifier = arg
            elif opt in ("--ancestors"):
                _template_ancestors = arg
            elif opt in ("-c", "--concrete"):
                _template_concrete = arg
            # NOTE(review): "s:" is not in the short-option spec above, so
            # "-s" is never produced; only --settings reaches this branch.
            elif opt in ("-s", "--settings"):
                _template_shared_settings = arg
            elif opt in ("--ignore_files"):
                for directory in arg.split(" "):
                    directory = os.path.abspath(directory.strip('/'))
                    ignored_files.append(directory)
            elif opt in ("--script"):
                _template_script = arg
            elif opt in ("--shell_path"):
                _template_script_shell_path = arg
    except getopt.GetoptError,e:
        print e
    # NOTE(review): `directory` is the loop variable from above — this raises
    # NameError when -d was never supplied; `if not directories:` was most
    # likely intended.
    if directory == None:
        help()
    gen = Xcode4Template(directories, group, output, ignored_files)
    gen.generate()
    gen.pack_template_dir(output)
| |
#!/usr/bin/env python
#
# The MIT License
#
# Copyright (c) 2010 Kyle Conroy
#
# (Python 3 compatability fixes made by Mark Nenadov)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
PY3 = sys.version_info[0] == 3
import json
import logging
import multiprocessing
import os
import argparse
import urllib
if PY3:
from urllib.request import urlretrieve
from urllib.request import urlopen
from urllib.error import HTTPError
import urllib.parse as urlparse
else:
from urllib import urlretrieve
from urllib import urlopen
from urllib2 import HTTPError
from urlparse import urlparse
USER_FIELDS = [
"first_name", "last_name", "gender", "username", "email",
"about", "bio", "birthday", "education", "hometown", "sports",
"relationship_status", "religion", "website", "work",
]
# Async Functions
def save_file(url, filename):
    """Download *url* to *filename*.

    Failures are logged rather than raised so one bad URL does not abort the
    whole async download batch.

    Args:
        url: The url of the photo
        filename: The filename of the new photo
    """
    # Lazy %-style logging args avoid formatting when the level is disabled,
    # and fix the missing separator in the old "Saving" + filename message.
    logging.debug("Saving %s", filename)
    try:
        urlretrieve(url, filename)
    except HTTPError:
        logging.error("Could not open url:%s", url)
def save_note(note, filename):
    """Save a note to a text file (subject, blank line, then message).

    Failures are logged rather than raised so one bad note does not abort the
    whole async batch.

    Args:
        note: A note object from the Facebook Graph API
        filename: The path for the new note
    """
    logging.debug("Saving note %s" % filename)
    try:
        # `with` guarantees the handle is closed on any exit path. The old
        # code called f.close() inside a bare `except:`, which raised
        # NameError whenever open() itself failed (f was never bound) and
        # double-closed the file otherwise.
        with open(filename, 'w') as f:
            f.write(note["subject"].encode('utf-8'))
            f.write("\n")
            f.write(note["message"].encode('utf-8'))
    except Exception:
        # Matches the original best-effort behavior, but no longer swallows
        # KeyboardInterrupt/SystemExit the way a bare `except:` did.
        logging.error("Could not save note %s" % filename)
class Scrapebook(object):
    """ Scrapebook downloads all your data from Facebook to your computer.
    Scrapebook connects to the Facebook Graph API.
    """
    # NOTE(review): several methods below use Python 2 `print` statements and
    # api_request calls urllib.urlencode (Python 2 only), so this class is
    # effectively Python-2-only despite the PY3 import shims above.
    def __init__(self, token, resources=None):
        """Create a new Scrape book object.
        Args:
            token: A long and unintelligible key for the Graph API
        """
        # NOTE(review): self.base is never used; api_request hard-codes the host.
        self.base = "https://graph.facebook.com"
        # Worker pool used to run downloads concurrently via apply_async.
        self.pool = multiprocessing.Pool(processes=35)
        self.token = token
        self.resources = resources or ["photos", "friends", "videos", "notes"]
    def _clean(self, s):
        """Returns a safe and clean filename for any given string
        Args:
            string: The string to be cleaned
        Returns:
            A cleaned strings, suitable for a filename
        """
        # Keeps only alphanumerics; spaces, slashes and dots are all dropped.
        return "".join([x for x in s if x.isalpha() or x.isdigit()])
    def _create_dir(self, *args):
        """Create a directory inside the Facebook directory.
        Will not complain if the directory already exists.
        Args:
            Various directory names
        Returns:
            The path to the new directory
        """
        path = os.path.join(*args)
        path = os.path.join(os.curdir, path)
        if not os.path.isdir(path):
            logging.debug("Creating directory: %s" % path)
            # os.mkdir only creates the leaf; run() creates "facebook" first,
            # so parent directories exist by the time this is called.
            os.mkdir(path)
        return path
    def api_request(self, path, limit=10000, params=None):
        """Perform a Facebook Graph API request and return the data.
        The returned JSON is parsed into native Python objects before being
        returned.
        Args:
            path: relative resource url
            limit: number of results to return. Default 10000
        Returns:
            A dictionary. If an error occured, the returned dictionary is empty
        """
        params = params or {}
        # BUG(review): the `limit` argument is ignored — this always sends
        # 10000. Presumably `params["limit"] = limit` was intended.
        params["limit"] = 10000
        # NOTE(review): urllib.urlencode exists only on Python 2; under the
        # PY3 branch above this raises AttributeError (the py3 spelling,
        # given the import alias, would be urlparse.urlencode).
        url = ("https://graph.facebook.com/%s?access_token=%s&%s" %
               (path, self.token, urllib.urlencode(params)))
        logging.debug(url)
        try:
            data = urlopen(url)
            if PY3:
                json_data = str(data.read(), 'utf-8')
            else:
                json_data = data.read()
            data = json.loads(json_data)
        except HTTPError:
            logging.error("Could not retreive %s" % url)
            data = {}
        # The Graph API reports errors in-band; normalize them to {}.
        if "error" in data:
            error = data["error"]
            logging.error("{}: {}".format(error["type"], error["message"]))
            data = {}
        return data
    def scrape_photos(self):
        """Scrape all tagged photos and uploaded albums"""
        photo_dir = self._create_dir("facebook", "photos")
        albums = self.api_request("/me/albums")
        if not albums:
            print "Error: Could not scrape photo data"
            return
        photo_albums = [("/%s/photos" % a["id"], a["name"])
                        for a in albums["data"]]
        # "/me/photos" holds photos the user is tagged in.
        photo_albums.append(("/me/photos", "me"))
        for album in photo_albums:
            url, name = album
            name = self._clean(name)
            album_dir = self._create_dir("facebook", "photos", name)
            photos = self.api_request(url)
            if not photos:
                print "Error: Could not download album"
                continue
            for i, photo in enumerate(photos["data"]):
                purl = photo["source"]
                filename = os.path.join(album_dir, "%s_%d.jpg" % (name, i))
                # Download in the background; run() joins the pool at the end.
                self.pool.apply_async(save_file, [purl, filename])
    def scrape_videos(self):
        """Scrape all tagged videos and uploaded videos"""
        videos_dir = self._create_dir("facebook", "videos")
        videos = self.api_request("/me/videos/uploaded")
        tags = self.api_request("/me/videos")
        if not videos or not tags:
            print "Error: Could not scrape your movies"
            return
        for video in videos["data"] + tags["data"]:
            name = self._clean(video["name"])
            # NOTE(review): under the PY3 imports `urlparse` is a module, not
            # a callable — this line only works on Python 2.
            fn, ext = os.path.splitext(urlparse(video["source"]).path)
            vurl = video["source"]
            filename = os.path.join(videos_dir, "%s%s" % (name, ext))
            self.pool.apply_async(save_file, [vurl, filename])
    def scrape_notes(self):
        """Scrape all notes a user composed or a user is tagged in."""
        notes_dir = self._create_dir("facebook", "notes")
        notes = self.api_request("/me/notes")
        if not notes:
            print "Error: Could not scrape your notes"
            return
        for n in notes["data"]:
            # First 15 chars of the subject (cleaned) become the filename.
            title = self._clean(n["subject"][:15])
            filename = os.path.join(notes_dir, "%s.txt" % title)
            self.pool.apply_async(save_note, [n, filename])
    def scrape_friends(self):
        """Scrape all friends. Stored in JSON objects"""
        friends_file = os.path.join("facebook", "friends.json")
        options = {"fields": ",".join(USER_FIELDS)}
        friends = self.api_request("/me/friends", params=options)
        if not friends:
            print "Error: Could not scrape your friends"
            return
        # NOTE(review): the handle from open() is never closed explicitly.
        json.dump(friends["data"], open(friends_file, "w"))
    def run(self):
        # Scrape every requested resource, then wait for queued downloads.
        self._create_dir("facebook")
        if "photos" in self.resources:
            self.scrape_photos()
        if "notes" in self.resources:
            self.scrape_notes()
        if "videos" in self.resources:
            self.scrape_videos()
        if "friends" in self.resources:
            self.scrape_friends()
        self.pool.close()
        self.pool.join()
def main():
    """Parse command-line flags, then run a Scrapebook session."""
    usage = (
        "To get your authtoken, head over to http://developers."
        "facebook.com/docs/api, click on the https://graph.facebook."
        "com/me/photos link, and copy the auth token in the url to "
        "the command line"
    )
    parser = argparse.ArgumentParser(description="Facebook shouldn't own your soul")
    parser.add_argument("-t", "--token", dest="token", help=usage)
    parser.add_argument(
        "-d",
        "--debug",
        dest="debug",
        action="store_true",
        help="Turn on debug information",
    )
    parser.add_argument(
        "resources", type=str, nargs="*", default=None, help="resources to scrape"
    )
    args = parser.parse_args()
    # The token is mandatory; bail out with the standard argparse error.
    if not args.token:
        parser.error("option -t is required")
    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    Scrapebook(args.token, resources=args.resources).run()
if __name__ == '__main__':
main()
| |
"""All models are direct mappings to the WePay objects. By default only the
fields that correspond to the values returned from WePay lookup calls
(ex. `/account <https://www.wepay.com/developer/reference/account#lookup>`_) are
included in the models. All fields follow the rules outlined in `Storing Data
<https://www.wepay.com/developer/reference/storing_data>`_, unless otherwise
specified in object's documentation. For that reason values, which have there
names end with '_uri' (ex. ``account_uri``) are not included as model fields,
instead they are added as dynamic cached object properties, which are inherited
from Api objects defined in :mod:`djwepay.api`.
"""
from django.db import models
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from djwepay.api import *
from djwepay.fields import MoneyField
from djwepay.managers import *
from json_field import JSONField
__all__ = [
'App', 'User', 'Account', 'Checkout', 'Preapproval', 'Withdrawal',
'CreditCard', 'SubscriptionPlan', 'Subscription', 'SubscriptionCharge',
'get_wepay_model_name', 'get_wepay_model'
]
@python_2_unicode_compatible
class BaseModel(models.Model):
    """Abstract base for all WePay models: creation/modification timestamps,
    newest-first default ordering, and a uniform __str__."""
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    class Meta:
        abstract = True
        ordering = ['-date_created']
    def save(self, *args, **kwargs):
        ''' On save, update timestamps '''
        # NOTE(review): auto_now/auto_now_add already manage these fields at
        # save time in Django, so these manual assignments look redundant —
        # confirm whether they exist for a bulk/manual code path before
        # removing them.
        self.date_modified = timezone.now()
        if not self.date_created:
            self.date_created = self.date_modified
        return super(BaseModel, self).save(*args, **kwargs)
    def __str__(self):
        # Relies on every concrete subclass defining a `state` field (all the
        # models in this module do).
        return "%s: %s - %s" % (self._meta.verbose_name, self.pk, self.state)
class App(AppApi, BaseModel):
    """
    This model stores all of the relevant WePay application information. Only one
    instance of it at a time is supported per django application, which is
    controlled by :ref:`WEPAY_APP_ID` setting.
    """
    # fields returned with a lookup call
    client_id = models.BigIntegerField(primary_key=True)
    status = models.CharField(max_length=255)
    state = models.CharField(max_length=255)
    api_version = models.CharField(max_length=255)
    theme_object = JSONField(null=True, blank=True)
    gaq_domains = JSONField(null=True, blank=True)
    # Administrative objects attached to account, they are null=True just
    # for initialization of the App, but are required for proper functionality.
    account = models.ForeignKey(
        get_wepay_model_name('account'), related_name='apps', null=True,
        help_text="Account attached to App where you can collect money.")
    user = models.ForeignKey(
        get_wepay_model_name('user'), related_name='apps', null=True,
        help_text="Owner of this App")
    # NOTE(review): stored in plain text — treat database access as sensitive.
    client_secret = models.CharField(max_length=255)
    objects = AppManager()
    class Meta(BaseModel.Meta):
        abstract = is_abstract('app')
        db_table = 'djwepay_app'
        verbose_name = 'WePay App'
class User(UserApi, BaseModel):
    """Mapping of the WePay `/user` object."""
    user_id = models.BigIntegerField(primary_key=True)
    app = models.ForeignKey(
        get_wepay_model_name('app'), related_name='users', null=True)
    user_name = models.CharField(max_length=255)
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    email = models.EmailField(max_length=255)
    state = models.CharField(max_length=255)
    # access_token=None means it has been revoked.
    access_token = models.CharField(null=True, max_length=255)
    # Class constant, not a DB column.
    token_type = "BEARER"
    expires_in = models.BigIntegerField(null=True, blank=True)
    objects = UserManager()
    class Meta(BaseModel.Meta):
        abstract = is_abstract('user')
        db_table = 'djwepay_user'
        verbose_name = 'WePay User'
    @property
    def full_email(self):
        """Return ``"user_name <email>"`` suitable for an email header."""
        return "%s <%s>" % (self.user_name, self.email)
class Account(AccountApi, BaseModel):
    """Mapping of the WePay `/account` object."""
    account_id = models.BigIntegerField(primary_key=True)
    user = models.ForeignKey(
        get_wepay_model_name('user'), related_name='accounts', null=True)
    name = models.CharField(max_length=255)
    state = models.CharField(max_length=255)
    description = models.CharField(max_length=255)
    reference_id = models.CharField(max_length=255, blank=True)
    gaq_domains = JSONField(null=True, blank=True)
    theme_object = JSONField(null=True, blank=True)
    type = models.CharField(max_length=255)
    create_time = models.BigIntegerField(null=True)
    balances = JSONField(null=True, blank=True)
    statuses = JSONField(null=True, blank=True)
    action_reasons = JSONField(null=True, blank=True)
    country = models.CharField(max_length=2)
    currencies = JSONField(null=True, blank=True)
    # Expose WePay's `owner_user_id` value as a property backed by the `user`
    # ForeignKey, so API responses can be assigned onto the model directly.
    def _get_owner_user_id(self):
        return self.user_id
    def _set_owner_user_id(self, value):
        # Re-link only when the id actually changed; ids with no matching
        # local User are silently ignored and the FK is left as-is.
        if self.user is None or self.user_id != value:
            try:
                user = User.objects.get(user_id=value)
                self.user = user
            except User.DoesNotExist: pass
    owner_user_id = property(_get_owner_user_id, _set_owner_user_id)
    objects = AccountManager()
    class Meta(BaseModel.Meta):
        abstract = is_abstract('account')
        db_table = 'djwepay_account'
        verbose_name = 'WePay Account'
class Checkout(CheckoutApi, BaseModel):
    """Mapping of the WePay `/checkout` object (a single payment)."""
    checkout_id = models.BigIntegerField(primary_key=True)
    account = models.ForeignKey(
        get_wepay_model_name('account'), related_name='checkouts')
    preapproval = models.ForeignKey(
        get_wepay_model_name('preapproval'), related_name='checkouts', null=True)
    state = models.CharField(max_length=255)
    soft_descriptor = models.CharField(max_length=255)
    short_description = models.CharField(max_length=255)
    long_description = models.CharField(max_length=2047, blank=True)
    # Class constant, not a DB column.
    currency = "USD"
    amount = MoneyField(null=True)
    fee = MoneyField(null=True)
    gross = MoneyField(null=True)
    app_fee = MoneyField(null=True)
    fee_payer = models.CharField(max_length=255)
    reference_id = models.CharField(max_length=255, blank=True)
    payer_email = models.EmailField(max_length=255, blank=True)
    payer_name = models.CharField(max_length=255, blank=True)
    cancel_reason = models.CharField(max_length=255, blank=True)
    refund_reason = models.CharField(max_length=255, blank=True)
    auto_capture = models.BooleanField(default=True)
    require_shipping = models.BooleanField(default=False)
    shipping_address = JSONField(null=True)
    tax = MoneyField(null=True)
    amount_refunded = MoneyField(null=True)
    amount_charged_back = MoneyField(null=True)
    create_time = models.BigIntegerField(null=True)
    mode = models.CharField(max_length=255)
    objects = AccountObjectsManager()
    class Meta(BaseModel.Meta):
        abstract = is_abstract('checkout')
        db_table = 'djwepay_checkout'
        verbose_name = 'WePay Checkout'
class Preapproval(PreapprovalApi, BaseModel):
    """Mapping of the WePay `/preapproval` object (recurring-payment consent)."""
    preapproval_id = models.BigIntegerField(primary_key=True)
    app = models.ForeignKey(
        get_wepay_model_name('app'), null=True, related_name='preapprovals')
    account = models.ForeignKey(
        get_wepay_model_name('account'), null=True, related_name='preapprovals')
    short_description = models.CharField(max_length=255)
    long_description = models.CharField(max_length=2047, blank=True)
    # Class constant, not a DB column.
    currency = "USD"
    amount = MoneyField(null=True)
    fee_payer = models.CharField(max_length=255)
    state = models.CharField(max_length=255)
    app_fee = MoneyField(null=True)
    period = models.CharField(max_length=255)
    frequency = models.IntegerField(null=True)
    start_time = models.BigIntegerField(null=True)
    end_time = models.BigIntegerField(null=True)
    reference_id = models.CharField(max_length=255)
    shipping_address = JSONField(null=True)
    shipping_fee = MoneyField(null=True)
    tax = MoneyField(null=True)
    auto_recur = models.BooleanField(default=False)
    payer_name = models.CharField(max_length=255)
    payer_email = models.EmailField(max_length=255, blank=True)
    create_time = models.BigIntegerField(null=True)
    next_due_time = models.BigIntegerField(null=True)
    last_checkout = models.ForeignKey(
        get_wepay_model_name('checkout'), null=True, related_name='+')
    last_checkout_time = models.BigIntegerField(null=True)
    mode = models.CharField(max_length=255)
    objects = PreapprovalManager()
    class Meta(BaseModel.Meta):
        abstract = is_abstract('preapproval')
        db_table = 'djwepay_preapproval'
        verbose_name = 'WePay Preapproval'
class Withdrawal(WithdrawalApi, BaseModel):
    """Mapping of the WePay `/withdrawal` object (money moved out of an account)."""
    withdrawal_id = models.BigIntegerField(primary_key=True)
    account = models.ForeignKey(
        get_wepay_model_name('account'), related_name='withdrawals')
    state = models.CharField(max_length=255)
    amount = MoneyField(null=True)
    note = models.CharField(max_length=255)
    recipient_confirmed = models.NullBooleanField()
    type = models.CharField(max_length=255)
    create_time = models.BigIntegerField(null=True)
    capture_time = models.BigIntegerField(null=True)
    objects = AccountObjectsManager()
    class Meta(BaseModel.Meta):
        abstract = is_abstract('withdrawal')
        db_table = 'djwepay_withdrawal'
        # Fixed copy-paste error: this previously read 'WePay Preapproval',
        # which mislabeled Withdrawal rows in the admin and other displays.
        verbose_name = 'WePay Withdrawal'
class CreditCard(CreditCardApi, BaseModel):
    """Mapping of the WePay `/credit_card` object (tokenized card on file)."""
    credit_card_id = models.BigIntegerField(primary_key=True)
    app = models.ForeignKey(
        get_wepay_model_name('app'), related_name='credit_cards')
    credit_card_name = models.CharField(max_length=255)
    state = models.CharField(max_length=255)
    user_name = models.CharField(max_length=255)
    email = models.CharField(max_length=255, blank=True)
    reference_id = models.CharField(max_length=255, blank=True)
    create_time = models.BigIntegerField(null=True)
    input_source = models.CharField(max_length=255, blank=True)
    virtual_terminal_mode = models.CharField(max_length=255, blank=True)
    expiration_month = models.IntegerField(null=True)
    expiration_year = models.IntegerField(null=True)
    # Only the last four digits are stored; no PAN ever touches this model.
    last_four = models.CharField(max_length=255, blank=True)
    class Meta(BaseModel.Meta):
        abstract = is_abstract('credit_card')
        db_table = 'djwepay_credit_card'
        verbose_name = 'WePay Credit Card'
class SubscriptionPlan(SubscriptionPlanApi, BaseModel):
    """Mapping of the WePay `/subscription_plan` object."""
    subscription_plan_id = models.BigIntegerField(primary_key=True)
    account = models.ForeignKey(
        get_wepay_model_name('account'), related_name='subscription_plans')
    name = models.CharField(max_length=255)
    short_description = models.CharField(max_length=2047)
    currency = models.CharField(max_length=3)
    amount = MoneyField(null=True)
    period = models.CharField(max_length=255)
    app_fee = MoneyField(null=True)
    fee_payer = models.CharField(max_length=255)
    state = models.CharField(max_length=255)
    create_time = models.BigIntegerField(null=True)
    number_of_subscriptions = models.BigIntegerField(null=True)
    trial_length = models.BigIntegerField(null=True)
    setup_fee = MoneyField(null=True)
    reference_id = models.CharField(max_length=255)
    objects = AccountObjectsManager()
    class Meta(BaseModel.Meta):
        abstract = is_abstract('subscription_plan')
        db_table = 'djwepay_subscription_plan'
        verbose_name = 'WePay Subscription Plan'
class Subscription(SubscriptionApi, BaseModel):
    """Django model storing WePay subscription objects."""
    subscription_id = models.BigIntegerField(primary_key=True)  # WePay id
    subscription_plan = models.ForeignKey(
        get_wepay_model_name('subscription_plan'), related_name='subscriptions')
    payer_name = models.CharField(max_length=255)
    payer_email = models.CharField(max_length=255)
    currency = models.CharField(max_length=255)
    amount = MoneyField(null=True)
    period = models.CharField(max_length=255)
    app_fee = MoneyField(null=True)
    fee_payer = models.CharField(max_length=255)
    state = models.CharField(max_length=255)
    create_time = models.BigIntegerField(null=True)  # raw integer timestamp
    payment_method_id = models.BigIntegerField(null=True)
    payment_method_type = models.CharField(max_length=255)
    quantity = models.BigIntegerField(null=True)
    mode = models.CharField(max_length=255)
    trial_days_remaining = models.BigIntegerField(null=True)
    transition_expire_time = models.BigIntegerField(null=True)
    transition_prorate = models.NullBooleanField()
    transition_quantity = models.BigIntegerField(null=True)
    # Second FK to the same model: the plan being transitioned to. The
    # distinct related_name avoids clashing with `subscription_plan` above.
    transition_subscription_plan = models.ForeignKey(
        get_wepay_model_name('subscription_plan'),
        related_name='transition_subscriptions')
    reference_id = models.CharField(max_length=255)
    objects = SubscriptionManager()  # custom manager; see SubscriptionManager

    class Meta(BaseModel.Meta):
        abstract = is_abstract('subscription')
        db_table = 'djwepay_subscription'
        verbose_name = 'WePay Subscription'
class SubscriptionCharge(SubscriptionChargeApi, BaseModel):
    """Django model storing WePay subscription_charge objects."""
    subscription_charge_id = models.BigIntegerField(primary_key=True)  # WePay id
    subscription_plan = models.ForeignKey(
        get_wepay_model_name('subscription_plan'), related_name='subscription_charges')
    subscription = models.ForeignKey(
        get_wepay_model_name('subscription'), related_name='subscription_charges')
    # Field name fixed by the WePay API; shadows the builtin only as an attribute.
    type = models.CharField(max_length=255)
    amount = MoneyField(null=True)
    currency = models.CharField(max_length=3)  # 3-letter currency code
    fee = MoneyField(null=True)
    app_fee = MoneyField(null=True)
    gross = MoneyField(null=True)
    quantity = models.BigIntegerField(null=True)
    amount_refunded = MoneyField(null=True)
    amount_charged_back = MoneyField(null=True)
    state = models.CharField(max_length=255)
    create_time = models.BigIntegerField(null=True)   # raw integer timestamps
    end_time = models.BigIntegerField(null=True)
    prorate_time = models.BigIntegerField(null=True)

    class Meta(BaseModel.Meta):
        abstract = is_abstract('subscription_charge')
        db_table = 'djwepay_subscription_charge'
        verbose_name = 'WePay Subscription Charge'
| |
'''
Weather station:
One script to rule them all...
HMH - 18/07/2018
'''
import sys,time,os
import Adafruit_DHT, Adafruit_MCP3008
import Adafruit_GPIO.SPI as SPI
import RPi.GPIO as GPIO
import spidev
import numpy as np
from gpiozero import DigitalInputDevice
from time import sleep
#import math
#import subprocess
import datetime,requests,json
import smtplib
from email.mime.text import MIMEText
import simple_read_windspeed as srw
#import analog_read as ar
# The aqi helper talks to the USB-attached dust sensor; keep the station
# running without it when the dongle is unplugged.
try:
    import aqi
except Exception:
    # Bug fix: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit during startup.
    print('USB not connected')
import platform, string
def sendemail(from_addr, to_addr_list,
              subject, message,
              login, password,
              smtpserver='smtp.gmail.com:587'):
    """Send a plain-text email through an SMTP relay (STARTTLS + login).

    Returns smtplib's dict of refused recipients — empty when everyone
    accepted the message.
    """
    header_lines = ['From: %s' % from_addr,
                    'To: %s' % ','.join(to_addr_list),
                    'Subject: %s' % subject]
    payload = '\n'.join(header_lines) + '\n\n' + message
    server = smtplib.SMTP(smtpserver)
    server.starttls()
    server.login(login, password)
    problems = server.sendmail(from_addr, to_addr_list, payload)
    server.quit()
    return problems
def get_temp_hum(sensor, pin):
    """Take 10 DHT readings and return the median (humidity, temperature).

    The median makes the result robust against the occasional bad read
    this sensor family produces.
    """
    sample_count = 10
    h_samples = np.zeros(sample_count)
    t_samples = np.zeros(sample_count)
    for i in range(sample_count):
        h_samples[i], t_samples[i] = Adafruit_DHT.read_retry(sensor, pin)
    return np.median(h_samples), np.median(t_samples)
def windspeed_helper():
    """Sample the anemometer for 30 seconds.

    Returns the list of instantaneous wind speeds, one per 5-second
    counting window (units per srw.get_windspeed, presumably kph —
    confirm against simple_read_windspeed).
    """
    wind_speed_sensor = srw.DigitalInputDevice(5)
    wind_speed_sensor.when_activated = srw.spin
    time_interval = 0.5 * 60  # total sampling time, seconds
    start = time.time()
    now = time.time()
    wind_array = []
    while now < start + time_interval:
        srw.count = 0      # reset the revolution counter for this window
        srw.sleep(5)       # let the cups spin for 5 s
        wind_array.append(srw.get_windspeed())
        now = time.time()
    # Bug fix: removed the dead local `count = 0` and the unreachable
    # `if count == 1: instantaneous_windspeed = 0.0` branch — `count`
    # was never modified, so the branch could never fire.
    return wind_array
def dust_helper():
    """Wake the dust sensor, take ~15 readings over ~30 s, put it back to sleep.

    Returns (pm10_readings, pm25_readings) as two lists; readings that the
    sensor failed to produce are skipped.
    """
    pm25_readings = []
    pm10_readings = []
    aqi.cmd_set_sleep(0)   # wake the sensor
    aqi.cmd_set_mode(1)
    for _ in range(15):
        sample = aqi.cmd_query_data()
        if sample is not None:
            pm25_readings.append(sample[0])
            pm10_readings.append(sample[1])
        time.sleep(2)
    aqi.cmd_set_mode(0)    # back to sleep to spare the fan/laser
    aqi.cmd_set_sleep()
    return pm10_readings, pm25_readings
def read_analog(numSamples, pinVal):
    """Sample one MCP3008 ADC channel `numSamples` times, one second apart.

    Pulses the LED pin low/high around each read and converts the raw
    10-bit count to volts against a 5 V reference. On Ctrl-C the GPIO
    state is cleaned up and whatever was collected so far is returned.
    """
    # Hardware SPI configuration:
    SPI_PORT = 0
    SPI_DEVICE = 0
    mcp = Adafruit_MCP3008.MCP3008(spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE))
    # LED drive pin — possibly redundant for some sensors, kept for safety.
    ledPin = 18
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(ledPin, GPIO.OUT)
    samplingTime = 280.0   # microseconds
    deltaTime = 40.0       # unused here; kept from the reference recipe
    sleepTime = 9680.0     # unused here; kept from the reference recipe
    voltages = []
    try:
        for _ in range(numSamples):
            GPIO.output(ledPin, 0)
            time.sleep(samplingTime * 10.0 ** -6)
            raw = mcp.read_adc(pinVal)  # value of the requested channel
            time.sleep(samplingTime * 10.0 ** -6)
            GPIO.output(ledPin, 1)
            time.sleep(samplingTime * 10.0 ** -6)
            voltages.append(raw * (5.0 / 1024))
            time.sleep(1)
    except KeyboardInterrupt:
        GPIO.cleanup()
    return voltages
if __name__=="__main__":
error_log_name = 'error_log.txt'
erf = open(error_log_name,'a')
myname = os.uname()[1]
try:
# Send email to let human know I'm alive
sendemail(from_addr = 'oddweatherstation@gmail.com',
to_addr_list = ['heiko@opendata.durban'],
subject = 'System has restarted',
message = 'Weather station '+myname+' has rebooted and the script is running!',
login = 'oddweatherstation',
password = 'winteriscoming')
except Exception as e:
print "Gmail doesn't like the machine"
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
erf.close()
print "Welcome to your local weather station. Sit back, relax, and have the weather measured at you. Some of the measurements take some time, so if it looks like nothing is happening, chill for a while. If nothing continues to happen, then perhaps something strange is on your foot."
# set operations flags:
Temp_flag = 0
WS_flag = 0
WD_flag = 0
Gas_flag = 0
Dust_flag = 0
data_loc = '/home/pi/Desktop/Weather_Station/data/'
p = platform.system()
if p == 'Windows':
data_loc = string.replace(data_loc,'/','\\')
Zuma = 'notmypresident'
while Zuma == 'notmypresident': #notmypresident
timestamp = time.time() # UTC
file_time = datetime.datetime.fromtimestamp(timestamp).strftime('%Y_%m_%d_%H_%M_%S')
file_name = data_loc+'data_'+file_time+'.txt'
f = open(file_name,'a')
erf = open(error_log_name,'a')
time_interval = 24*60*60 # seconds
time_later = time.time()
while time_later < timestamp + time_interval:
# Temperature and humidity:
m_time = time.time()
print "The time is...:", m_time
print "Yeah... bet you can read that..."
print "Checking temperature and humidity"
try:
sensor2 = Adafruit_DHT.DHT22
pin2=24
humidity, temperature = get_temp_hum(sensor2,pin2)
print 'Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity)
except Exception as e:
print 'Failed to get temperature and humidity reading'
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if Temp_flag == 0:
try:
sendemail(from_addr = 'oddweatherstation@gmail.com',
to_addr_list = ['heiko@opendata.durban'],
subject = 'Temperature sensor down',
message = 'Weather station '+myname+' temperature gauge is not working',
login = 'oddweatherstation',
password = 'winteriscoming')
Temp_flag = 1
except:
print "Gmail doesn't like the machine"
# Gas
print "Smelling gas"
try:
gas_array = read_analog(numSamples=10,pinVal=1)
#print gas_array
gas = np.mean(gas_array)
print 'Gas = ',gas
except Exception as e:
print "We have a gas issue..."
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if Gas_flag == 0:
try:
sendemail(from_addr = 'oddweatherstation@gmail.com',
to_addr_list = ['heiko@opendata.durban'],
subject = 'Gas sensor down',
message = 'Weather station '+myname+' gas gauge is not working',
login = 'oddweatherstation',
password = 'winteriscoming')
Gas_flag = 1
except:
print "Gmail doesn't like the machine"
# Dust
print "Eating dust"
try:
pm10_array,pm25_array = dust_helper()
pm10 = np.median(pm10_array) # 10 microns
pm25 = np.median(pm25_array) # 2.5 microns
print 'pm 2.5 = {0:0.1f}, pm 10 = {1:0.1f}'.format(pm25,pm10)
#print 'chilling for a while'
#time.sleep(300) # this can be removed once the timing is sorted out - just here for now to stop the fan spinning up every 3 seconds
except Exception as e:
print"We are but shadows and dust, but not dust in the wind."
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if Dust_flag == 0:
try:
sendemail(from_addr = 'oddweatherstation@gmail.com',
to_addr_list = ['heiko@opendata.durban'],
subject = 'Dust sensor down',
message = 'Weather station '+myname+' dust gauge is not working',
login = 'oddweatherstation',
password = 'winteriscoming')
Dust_flag = 1
except:
print "Gmail doesn't like the machine"
# Run wind stuff for 300 seconds...
# Windspeed
print "Checking wind speed"
try:
windspeed_array = windspeed_helper()
windspeed = np.median(windspeed_array)
print 'Wind={0:0.1f} kph'.format(windspeed)
except Exception as e:
print 'Wind failed to pass'
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if WS_flag == 0:
try:
sendemail(from_addr = 'oddweatherstation@gmail.com',
to_addr_list = ['heiko@opendata.durban'],
subject = 'Wind speed sensor down',
message = 'Weather station '+myname+' windspeed gauge is not working',
login = 'oddweatherstation',
password = 'winteriscoming')
WS_flag = 1
except:
print "Gmail doesn't like the machine"
# Wind Direction
print "Checking wind direction"
try:
wind_dir_array = read_analog(numSamples=10,pinVal=3)
winddir = np.median(wind_dir_array)
print 'Wind direction = {0:0.1f}'.format(winddir)
except Exception as e:
print "the wind is lacking direction"
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
if WD_flag == 0:
try:
sendemail(from_addr = 'oddweatherstation@gmail.com',
to_addr_list = ['heiko@opendata.durban'],
subject = 'Wind direction sensor down',
message = 'Weather station '+myname+' wind direction gauge is not working',
login = 'oddweatherstation',
password = 'winteriscoming')
WD_flag = 1
except:
print "Gmail doesn't like the machine"
'''
print 'recording data'
line = str(temperature)+','+str(humidity)+','+str(windspeed)+','+str(winddir)+','+str(gas)+','+str(pm10)+','+str(pm25)+','+str(m_time)
f.write(line)
f.write('\n')
print 'talking to server'
# post to the village
payload = {'temp': temperature,'humid':humidity,'rain' : 0.0, 'press': 0.0}
headers = {'Content-Type': 'application/json', 'Accept':'application/json'}
try:
r = requests.post("http://citizen-sensors.herokuapp.com/ewok-village-5000", data=json.dumps(payload),headers=headers)
except Exception as e:
print "Server not listening to me - no one ever listens to me!!!"
etime = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S')
erf.write(etime)
erf.write('\n')
erf.write(str(e))
erf.write('\n')
time.sleep(10)
time_later = time.time()
'''
f.close()
erf.close()
| |
"""TUI Textual User Interface - A sane command line user interface.
Author: Joel Hedlund <yohell@ifm.liu.se>
This module contains format classes for use in textual user interfaces.
If you have problems with this package, please contact the author.
"""
__version__ = "1.0.0"
__copyright__ = "Copyright (c) 2011 Joel Hedlund."
__license__ = "MIT"
import re
class ParseError(Exception):
    """Raised on malformed input.

    Either pass (file, line, details) to get the default
    "[file:line] details" message, or a custom `message` alone — never both.
    """
    def __init__(self, file, line, details, message=None):
        if message is None:
            message = "[%r:%s] %s" % (file, line, details)
        elif file or line or details:
            raise ValueError('use either default or custom error message, not both')
        # Bug fix: the custom message was previously clobbered here by
        # re-formatting "[%r:%s] %s" with the (empty) positional arguments.
        self.message = message
        Exception.__init__(self, message)

    def __str__(self):
        return self.message
class TextBlock(object):
    """Collects raw lines of text verbatim.

    tabsize > 0 replaces tab characters with that many spaces; set <= 0 to
    disable. The base class stores lines untouched — subclasses consult
    self.tabsize in their own addline() logic.
    """
    def __init__(self, tabsize=4):
        self.tabsize = tabsize
        self.lines = []

    def __len__(self):
        return len(self.lines)

    def startblock(self):
        """Hook invoked when a new tagged block opens; a no-op by default."""
        pass

    def addline(self, line):
        """Add a line (no trailing newlines) to the text block."""
        self.lines.append(line)

    def text(self):
        """Return the text in the block as a list of lines."""
        return self.lines
class SingleParagraph(TextBlock):
    """Collects non-blank lines and joins them into a single paragraph."""
    def addline(self, line):
        """Store the stripped line; blank/whitespace-only lines are ignored."""
        stripped = line.strip()
        if stripped:
            self.lines.append(stripped)

    def text(self):
        """Return the paragraph as a one-element list of a joined string."""
        return [' '.join(self.lines)]
class IndentedParagraphs(TextBlock):
    """Joins consecutive lines of equal indentation into paragraphs while
    preserving the overall indentation structure; a trailing backslash on a
    line forces the next line onto its own row."""
    def startblock(self):
        # -1 is a sentinel indent no real line can have, so the first line
        # of a block always starts a new paragraph.
        self._previous_indent = -1
        self._keep_indent = True

    def addline(self, line):
        if self.tabsize:
            line = line.replace('\t', ' ' * self.tabsize)
        indent = len(line) - len(line.lstrip())
        rstripped = line.rstrip()
        if rstripped.endswith('\\'):
            # Explicit continuation marker: drop the backslash and keep the
            # NEXT line on its own row instead of joining it.
            rstripped = rstripped[:-1].rstrip()
            keep_next_indent = True
        else:
            keep_next_indent = False
        if not rstripped:
            # Blank line: paragraph break; reset the indent tracker.
            self.lines.append(rstripped)
            self._previous_indent = -1
        elif self._keep_indent or indent != self._previous_indent:
            # Start a new paragraph (indent changed or a break was forced).
            self.lines.append(rstripped)
            self._previous_indent = indent
        else:
            # Same indent as the previous line: join into current paragraph.
            self.lines[-1] += ' ' + rstripped.strip()
        self._keep_indent = keep_next_indent

    def text(self):
        """Return the indented paragraphs as a list of strings."""
        return self.lines
class Decommenter(object):
    """Interface for comment strippers used by TextBlockParser."""
    def decomment(self, line):
        """Remove the comment parts from a line of text.

        Subclasses return the decommented line; returning None signals
        that the line is entirely a comment and must be discarded rather
        than fed to TextBlock storage. (This base implementation returns
        None for every line.)
        """
class UnescapedHashDecommenter(Decommenter):
    """Shell style unescaped hash line comments.
    Use backslashes to escape # characters if you want to use them in text.
    Backslashes have no special meaning anywhere else, but if you feel an urge
    to have backslashes directly preceding hashes in your output, type twice as
    many as you want, plus one more to keep the hash character from becoming a
    comment. Examples:
    # is a comment
    \# is a hash character.
    \\# is a backslash followed by comment.
    \\\\\# is two backslashes and a hash character.
    \\\\\\# is three backslashes and a comment.
    ... you get the picture.
    """
    def decomment(self, line):
        # Line starting with '#' is a whole-line comment: drop it entirely.
        if line and line[0] == '#':
            return None
        # (\\*)\1 matches an even run of backslashes (group 1 captures half,
        # the backreference consumes the other half), followed by either a
        # real comment (#.*) or an escaped hash (\#, hash in group 3).
        decomment = r'(\\*)\1(#.*|\\(#))'
        # repl keeps the un-doubled backslash run plus the hash when it was
        # escaped. repl is a workaround for the fact that python <3.3 re
        # gives None rather than '' for nonmatching groups.
        repl = lambda m: m.group(1) + (m.group(3) or '')
        return re.sub(decomment, repl, line)
class TextBlockParser(object):
    """Parses a file into named text blocks introduced by 'TAG:' lines."""

    def __init__(self,
                 untagged=IndentedParagraphs,
                 decommenter=UnescapedHashDecommenter,
                 tabsize=4,
                 blocks=None,
                 labelled_classes=None,
                 names=None):
        """
        untagged determines what to do with text before the first tagged text
        block and should be a TextBlock subclass, or None to raise ParseError.
        decommenter determines how to strip comments from the text. Set to None
        to not decomment.
        tabsize > 0 replaces tab characters with that many spaces. Set <= 0 to
        disable.
        """
        # Accept classes as well as instances for the two strategy objects.
        if isinstance(untagged, type(TextBlock)):
            untagged = untagged()
        self.untagged = untagged
        if isinstance(decommenter, type(Decommenter)):
            decommenter = decommenter()
        self.decommenter = decommenter
        self.tabsize = tabsize
        self.blocks = {} if blocks is None else blocks
        self.labelled_classes = ({} if labelled_classes is None
                                 else labelled_classes)
        if names is None:
            # Bug fix: name.upper was missing its call parentheses, so the
            # tag lookup table was keyed on bound methods, never strings.
            names = dict((name.upper(), name) for name in self.blocks)
        self.names = names

    def __getitem__(self, name):
        """Return parsed text for `name`: a list, or {label: list} when the
        block type is labelled."""
        if name in self.labelled_classes:
            return dict((label, labelled_block.text())
                        for label, labelled_block in self.blocks[name].items())
        return self.blocks[name].text()

    def addblock(self, name, textblockclass, labelled=False, tag=None):
        """Register a block type; `tag` defaults to name.upper()."""
        if name in self.blocks:
            raise ValueError('name already in use')
        if not tag:
            tag = name.upper()
        if tag in self.names:
            raise ValueError('tag already in use')
        self.names[tag] = name
        if labelled:
            # Labelled blocks keep one TextBlock instance per label,
            # created lazily while parsing.
            self.blocks[name] = dict()
            self.labelled_classes[name] = textblockclass
        else:
            self.blocks[name] = textblockclass()

    def parse(self, file):
        """Parse text blocks from a file (path or open file object)."""
        if isinstance(file, basestring):
            file = open(file)
        line_number = 0
        block = self.untagged
        for line in file:
            line_number += 1
            line = line.rstrip('\n')
            if self.tabsize > 0:
                line = line.replace('\t', ' ' * self.tabsize)
            if self.decommenter:
                line = self.decommenter.decomment(line)
                if line is None:
                    continue
            # Bug fix: split the line once with partition() — the old code
            # re-split for the label and raised IndexError on a tagged line
            # without a colon.
            tag, _sep, label = line.partition(':')
            tag = tag.strip()
            # Still in the same block?
            if tag not in self.names:
                if block is None:
                    if line and not line.isspace():
                        # Bug fix: pass the line number, not the line text,
                        # to ParseError (matches the other call sites).
                        raise ParseError(file.name, line_number,
                                         "garbage before first block: %r" % line)
                    continue
                block.addline(line)
                continue
            # Open a new block.
            name = self.names[tag]
            label = label.strip()
            if name in self.labelled_classes:
                if not label:
                    raise ParseError(file.name, line_number,
                                     "missing label for %r block" % name)
                block = self.blocks[name].setdefault(
                    label, self.labelled_classes[name]())
            else:
                if label:
                    msg = "label %r present for unlabelled block %r" % (label, name)
                    raise ParseError(file.name, line_number, msg)
                block = self.blocks[name]
            block.startblock()

    def text(self):
        """Return {name: parsed text} for every registered block."""
        return dict((name, self[name]) for name in self.blocks)
| |
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of SQLAlchemy backend."""
import sys
from oslo_config import cfg
from oslo_db import exception as common_db_exc
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log as logging
import sqlalchemy as sa
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import desc
from blazar.db import exceptions as db_exc
from blazar.db.sqlalchemy import facade_wrapper
from blazar.db.sqlalchemy import models
# Maps a resource-type string to the model that stores its extra
# capabilities; presumably consumed by the resource-property queries
# elsewhere in this module — confirm at the call sites.
RESOURCE_PROPERTY_MODELS = {
    'physical:host': models.ComputeHostExtraCapability,
}

LOG = logging.getLogger(__name__)

# Re-export the facade's engine/session accessors so this module and its
# callers share a single access path.
get_engine = facade_wrapper.get_engine
get_session = facade_wrapper.get_session
def get_backend():
    """The backend is this module itself."""
    this_module = sys.modules[__name__]
    return this_module
def model_query(model, session=None):
    """Query helper.

    :param model: base model to query
    :param session: session to use; a fresh one is obtained when omitted
    """
    active_session = session or get_session()
    return active_session.query(model)
def setup_db():
    """Create all Blazar tables; returns True on success, False on DB error."""
    try:
        facade = db_session.EngineFacade(cfg.CONF.database.connection,
                                         sqlite_fk=True)
        models.Lease.metadata.create_all(facade.get_engine())
    except sa.exc.OperationalError as e:
        LOG.error("Database registration exception: %s", e)
        return False
    return True
def drop_db():
    """Drop all Blazar tables; returns True on success, False on any error."""
    try:
        facade = db_session.EngineFacade(cfg.CONF.database.connection,
                                         sqlite_fk=True)
        models.Lease.metadata.drop_all(facade.get_engine())
    except Exception as e:
        # Best-effort teardown: report and signal failure, never raise.
        LOG.error("Database shutdown exception: %s", e)
        return False
    return True
# Helpers for building constraints / equality checks
def constraint(**conditions):
    # Factory: one condition object per column name, wrapped for .apply().
    return Constraint(conditions)


def equal_any(*values):
    # Matches rows where the column equals ANY of the given values.
    return EqualityCondition(values)


def not_equal(*values):
    # Matches rows where the column differs from ALL of the given values.
    return InequalityCondition(values)
class Constraint(object):
    """A bundle of per-column conditions that can be applied to a query."""

    def __init__(self, conditions):
        # Mapping: model attribute name -> condition exposing clauses(field).
        self.conditions = conditions

    def apply(self, model, query):
        """Filter `query` with every clause of every condition."""
        for attr_name, condition in self.conditions.items():
            field = getattr(model, attr_name)
            for clause in condition.clauses(field):
                query = query.filter(clause)
        return query
class EqualityCondition(object):
    # Condition satisfied when the field equals any of `values`.
    def __init__(self, values):
        self.values = values

    def clauses(self, field):
        # NOTE(review): sa.or_() is given a *list* here, and Constraint.apply
        # iterates over the result. Passing a raw list to or_() is deprecated
        # in SQLAlchemy 1.4+ — verify this against the SQLAlchemy version in
        # use before touching it; unpacking (`sa.or_(*...)`) would change the
        # value apply() iterates over.
        return sa.or_([field == value for value in self.values])
class InequalityCondition(object):
    """Condition satisfied only when the field differs from every value.

    clauses() yields one `!=` comparison per value; Constraint.apply
    AND-combines them by filtering with each in turn.
    """

    def __init__(self, values):
        self.values = values

    def clauses(self, field):
        return [field != candidate for candidate in self.values]
# Reservation


def _reservation_get(session, reservation_id):
    """Fetch one Reservation by primary key within `session`, or None."""
    return model_query(models.Reservation,
                       session).filter_by(id=reservation_id).first()


def reservation_get(reservation_id):
    """Return the reservation with the given id, or None."""
    return _reservation_get(get_session(), reservation_id)


def reservation_get_all():
    """Return every reservation row."""
    return model_query(models.Reservation, get_session()).all()


def reservation_get_all_by_lease_id(lease_id):
    """Return all reservations belonging to one lease."""
    query = model_query(models.Reservation, get_session())
    return query.filter_by(lease_id=lease_id).all()


def reservation_get_all_by_values(**kwargs):
    """Returns all entries filtered by col=value."""
    query = model_query(models.Reservation, get_session())
    for attr, wanted in kwargs.items():
        column = getattr(models.Reservation, attr, None)
        if column:
            query = query.filter(column == wanted)
    return query.all()


def reservation_create(values):
    """Insert a Reservation; raises BlazarDBDuplicateEntry on conflict."""
    rsv = models.Reservation()
    rsv.update(values.copy())
    session = get_session()
    with session.begin():
        try:
            rsv.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Re-raise with the colliding column names attached.
            raise db_exc.BlazarDBDuplicateEntry(
                model=rsv.__class__.__name__, columns=e.columns)
    return reservation_get(rsv.id)


def reservation_update(reservation_id, values):
    """Update an existing reservation and return the refreshed row."""
    session = get_session()
    with session.begin():
        rsv = _reservation_get(session, reservation_id)
        rsv.update(values)
        rsv.save(session=session)
    return reservation_get(reservation_id)


def reservation_destroy(reservation_id):
    """Delete a reservation; raises BlazarDBNotFound when it is absent."""
    session = get_session()
    with session.begin():
        rsv = _reservation_get(session, reservation_id)
        if not rsv:
            raise db_exc.BlazarDBNotFound(id=reservation_id,
                                          model='Reservation')
        session.delete(rsv)
# Lease


def _lease_get(session, lease_id):
    """Fetch one Lease by primary key within `session`, or None."""
    return model_query(models.Lease, session).filter_by(id=lease_id).first()


def lease_get(lease_id):
    """Return the lease with the given id, or None."""
    return _lease_get(get_session(), lease_id)


def lease_get_all():
    """Return every lease row."""
    return model_query(models.Lease, get_session()).all()


def lease_get_all_by_project(project_id):
    """Per-project listing is not implemented by this backend."""
    raise NotImplementedError


def lease_get_all_by_user(user_id):
    """Per-user listing is not implemented by this backend."""
    raise NotImplementedError


def lease_list(project_id=None):
    """List leases, optionally restricted to a single project."""
    query = model_query(models.Lease, get_session())
    if project_id is not None:
        query = query.filter_by(project_id=project_id)
    return query.all()
def lease_create(values):
    """Create a lease plus its nested reservations and events atomically.

    `values` may carry "reservations" and "events" lists of dicts; each
    child row is stamped with the new lease's id. Raises
    BlazarDBDuplicateEntry naming the model whose insert collided.
    """
    values = values.copy()
    reservations = values.pop("reservations", [])
    events = values.pop("events", [])
    lease = models.Lease()
    lease.update(values)
    session = get_session()
    with session.begin():
        try:
            lease.save(session=session)
        except common_db_exc.DBDuplicateEntry as exc:
            raise db_exc.BlazarDBDuplicateEntry(
                model=lease.__class__.__name__, columns=exc.columns)
        try:
            for rsv_values in reservations:
                rsv = models.Reservation()
                rsv.update({"lease_id": lease.id})
                rsv.update(rsv_values)
                rsv.save(session=session)
        except common_db_exc.DBDuplicateEntry as exc:
            raise db_exc.BlazarDBDuplicateEntry(
                model=rsv.__class__.__name__, columns=exc.columns)
        try:
            for event_values in events:
                event = models.Event()
                event.update({"lease_id": lease.id})
                event.update(event_values)
                event.save(session=session)
        except common_db_exc.DBDuplicateEntry as exc:
            raise db_exc.BlazarDBDuplicateEntry(
                model=event.__class__.__name__, columns=exc.columns)
    return lease_get(lease.id)
def lease_update(lease_id, values):
    """Update an existing lease and return the refreshed row."""
    session = get_session()
    with session.begin():
        lease = _lease_get(session, lease_id)
        lease.update(values)
        lease.save(session=session)
    return lease_get(lease_id)


def lease_destroy(lease_id):
    """Delete a lease; raises BlazarDBNotFound when it is absent."""
    session = get_session()
    with session.begin():
        lease = _lease_get(session, lease_id)
        if not lease:
            raise db_exc.BlazarDBNotFound(id=lease_id, model='Lease')
        session.delete(lease)
# Event


def _event_get(session, event_id):
    """Fetch one Event by primary key within `session`, or None."""
    return model_query(models.Event, session).filter_by(id=event_id).first()


def _event_get_all(session):
    """Return an unfiltered Event query."""
    return model_query(models.Event, session)


def event_get(event_id):
    """Return the event with the given id, or None."""
    return _event_get(get_session(), event_id)


def event_get_all():
    """Return every event row."""
    return _event_get_all(get_session()).all()


def _event_get_sorted_by_filters(sort_key, sort_dir, filters):
    """Return an event query filtered and sorted by name of the field."""
    order_fn = {'desc': desc, 'asc': asc}[sort_dir]
    query = _event_get_all(get_session())
    # Straight equality filters.
    for attr in ('status', 'lease_id', 'event_type'):
        if attr in filters:
            query = query.filter(getattr(models.Event, attr) == filters[attr])
    # Time filter carries its own comparison operator.
    if 'time' in filters:
        border = filters['time']['border']
        op = filters['time']['op']
        comparisons = {
            'lt': models.Event.time < border,
            'le': models.Event.time <= border,
            'gt': models.Event.time > border,
            'ge': models.Event.time >= border,
            'eq': models.Event.time == border,
        }
        if op in comparisons:
            query = query.filter(comparisons[op])
    return query.order_by(order_fn(getattr(models.Event, sort_key)))


def event_get_first_sorted_by_filters(sort_key, sort_dir, filters):
    """Return first result for events

    Return the first result for all events matching the filters
    and sorted by name of the field.
    """
    return _event_get_sorted_by_filters(sort_key, sort_dir, filters).first()


def event_get_all_sorted_by_filters(sort_key, sort_dir, filters):
    """Return events filtered and sorted by name of the field."""
    return _event_get_sorted_by_filters(sort_key, sort_dir, filters).all()
def event_create(values):
    """Insert an Event; raises BlazarDBDuplicateEntry on conflict."""
    event = models.Event()
    event.update(values.copy())
    session = get_session()
    with session.begin():
        try:
            event.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Re-raise with the colliding column names attached.
            raise db_exc.BlazarDBDuplicateEntry(
                model=event.__class__.__name__, columns=e.columns)
    return event_get(event.id)


def event_update(event_id, values):
    """Update an existing event and return the refreshed row."""
    session = get_session()
    with session.begin():
        event = _event_get(session, event_id)
        event.update(values)
        event.save(session=session)
    return event_get(event_id)


def event_destroy(event_id):
    """Delete an event; raises BlazarDBNotFound when it is absent."""
    session = get_session()
    with session.begin():
        event = _event_get(session, event_id)
        if not event:
            raise db_exc.BlazarDBNotFound(id=event_id, model='Event')
        session.delete(event)
# ComputeHostReservation


def _host_reservation_get(session, host_reservation_id):
    """Fetch one ComputeHostReservation by id within `session`, or None."""
    return model_query(models.ComputeHostReservation,
                       session).filter_by(id=host_reservation_id).first()


def host_reservation_get(host_reservation_id):
    """Return the host reservation with the given id, or None."""
    return _host_reservation_get(get_session(), host_reservation_id)


def host_reservation_get_all():
    """Return every host reservation row."""
    return model_query(models.ComputeHostReservation, get_session()).all()


def _host_reservation_get_by_reservation_id(session, reservation_id):
    """Fetch the host reservation tied to `reservation_id`, or None."""
    query = model_query(models.ComputeHostReservation, session)
    return query.filter_by(reservation_id=reservation_id).first()


def host_reservation_get_by_reservation_id(reservation_id):
    """Return the host reservation for the given reservation, or None."""
    return _host_reservation_get_by_reservation_id(get_session(),
                                                   reservation_id)


def host_reservation_create(values):
    """Insert a ComputeHostReservation; raises BlazarDBDuplicateEntry."""
    host_rsv = models.ComputeHostReservation()
    host_rsv.update(values.copy())
    session = get_session()
    with session.begin():
        try:
            host_rsv.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Re-raise with the colliding column names attached.
            raise db_exc.BlazarDBDuplicateEntry(
                model=host_rsv.__class__.__name__, columns=e.columns)
    return host_reservation_get(host_rsv.id)


def host_reservation_update(host_reservation_id, values):
    """Update a host reservation and return the refreshed row."""
    session = get_session()
    with session.begin():
        host_rsv = _host_reservation_get(session, host_reservation_id)
        host_rsv.update(values)
        host_rsv.save(session=session)
    return host_reservation_get(host_reservation_id)


def host_reservation_destroy(host_reservation_id):
    """Delete a host reservation; raises BlazarDBNotFound when absent."""
    session = get_session()
    with session.begin():
        host_rsv = _host_reservation_get(session, host_reservation_id)
        if not host_rsv:
            raise db_exc.BlazarDBNotFound(
                id=host_reservation_id, model='ComputeHostReservation')
        session.delete(host_rsv)
# InstanceReservation


def instance_reservation_create(values):
    """Insert an InstanceReservations row; raises BlazarDBDuplicateEntry."""
    inst_rsv = models.InstanceReservations()
    inst_rsv.update(values.copy())
    session = get_session()
    with session.begin():
        try:
            inst_rsv.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Re-raise with the colliding column names attached.
            raise db_exc.BlazarDBDuplicateEntry(
                model=inst_rsv.__class__.__name__,
                columns=e.columns)
    return instance_reservation_get(inst_rsv.id)


def instance_reservation_get(instance_reservation_id, session=None):
    """Fetch one InstanceReservations row, optionally in a caller session."""
    active_session = session or get_session()
    return model_query(models.InstanceReservations,
                       active_session).filter_by(
                           id=instance_reservation_id).first()


def instance_reservation_update(instance_reservation_id, values):
    """Update an instance reservation; raises BlazarDBNotFound when absent."""
    session = get_session()
    with session.begin():
        inst_rsv = instance_reservation_get(instance_reservation_id, session)
        if not inst_rsv:
            raise db_exc.BlazarDBNotFound(
                id=instance_reservation_id, model='InstanceReservations')
        inst_rsv.update(values)
        inst_rsv.save(session=session)
    return instance_reservation_get(instance_reservation_id)
def instance_reservation_destroy(instance_reservation_id):
    """Delete an instance reservation; raises BlazarDBNotFound when absent."""
    session = get_session()
    with session.begin():
        # Bug fix: look the row up in THIS session. Previously the lookup
        # omitted the session argument, so instance_reservation_get used a
        # second session while the delete ran on `session` — inconsistent
        # with instance_reservation_update, which passes the session.
        instance = instance_reservation_get(instance_reservation_id, session)
        if not instance:
            raise db_exc.BlazarDBNotFound(
                id=instance_reservation_id, model='InstanceReservations')
        session.delete(instance)
# ComputeHostAllocation
def _host_allocation_get(session, host_allocation_id):
query = model_query(models.ComputeHostAllocation, session)
return query.filter_by(id=host_allocation_id).first()
def host_allocation_get(host_allocation_id):
return _host_allocation_get(get_session(),
host_allocation_id)
def host_allocation_get_all():
query = model_query(models.ComputeHostAllocation, get_session())
return query.all()
def host_allocation_get_all_by_values(**kwargs):
"""Returns all entries filtered by col=value."""
allocation_query = model_query(models.ComputeHostAllocation, get_session())
for name, value in kwargs.items():
column = getattr(models.ComputeHostAllocation, name, None)
if column:
allocation_query = allocation_query.filter(column == value)
return allocation_query.all()
def host_allocation_create(values):
values = values.copy()
host_allocation = models.ComputeHostAllocation()
host_allocation.update(values)
session = get_session()
with session.begin():
try:
host_allocation.save(session=session)
except common_db_exc.DBDuplicateEntry as e:
# raise exception about duplicated columns (e.columns)
raise db_exc.BlazarDBDuplicateEntry(
model=host_allocation.__class__.__name__, columns=e.columns)
return host_allocation_get(host_allocation.id)
def host_allocation_update(host_allocation_id, values):
    """Update a host allocation and return the refreshed record.

    :raises: BlazarDBNotFound if the allocation does not exist.
    """
    session = get_session()
    with session.begin():
        host_allocation = _host_allocation_get(session,
                                               host_allocation_id)
        if not host_allocation:
            # BUG FIX: a missing row used to crash with AttributeError on
            # None; raise the same not-found error host_allocation_destroy
            # uses.
            raise db_exc.BlazarDBNotFound(
                id=host_allocation_id, model='ComputeHostAllocation')
        host_allocation.update(values)
        host_allocation.save(session=session)
    return host_allocation_get(host_allocation_id)
def host_allocation_destroy(host_allocation_id):
    """Delete a host allocation; raise if it does not exist."""
    session = get_session()
    with session.begin():
        allocation = _host_allocation_get(session, host_allocation_id)
        if not allocation:
            raise db_exc.BlazarDBNotFound(
                id=host_allocation_id, model='ComputeHostAllocation')
        session.delete(allocation)
# ComputeHost
def _host_get(session, host_id):
    """Fetch a ComputeHost row by id, or None."""
    return (model_query(models.ComputeHost, session)
            .filter_by(id=host_id)
            .first())
def _host_get_all(session):
    """Return an unfiltered ComputeHost query."""
    return model_query(models.ComputeHost, session)
def host_get(host_id):
    """Return the host with the given id, or None."""
    session = get_session()
    return _host_get(session, host_id)
def host_list():
    """Return every ComputeHost row."""
    return _host_get_all(get_session()).all()
def host_get_all_by_filters(filters):
    """Returns hosts filtered by name of the field."""
    query = _host_get_all(get_session())
    # Only the 'status' filter is currently supported.
    if 'status' in filters:
        query = query.filter(
            models.ComputeHost.status == filters['status'])
    return query.all()
def host_get_all_by_queries(queries):
    """Returns hosts filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    :raises: BlazarDBInvalidFilter for malformed queries,
        BlazarDBInvalidFilterOperator for unknown operators,
        BlazarDBNotFound for unknown resource-property keys.
    """
    hosts_query = model_query(models.ComputeHost, get_session())
    # Maps a textual operator to [sqlalchemy operator name, NEGATED python
    # comparison used below for resource-property exclusion].
    oper = {
        '<': ['lt', lambda a, b: a >= b],
        '>': ['gt', lambda a, b: a <= b],
        '<=': ['le', lambda a, b: a > b],
        '>=': ['ge', lambda a, b: a < b],
        '==': ['eq', lambda a, b: a != b],
        '!=': ['ne', lambda a, b: a == b],
    }
    hosts = []  # ids of hosts to EXCLUDE from the result
    for query in queries:
        try:
            key, op, value = query.split(' ', 2)
        except ValueError:
            raise db_exc.BlazarDBInvalidFilter(query_filter=query)
        column = getattr(models.ComputeHost, key, None)
        if column is not None:
            if op == 'in':
                filt = column.in_(value.split(','))
            else:
                if op in oper:
                    op = oper[op][0]
                try:
                    # Resolve e.g. 'lt' to whichever of lt/lt_/__lt__ the
                    # column actually implements.
                    attr = [e for e in ['%s', '%s_', '__%s__']
                            if hasattr(column, e % op)][0] % op
                except IndexError:
                    raise db_exc.BlazarDBInvalidFilterOperator(
                        filter_operator=op)
                if value == 'null':
                    value = None
                filt = getattr(column, attr)(value)
            hosts_query = hosts_query.filter(filt)
        else:
            # looking for resource properties matches
            extra_filter = (
                _host_resource_property_query(get_session())
                .filter(models.ResourceProperty.property_name == key)
            ).all()
            if not extra_filter:
                raise db_exc.BlazarDBNotFound(
                    id=key, model='ComputeHostExtraCapability')
            # BUG FIX: removed leftover debug statement print(dir(host)).
            for host, property_name in extra_filter:
                if op in oper and oper[op][1](host.capability_value, value):
                    hosts.append(host.computehost_id)
                elif op not in oper:
                    msg = 'Operator %s for resource properties not implemented'
                    raise NotImplementedError(msg % op)
            # We must also avoid selecting any host which doesn't have the
            # extra capability present.
            all_hosts = [h.id for h in hosts_query.all()]
            extra_filter_hosts = [h.computehost_id for h, _ in extra_filter]
            hosts += [h for h in all_hosts if h not in extra_filter_hosts]
    return hosts_query.filter(~models.ComputeHost.id.in_(hosts)).all()
def reservable_host_get_all_by_queries(queries):
    """Returns reservable hosts filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    """
    # BUG FIX: build a new list instead of mutating the caller's `queries`
    # with append(), which leaked the extra clause back to the caller.
    return host_get_all_by_queries(queries + ['reservable == 1'])
def unreservable_host_get_all_by_queries(queries):
    """Returns unreservable hosts filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    """
    # TODO(hiro-kobayashi): support the expression 'reservable == False'
    # BUG FIX: build a new list instead of mutating the caller's `queries`.
    return host_get_all_by_queries(queries + ['reservable == 0'])
def host_create(values):
    """Create a ComputeHost from the given column values."""
    host = models.ComputeHost()
    host.update(values.copy())
    session = get_session()
    with session.begin():
        try:
            host.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided (e.columns).
            raise db_exc.BlazarDBDuplicateEntry(
                model=host.__class__.__name__, columns=e.columns)
    return host_get(host.id)
def host_update(host_id, values):
    """Update a host and return the refreshed record.

    :raises: BlazarDBNotFound if the host does not exist.
    """
    session = get_session()
    with session.begin():
        host = _host_get(session, host_id)
        if not host:
            # BUG FIX: a missing row used to crash with AttributeError on
            # None; raise the same not-found error host_destroy uses.
            raise db_exc.BlazarDBNotFound(id=host_id, model='Host')
        host.update(values)
        host.save(session=session)
    return host_get(host_id)
def host_destroy(host_id):
    """Delete a host; raise if it does not exist."""
    session = get_session()
    with session.begin():
        host = _host_get(session, host_id)
        if not host:
            raise db_exc.BlazarDBNotFound(id=host_id, model='Host')
        session.delete(host)
# ComputeHostExtraCapability
def _host_resource_property_query(session):
    """Query extra capabilities joined with their property names.

    Each result row is a (ComputeHostExtraCapability, property_name) tuple.
    """
    query = model_query(models.ComputeHostExtraCapability, session)
    query = query.join(models.ResourceProperty)
    return query.add_column(models.ResourceProperty.property_name)
def _host_extra_capability_get(session, host_extra_capability_id):
    """Fetch one (capability, property_name) tuple by capability id."""
    return (_host_resource_property_query(session)
            .filter(models.ComputeHostExtraCapability.id ==
                    host_extra_capability_id)
            .first())
def host_extra_capability_get(host_extra_capability_id):
    """Return the (capability, property_name) tuple, or None."""
    session = get_session()
    return _host_extra_capability_get(session, host_extra_capability_id)
def _host_extra_capability_get_all_per_host(session, host_id):
    """Query all (capability, property_name) tuples of one host."""
    return _host_resource_property_query(session).filter(
        models.ComputeHostExtraCapability.computehost_id == host_id)
def host_extra_capability_get_all_per_host(host_id):
    """Return all (capability, property_name) tuples of one host."""
    session = get_session()
    return _host_extra_capability_get_all_per_host(session, host_id).all()
def host_extra_capability_create(values):
    """Create a host extra capability, creating its property if needed."""
    values = values.copy()
    # Replace the textual property name with its ResourceProperty id.
    resource_property = resource_property_get_or_create(
        'physical:host', values.pop('property_name'))
    values['property_id'] = resource_property.id
    capability = models.ComputeHostExtraCapability()
    capability.update(values)
    session = get_session()
    with session.begin():
        try:
            capability.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided (e.columns).
            raise db_exc.BlazarDBDuplicateEntry(
                model=capability.__class__.__name__,
                columns=e.columns)
    return host_extra_capability_get(capability.id)
def host_extra_capability_update(host_extra_capability_id, values):
    """Update a host extra capability and return the refreshed record.

    :raises: BlazarDBNotFound if the capability does not exist.
    """
    session = get_session()
    with session.begin():
        result = _host_extra_capability_get(session,
                                            host_extra_capability_id)
        if not result:
            # BUG FIX: unpacking None used to raise TypeError; raise the
            # same not-found error host_extra_capability_destroy uses.
            raise db_exc.BlazarDBNotFound(
                id=host_extra_capability_id,
                model='ComputeHostExtraCapability')
        # result is a (ComputeHostExtraCapability, property_name) tuple.
        host_extra_capability, _ = result
        host_extra_capability.update(values)
        host_extra_capability.save(session=session)
    return host_extra_capability_get(host_extra_capability_id)
def host_extra_capability_destroy(host_extra_capability_id):
    """Delete a host extra capability; raise if it does not exist."""
    session = get_session()
    with session.begin():
        result = _host_extra_capability_get(
            session, host_extra_capability_id)
        if not result:
            raise db_exc.BlazarDBNotFound(
                id=host_extra_capability_id,
                model='ComputeHostExtraCapability')
        # result is a (row, property_name) tuple; delete only the row.
        session.delete(result[0])
def host_extra_capability_get_all_per_name(host_id, property_name):
    """Return one host's (capability, property_name) tuples for one name."""
    session = get_session()
    with session.begin():
        per_host = _host_extra_capability_get_all_per_host(session, host_id)
        name_match = models.ResourceProperty.property_name == property_name
        return per_host.filter(name_match).all()
# FloatingIP reservation
def fip_reservation_create(fip_reservation_values):
    """Create a floating-IP reservation from the given column values."""
    fip_reservation = models.FloatingIPReservation()
    fip_reservation.update(fip_reservation_values.copy())
    session = get_session()
    with session.begin():
        try:
            fip_reservation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided (e.columns).
            raise db_exc.BlazarDBDuplicateEntry(
                model=fip_reservation.__class__.__name__, columns=e.columns)
    return fip_reservation_get(fip_reservation.id)
def _fip_reservation_get(session, fip_reservation_id):
    """Fetch a FloatingIPReservation row by id, or None."""
    return (model_query(models.FloatingIPReservation, session)
            .filter_by(id=fip_reservation_id)
            .first())
def fip_reservation_get(fip_reservation_id):
    """Return the FIP reservation with the given id, or None."""
    session = get_session()
    return _fip_reservation_get(session, fip_reservation_id)
def fip_reservation_update(fip_reservation_id, fip_reservation_values):
    """Update a FIP reservation and return the refreshed record.

    :raises: BlazarDBNotFound if the reservation does not exist.
    """
    session = get_session()
    with session.begin():
        fip_reservation = _fip_reservation_get(session, fip_reservation_id)
        if not fip_reservation:
            # BUG FIX: a missing row used to crash with AttributeError on
            # None; raise the same not-found error destroy() uses.
            raise db_exc.BlazarDBNotFound(
                id=fip_reservation_id, model='FloatingIPReservation')
        fip_reservation.update(fip_reservation_values)
        fip_reservation.save(session=session)
    return fip_reservation_get(fip_reservation_id)
def fip_reservation_destroy(fip_reservation_id):
    """Delete a FIP reservation; raise if it does not exist."""
    session = get_session()
    with session.begin():
        fip_reservation = _fip_reservation_get(session, fip_reservation_id)
        if not fip_reservation:
            raise db_exc.BlazarDBNotFound(
                id=fip_reservation_id, model='FloatingIPReservation')
        session.delete(fip_reservation)
# Required FIP
def required_fip_create(required_fip_values):
    """Create a RequiredFloatingIP from the given column values."""
    required_fip = models.RequiredFloatingIP()
    required_fip.update(required_fip_values.copy())
    session = get_session()
    with session.begin():
        try:
            required_fip.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided (e.columns).
            raise db_exc.BlazarDBDuplicateEntry(
                model=required_fip.__class__.__name__, columns=e.columns)
    return required_fip_get(required_fip.id)
def _required_fip_get(session, required_fip_id):
    """Fetch a RequiredFloatingIP row by id, or None."""
    return (model_query(models.RequiredFloatingIP, session)
            .filter_by(id=required_fip_id)
            .first())
def required_fip_get(required_fip_id):
    """Return the required FIP with the given id, or None."""
    session = get_session()
    return _required_fip_get(session, required_fip_id)
def required_fip_update(required_fip_id, required_fip_values):
    """Update a required FIP and return the refreshed record.

    :raises: BlazarDBNotFound if the row does not exist.
    """
    session = get_session()
    with session.begin():
        required_fip = _required_fip_get(session, required_fip_id)
        if not required_fip:
            # BUG FIX: a missing row used to crash with AttributeError on
            # None; raise the same not-found error destroy() uses.
            raise db_exc.BlazarDBNotFound(
                id=required_fip_id, model='RequiredFloatingIP')
        required_fip.update(required_fip_values)
        required_fip.save(session=session)
    return required_fip_get(required_fip_id)
def required_fip_destroy(required_fip_id):
    """Delete a required FIP; raise if it does not exist."""
    session = get_session()
    with session.begin():
        required_fip = _required_fip_get(session, required_fip_id)
        if not required_fip:
            raise db_exc.BlazarDBNotFound(
                id=required_fip_id, model='RequiredFloatingIP')
        session.delete(required_fip)
def required_fip_destroy_by_fip_reservation_id(fip_reservation_id):
    """Delete every RequiredFloatingIP tied to one FIP reservation."""
    session = get_session()
    with session.begin():
        required_fips = model_query(
            models.RequiredFloatingIP, session).filter_by(
            floatingip_reservation_id=fip_reservation_id)
        for required_fip in required_fips:
            # NOTE(review): required_fip_destroy() opens its own
            # session.begin() while this transaction is still active --
            # presumably the session layer tolerates nesting; confirm.
            required_fip_destroy(required_fip['id'])
# FloatingIP Allocation
def _fip_allocation_get(session, fip_allocation_id):
    """Fetch a FloatingIPAllocation row by id, or None."""
    return (model_query(models.FloatingIPAllocation, session)
            .filter_by(id=fip_allocation_id)
            .first())
def fip_allocation_get(fip_allocation_id):
    """Return the FIP allocation with the given id, or None."""
    session = get_session()
    return _fip_allocation_get(session, fip_allocation_id)
def fip_allocation_create(allocation_values):
    """Create a FloatingIPAllocation from the given column values."""
    fip_allocation = models.FloatingIPAllocation()
    fip_allocation.update(allocation_values.copy())
    session = get_session()
    with session.begin():
        try:
            fip_allocation.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided (e.columns).
            raise db_exc.BlazarDBDuplicateEntry(
                model=fip_allocation.__class__.__name__, columns=e.columns)
    return fip_allocation_get(fip_allocation.id)
def fip_allocation_get_all_by_values(**kwargs):
    """Returns all entries filtered by col=value."""
    query = model_query(models.FloatingIPAllocation, get_session())
    for name, value in kwargs.items():
        # Silently skip names that are not model columns.
        column = getattr(models.FloatingIPAllocation, name, None)
        if column:
            query = query.filter(column == value)
    return query.all()
def fip_allocation_destroy(allocation_id):
    """Delete a FIP allocation; raise if it does not exist."""
    session = get_session()
    with session.begin():
        fip_allocation = _fip_allocation_get(session, allocation_id)
        if not fip_allocation:
            raise db_exc.BlazarDBNotFound(
                id=allocation_id, model='FloatingIPAllocation')
        session.delete(fip_allocation)
def fip_allocation_update(allocation_id, allocation_values):
    """Update a FIP allocation and return the refreshed record.

    :raises: BlazarDBNotFound if the allocation does not exist.
    """
    session = get_session()
    with session.begin():
        fip_allocation = _fip_allocation_get(session, allocation_id)
        if not fip_allocation:
            # BUG FIX: a missing row used to crash with AttributeError on
            # None; raise the same not-found error destroy() uses.
            raise db_exc.BlazarDBNotFound(
                id=allocation_id, model='FloatingIPAllocation')
        fip_allocation.update(allocation_values)
        fip_allocation.save(session=session)
    return fip_allocation_get(allocation_id)
# Floating IP
def _floatingip_get(session, floatingip_id):
    """Fetch a FloatingIP row by id, or None."""
    return (model_query(models.FloatingIP, session)
            .filter_by(id=floatingip_id)
            .first())
def _floatingip_get_all(session):
    """Return an unfiltered FloatingIP query."""
    return model_query(models.FloatingIP, session)
def fip_get_all_by_queries(queries):
    """Returns Floating IPs filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    :raises: BlazarDBInvalidFilter for malformed queries or unknown
        columns, BlazarDBInvalidFilterOperator for unknown operators.
    """
    fips_query = model_query(models.FloatingIP, get_session())
    # Maps a textual operator to [sqlalchemy operator name, negated python
    # comparison]. Only element [0] is used here; the lambdas are kept for
    # parity with host_get_all_by_queries.
    oper = {
        '<': ['lt', lambda a, b: a >= b],
        '>': ['gt', lambda a, b: a <= b],
        '<=': ['le', lambda a, b: a > b],
        '>=': ['ge', lambda a, b: a < b],
        '==': ['eq', lambda a, b: a != b],
        '!=': ['ne', lambda a, b: a == b],
    }
    for query in queries:
        # Queries are "key op value"; value may itself contain spaces.
        try:
            key, op, value = query.split(' ', 2)
        except ValueError:
            raise db_exc.BlazarDBInvalidFilter(query_filter=query)
        column = getattr(models.FloatingIP, key, None)
        if column is not None:
            if op == 'in':
                filt = column.in_(value.split(','))
            else:
                if op in oper:
                    op = oper[op][0]
                try:
                    # Resolve e.g. 'lt' to whichever of lt/lt_/__lt__ the
                    # column actually implements.
                    attr = [e for e in ['%s', '%s_', '__%s__']
                            if hasattr(column, e % op)][0] % op
                except IndexError:
                    raise db_exc.BlazarDBInvalidFilterOperator(
                        filter_operator=op)
                if value == 'null':
                    value = None
                filt = getattr(column, attr)(value)
            fips_query = fips_query.filter(filt)
        else:
            # Unlike hosts, floating IPs have no extra-capability fallback.
            raise db_exc.BlazarDBInvalidFilter(query_filter=query)
    return fips_query.all()
def reservable_fip_get_all_by_queries(queries):
    """Returns reservable fips filtered by an array of queries.

    :param queries: array of queries "key op value" where op can be
        http://docs.sqlalchemy.org/en/rel_0_7/core/expression_api.html
        #sqlalchemy.sql.operators.ColumnOperators
    """
    # BUG FIX: build a new list instead of mutating the caller's `queries`
    # with append(), which leaked the extra clause back to the caller.
    return fip_get_all_by_queries(queries + ['reservable == 1'])
def floatingip_get(floatingip_id):
    """Return the floating IP with the given id, or None."""
    session = get_session()
    return _floatingip_get(session, floatingip_id)
def floatingip_list():
    """Return every FloatingIP row."""
    return _floatingip_get_all(get_session()).all()
def floatingip_create(values):
    """Create a FloatingIP from the given column values."""
    floatingip = models.FloatingIP()
    floatingip.update(values.copy())
    session = get_session()
    with session.begin():
        try:
            floatingip.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided (e.columns).
            raise db_exc.BlazarDBDuplicateEntry(
                model=floatingip.__class__.__name__, columns=e.columns)
    return floatingip_get(floatingip.id)
def floatingip_destroy(floatingip_id):
    """Delete a floating IP; raise if it does not exist."""
    session = get_session()
    with session.begin():
        floatingip = _floatingip_get(session, floatingip_id)
        if not floatingip:
            raise db_exc.BlazarDBNotFound(id=floatingip_id,
                                          model='FloatingIP')
        session.delete(floatingip)
# Resource Properties
def _resource_property_get(session, resource_type, property_name):
    """Fetch one ResourceProperty row by (resource_type, property_name)."""
    return (model_query(models.ResourceProperty, session)
            .filter_by(resource_type=resource_type,
                       property_name=property_name)
            .first())
def resource_property_get(resource_type, property_name):
    """Return the named resource property, or None."""
    session = get_session()
    return _resource_property_get(session, resource_type, property_name)
def resource_properties_list(resource_type):
    """List distinct (name, private, value) rows for a resource type.

    :raises: BlazarDBResourcePropertiesNotEnabled for unknown types.
    """
    if resource_type not in RESOURCE_PROPERTY_MODELS:
        raise db_exc.BlazarDBResourcePropertiesNotEnabled(
            resource_type=resource_type)
    resource_model = RESOURCE_PROPERTY_MODELS[resource_type]
    session = get_session()
    with session.begin():
        query = session.query(
            models.ResourceProperty.property_name,
            models.ResourceProperty.private,
            resource_model.capability_value)
        return query.join(resource_model).distinct().all()
def _resource_property_create(session, values):
    """Insert a ResourceProperty row and return the refreshed record."""
    resource_property = models.ResourceProperty()
    resource_property.update(values.copy())
    with session.begin():
        try:
            resource_property.save(session=session)
        except common_db_exc.DBDuplicateEntry as e:
            # Surface which columns collided (e.columns).
            raise db_exc.BlazarDBDuplicateEntry(
                model=resource_property.__class__.__name__,
                columns=e.columns)
    return resource_property_get(values.get('resource_type'),
                                 values.get('property_name'))
def resource_property_create(values):
    """Create a resource property from the given column values."""
    session = get_session()
    return _resource_property_create(session, values)
def resource_property_update(resource_type, property_name, values):
    """Update a resource property and return the refreshed record.

    :raises: BlazarDBResourcePropertiesNotEnabled for unknown types,
        BlazarDBInvalidResourceProperty if the property does not exist.
    """
    if resource_type not in RESOURCE_PROPERTY_MODELS:
        raise db_exc.BlazarDBResourcePropertiesNotEnabled(
            resource_type=resource_type)
    session = get_session()
    with session.begin():
        resource_property = _resource_property_get(
            session, resource_type, property_name)
        if not resource_property:
            raise db_exc.BlazarDBInvalidResourceProperty(
                property_name=property_name,
                resource_type=resource_type)
        resource_property.update(values.copy())
        resource_property.save(session=session)
    return resource_property_get(resource_type, property_name)
def _resource_property_get_or_create(session, resource_type, property_name):
    """Return the named property, creating it first if missing."""
    existing = _resource_property_get(
        session, resource_type, property_name)
    if existing:
        return existing
    return resource_property_create({
        'resource_type': resource_type,
        'property_name': property_name})
def resource_property_get_or_create(resource_type, property_name):
    """Public wrapper around _resource_property_get_or_create."""
    session = get_session()
    return _resource_property_get_or_create(
        session, resource_type, property_name)
| |
import numpy as np
import fcts
import random
import copy
import time
import matplotlib.pyplot as plt
class ABSIndividual:
    '''
    Individual (food source) of the artificial bee swarm algorithm.
    '''

    def __init__(self, fct, vardim, bound):
        '''
        fct: objective function being minimized
        vardim: dimension of variables
        bound: 2 x vardim array; row 0 = lower bounds, row 1 = upper bounds
        '''
        self.vardim = vardim
        self.bound = bound
        self.fitness = 0.
        self.trials = 0
        self.fct = fct

    def generate(self):
        '''
        Generate a random chromosome uniformly inside the bounds.
        '''
        # BUG FIX: the original shadowed the builtin `len` with a local;
        # also replaced the per-component loop with a vectorized draw.
        rnd = np.random.random(size=self.vardim)
        lower = self.bound[0, :self.vardim]
        upper = self.bound[1, :self.vardim]
        self.chrom = lower + (upper - lower) * rnd

    def calculateFitness(self):
        '''
        Calculate the fitness of the chromosome: fitness is the negated
        objective, so maximizing fitness minimizes the objective.
        '''
        self.fitness = -self.fct(self.chrom)
class ArtificialBeeSwarm:
    '''
    Artificial bee swarm (ABC) optimizer that minimizes ``fct`` over a box.

    Fitness is the negated objective, so maximizing fitness minimizes fct.
    '''

    def __init__(self, fct, sizepop, vardim, bound, MAXGEN):
        '''
        fct: objective function to minimize
        sizepop: population size (also used as the number of food sources)
        vardim: dimension of variables
        bound: 2 x vardim array of [lower; upper] boundaries
        MAXGEN: termination condition (number of generations)
        '''
        self.sizepop = sizepop
        self.vardim = vardim
        self.bound = bound
        # BUG FIX: the food-source count was hard-coded to 100, which
        # crashed for sizepop < 100 and ignored individuals beyond 100.
        # Tie it to the population size instead.
        self.foodSource = sizepop
        self.MAXGEN = MAXGEN
        # params: [trial limit before a source is abandoned, C]
        self.params = [vardim * self.foodSource, 1]
        self.population = []
        self.fitness = np.zeros((self.sizepop, 1))
        self.trace = np.zeros((self.MAXGEN, 2))
        self.fct = fct

    def initialize(self):
        '''Create the initial random population.'''
        for _ in range(self.sizepop):
            ind = ABSIndividual(self.fct, self.vardim, self.bound)
            ind.generate()
            self.population.append(ind)

    def evaluation(self):
        '''Evaluate the fitness of every individual.'''
        for i in range(self.sizepop):
            self.population[i].calculateFitness()
            self.fitness[i] = self.population[i].fitness

    def _neighbour(self, i, j):
        '''Build a clipped neighbour of individual i guided by j and best.'''
        vi = copy.deepcopy(self.population[i])
        vi.chrom = (vi.chrom
                    + np.random.uniform(-1, 1, self.vardim)
                    * (vi.chrom - self.population[j].chrom)
                    + np.random.uniform(0.0, self.params[1], self.vardim)
                    * (self.best.chrom - vi.chrom))
        # Keep the candidate inside the search box.
        np.clip(vi.chrom, self.bound[0], self.bound[1], out=vi.chrom)
        vi.calculateFitness()
        return vi

    def employedBeePhase(self):
        '''Employed bee phase: each food source tries one neighbour.'''
        for i in range(self.foodSource):
            # BUG FIX: np.random.random_integers() was removed from NumPy;
            # randint(0, n) draws from the same range [0, n-1].
            j = np.random.randint(0, self.foodSource)
            while j == i:
                j = np.random.randint(0, self.foodSource)
            vi = self._neighbour(i, j)
            if vi.fitness > self.fitness[i]:
                self.population[i] = vi
                self.fitness[i] = vi.fitness
                if vi.fitness > self.best.fitness:
                    self.best = vi
            else:
                self.population[i].trials += 1

    def onlookerBeePhase(self):
        '''Onlooker bee phase: fitter sources attract more update attempts.'''
        accuFitness = np.zeros((self.foodSource, 1))
        # NOTE(review): this normalization assumes maxFitness > 0;
        # for objectives that are non-negative everywhere the fitness is
        # <= 0 and the selection probabilities degenerate -- confirm.
        maxFitness = np.max(self.fitness)
        for i in range(self.foodSource):
            accuFitness[i] = 0.9 * self.fitness[i] / maxFitness + 0.1
        for i in range(self.foodSource):
            for fi in range(self.foodSource):
                if random.random() < accuFitness[i]:
                    j = np.random.randint(0, self.foodSource)
                    while j == fi:
                        j = np.random.randint(0, self.foodSource)
                    vi = self._neighbour(i, j)
                    if vi.fitness > self.fitness[fi]:
                        self.population[fi] = vi
                        self.fitness[fi] = vi.fitness
                        if vi.fitness > self.best.fitness:
                            self.best = vi
                    else:
                        self.population[fi].trials += 1
                    # One onlooker attempt per source i.
                    break

    def scoutBeePhase(self):
        '''Scout bee phase: abandon exhausted sources and re-seed them.'''
        for i in range(self.foodSource):
            if self.population[i].trials > self.params[0]:
                self.population[i].generate()
                self.population[i].trials = 0
                self.population[i].calculateFitness()
                self.fitness[i] = self.population[i].fitness

    def solve(self):
        '''Run the optimization and return the best objective value found.'''
        self.t = 0
        self.initialize()
        self.evaluation()
        bestIndex = np.argmax(self.fitness)
        # BUG FIX: the original copied population[bestIndex - 1], which is
        # the wrong individual (and wraps to the last one for bestIndex 0).
        self.best = copy.deepcopy(self.population[bestIndex])
        self.avefitness = np.mean(self.fitness)
        self.trace[self.t, 0] = (1 - self.best.fitness) / self.best.fitness
        self.trace[self.t, 1] = (1 - self.avefitness) / self.avefitness
        print("Generation %d: optimal function value is: %f; "
              "average function value is %f" % (
                  self.t, self.trace[self.t, 0], self.trace[self.t, 1]))
        while self.t < self.MAXGEN - 1:
            self.t += 1
            self.employedBeePhase()
            self.onlookerBeePhase()
            self.scoutBeePhase()
            best = np.max(self.fitness)
            # BUG FIX: re-sync self.best with the population so that
            # improvements found by the scout phase are not lost.
            if best > self.best.fitness:
                bestIndex = np.argmax(self.fitness)
                self.best = copy.deepcopy(self.population[bestIndex])
            print(best)
        print("Optimal function value is: %f; " % self.best.fitness)
        print("Optimal solution is:")
        print(self.best.chrom)
        return -self.best.fitness

    def printResult(self):
        '''
        Plot the optimal and average trace of the abs algorithm.
        '''
        x = np.arange(0, self.MAXGEN)
        y1 = self.trace[:, 0]
        y2 = self.trace[:, 1]
        plt.plot(x, y1, 'r', label='optimal value')
        plt.plot(x, y2, 'g', label='average value')
        plt.xlabel("Iteration")
        plt.ylabel("function value")
        plt.title("Artificial Bee Swarm algorithm for function optimization")
        plt.legend()
        plt.show()
if __name__ == "__main__":
    # BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is
    # the documented replacement. Also avoid shadowing the builtin `abs`.
    start = time.perf_counter()
    bound = np.tile([[-10], [10]], 30)
    solver = ArtificialBeeSwarm(fcts.griewank, 100, 30, bound, 1000)
    solver.solve()
    print(time.perf_counter() - start)
| |
# Copyright 2014 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils
from nova import exception
from nova.objects import base
from nova.objects import fields
from nova.virt import hardware
def all_things_equal(obj_a, obj_b):
    """Field-wise equality: same set/unset state and equal values."""
    for name in obj_a.fields:
        a_set = obj_a.obj_attr_is_set(name)
        b_set = obj_b.obj_attr_is_set(name)
        # A field set on one side but not the other means "not equal".
        if a_set != b_set:
            return False
        # Compare values only when the field is set on both sides.
        if a_set and getattr(obj_a, name) != getattr(obj_b, name):
            return False
    return True
@base.NovaObjectRegistry.register
class NUMACell(base.NovaObject):
    """A single NUMA cell: its CPUs, memory, pinning state and pages."""
    # Version 1.0: Initial version
    # Version 1.1: Added pinned_cpus and siblings fields
    # Version 1.2: Added mempages field
    VERSION = '1.2'

    fields = {
        'id': fields.IntegerField(read_only=True),
        'cpuset': fields.SetOfIntegersField(),
        'memory': fields.IntegerField(),
        'cpu_usage': fields.IntegerField(default=0),
        'memory_usage': fields.IntegerField(default=0),
        'pinned_cpus': fields.SetOfIntegersField(),
        'siblings': fields.ListOfSetsOfIntegersField(),
        'mempages': fields.ListOfObjectsField('NUMAPagesTopology'),
    }

    def __eq__(self, other):
        return all_things_equal(self, other)

    def __ne__(self, other):
        return not (self == other)

    @property
    def free_cpus(self):
        """Set of CPU ids in this cell that are not pinned."""
        return self.cpuset - self.pinned_cpus or set()

    @property
    def free_siblings(self):
        """Free CPUs of each thread-sibling set."""
        return [sibling_set & self.free_cpus
                for sibling_set in self.siblings]

    @property
    def avail_cpus(self):
        """Number of unpinned CPUs."""
        return len(self.free_cpus)

    @property
    def avail_memory(self):
        """Unused memory, in the same unit as `memory`."""
        return self.memory - self.memory_usage

    def pin_cpus(self, cpus):
        """Pin *cpus* on this cell.

        :raises: CPUPinningUnknown if a CPU is not in this cell's cpuset,
                 CPUPinningInvalid if a CPU is already pinned.
        """
        if cpus - self.cpuset:
            # BUG FIX: report the cell's known CPU set in the error, not
            # the currently-pinned set.
            raise exception.CPUPinningUnknown(requested=list(cpus),
                                              cpuset=list(self.cpuset))
        if self.pinned_cpus & cpus:
            raise exception.CPUPinningInvalid(requested=list(cpus),
                                              pinned=list(self.pinned_cpus))
        self.pinned_cpus |= cpus

    def unpin_cpus(self, cpus):
        """Unpin *cpus* on this cell.

        :raises: CPUPinningUnknown if a CPU is not in this cell's cpuset,
                 CPUPinningInvalid if a CPU is not currently pinned.
        """
        if cpus - self.cpuset:
            # BUG FIX: report the cell's known CPU set in the error, not
            # the currently-pinned set.
            raise exception.CPUPinningUnknown(requested=list(cpus),
                                              cpuset=list(self.cpuset))
        if (self.pinned_cpus & cpus) != cpus:
            raise exception.CPUPinningInvalid(requested=list(cpus),
                                              pinned=list(self.pinned_cpus))
        self.pinned_cpus -= cpus

    def _to_dict(self):
        # Legacy dict format kept for backward compatibility.
        return {
            'id': self.id,
            'cpus': hardware.format_cpu_spec(
                self.cpuset, allow_ranges=False),
            'mem': {
                'total': self.memory,
                'used': self.memory_usage},
            'cpu_usage': self.cpu_usage}

    @classmethod
    def _from_dict(cls, data_dict):
        """Build a cell from the legacy dict format; missing keys default."""
        cpuset = hardware.parse_cpu_spec(
            data_dict.get('cpus', ''))
        cpu_usage = data_dict.get('cpu_usage', 0)
        memory = data_dict.get('mem', {}).get('total', 0)
        memory_usage = data_dict.get('mem', {}).get('used', 0)
        cell_id = data_dict.get('id')
        return cls(id=cell_id, cpuset=cpuset, memory=memory,
                   cpu_usage=cpu_usage, memory_usage=memory_usage,
                   mempages=[], pinned_cpus=set(), siblings=[])

    def can_fit_hugepages(self, pagesize, memory):
        """Returns whether memory can fit into hugepages size

        :param pagesize: a page size in KiB
        :param memory: a memory size asked to fit in KiB
        :returns: whether memory can fit in hugepages
        :raises: MemoryPageSizeNotSupported if page size not supported
        """
        for pages in self.mempages:
            if pages.size_kb == pagesize:
                return (memory <= pages.free_kb and
                        (memory % pages.size_kb) == 0)
        raise exception.MemoryPageSizeNotSupported(pagesize=pagesize)
@base.NovaObjectRegistry.register
class NUMAPagesTopology(base.NovaObject):
    """Inventory of memory pages of a single size on a NUMA cell."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'size_kb': fields.IntegerField(),
        'total': fields.IntegerField(),
        'used': fields.IntegerField(default=0),
    }

    def __eq__(self, other):
        return all_things_equal(self, other)

    def __ne__(self, other):
        return not self.__eq__(other)

    @property
    def free(self):
        """Returns the number of avail pages."""
        return self.total - self.used

    @property
    def free_kb(self):
        """Returns the avail memory size in KiB."""
        return self.size_kb * self.free
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class NUMATopology(base.NovaObject,
                   base.NovaObjectDictCompat):
    """An ordered collection of NUMA cells forming a topology."""
    # Version 1.0: Initial version
    # Version 1.1: Update NUMACell to 1.1
    # Version 1.2: Update NUMACell to 1.2
    VERSION = '1.2'

    fields = {
        'cells': fields.ListOfObjectsField('NUMACell'),
    }

    @classmethod
    def obj_from_primitive(cls, primitive, context=None):
        """Deserialize from either the object or the legacy dict format."""
        if 'nova_object.name' not in primitive:
            # NOTE(sahid): This compatibility code needs to stay until we can
            # guarantee that there are no cases of the old format stored in
            # the database (or forever, if we can never guarantee that).
            return NUMATopology._from_dict(primitive)
        return super(NUMATopology, cls).obj_from_primitive(
            primitive, context=context)

    def _to_json(self):
        """Serialize to a JSON string via the object primitive form."""
        return jsonutils.dumps(self.obj_to_primitive())

    @classmethod
    def obj_from_db_obj(cls, db_obj):
        """Build a topology from its JSON database representation."""
        return cls.obj_from_primitive(jsonutils.loads(db_obj))

    def __len__(self):
        """Defined so that boolean testing works the same as for lists."""
        return len(self.cells)

    def _to_dict(self):
        # TODO(sahid): needs to be removed.
        return {'cells': [cell._to_dict() for cell in self.cells]}

    @classmethod
    def _from_dict(cls, data_dict):
        """Build a topology from the legacy dict format."""
        cell_dicts = data_dict.get('cells', [])
        return cls(cells=[NUMACell._from_dict(c) for c in cell_dicts])
@base.NovaObjectRegistry.register
class NUMATopologyLimits(base.NovaObject):
    """Over-commit ratios applied when fitting instances to NUMA cells."""
    # Version 1.0: Initial version
    VERSION = '1.0'
    fields = {
        'cpu_allocation_ratio': fields.FloatField(),
        'ram_allocation_ratio': fields.FloatField(),
    }
    def to_dict_legacy(self, host_topology):
        """Render per-cell absolute limits in the legacy dict format."""
        cells = []
        for cell in host_topology.cells:
            cells.append(
                {'cpus': hardware.format_cpu_spec(
                    cell.cpuset, allow_ranges=False),
                 'mem': {'total': cell.memory,
                         'limit': cell.memory * self.ram_allocation_ratio},
                 'cpu_limit': len(cell.cpuset) * self.cpu_allocation_ratio,
                 'id': cell.id})
        return {'cells': cells}
    @classmethod
    def obj_from_db_obj(cls, db_obj):
        """Load limits from either the object or the legacy dict format."""
        if 'nova_object.name' in db_obj:
            obj_topology = cls.obj_from_primitive(db_obj)
        else:
            # NOTE(sahid): This compatibility code needs to stay until we can
            # guarantee that all compute nodes are using RPC API => 3.40.
            # Recover the ratios from the first cell's absolute limits;
            # presumably all cells share the same ratios -- confirm.
            cell = db_obj['cells'][0]
            ram_ratio = cell['mem']['limit'] / float(cell['mem']['total'])
            cpu_ratio = cell['cpu_limit'] / float(len(hardware.parse_cpu_spec(
                cell['cpus'])))
            obj_topology = NUMATopologyLimits(
                cpu_allocation_ratio=cpu_ratio,
                ram_allocation_ratio=ram_ratio)
        return obj_topology
| |
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# auth: Philip J Grabner <grabner@cadit.com>
# date: 2015/12/01
# copy: (C) Copyright 2015-EOT Cadit Inc., All Rights Reserved.
#------------------------------------------------------------------------------
import logging
import re
import json
import types
import inspect
import copy
import yaml
import six
from aadict import aadict
import asset
import pyramid.httpexceptions
import morph
from .scope import Scope
from .i18n import _
from . import util
#------------------------------------------------------------------------------
log = logging.getLogger(__name__)
# TODO: here, ``scalar`` really means ``primitive``... replace.
# todo: should `union` really exist?... it was only implemented for
# "symmetry"...
# ==> perhapse the "union" name is also a bit misleading???
# in XMLSchema, "xsd:union" means the same as "one-of" here... ugh.
#------------------------------------------------------------------------------
class _aadict(aadict):
  '''
  An `aadict` where assigning ``None`` to an attribute deletes it, and
  all rich comparisons delegate to a ``__cmp__`` method.
  '''
  def __setattr__(self, key, value):
    # Setting an attribute to None removes it entirely, so "unset" and
    # "None" are the same state.
    if value is None:
      return self.__delattr__(key)
    return super(_aadict, self).__setattr__(key, value)
  # NOTE(review): these rely on a __cmp__ implementation that is not
  # visible here -- presumably provided by `aadict` or a subclass; confirm.
  def __eq__(self, target): return self.__cmp__(target) == 0
  def __ne__(self, target): return self.__cmp__(target) != 0
  def __lt__(self, target): return self.__cmp__(target) < 0
  def __le__(self, target): return self.__cmp__(target) <= 0
  def __gt__(self, target): return self.__cmp__(target) > 0
  def __ge__(self, target): return self.__cmp__(target) >= 0
#------------------------------------------------------------------------------
class Type(_aadict):
  '''
  Represents a complex type definition. Types can have the following
  attributes:

  :Attributes:

  name : str
    The name of the type.

  doc : str, optional, default: null
    A description of the type.

  base : str
    This type's base type, which can be one of:

    * `Type.SCALAR` (``scalar``)
    * `Type.COMPOUND` (``compound``)
    * `Type.LIST` (``list``)
    * `Type.DICT` (``dict``)
    * `Type.CONSTANT` (``constant``)
    * `Type.UNKNOWN` (``unknown``)
    * `Type.EXTENSION` (``extension``)

  value : any, default: null
    Additional type information for `constant` and `compound`
    types. The `value` itself that depends on type, as follows:

    * dict: a list of TypeRef's.
    * list: the type of item that this list comprises.
    * oneof: a list of alternate Type's.
    * union: a list of required Type's.
    * constant: the value of the constant, e.g. ``"female"``.
    * extension: a TypeRef.

  children : iterator
    This is a "helper" attribute that returns an iterator of compound
    types' `value`. For `list` types, this will only yield one
    value. For non-compound types, this will not yield anything.

    IMPORTANT: for some reason, the `children` setter does not seem
    to work as expected... use `Type.setChildren` instead.

    The `children` attribute is "settable". For inappropriate types
    (e.g. scalar types), setting it will be ignored.

  meta : aadict, default: {}
    TODO: this is not actually implemented!

    Some meta-information about the type. **All** values are
    optional. Currently, the following are used by `pyramid_describe`,
    but this may change and is also available for external libraries
    to use as needed:

    * ``referenced``: a list of endpoints where this type is referenced
    * ``defined``: a list of endpoints where this type is defined
    * ``source``: where this type was defined (extensions only)
    * ``classes``: a list of classes to augment this type's rendering

    Note that meta-information is ignored when comparing Type's for
    equality.

  TODO: there is a bit of a "peculiarity" that extension types have a
  `value` of TypeRef... (instead of Type). this is because "params"
  need to be stored somewhere, but Type's don't *have* params.
  probably better would be to store the Type in `base` and put the
  params in `value`...
  '''

  # `base` / `name` well-known values:
  SCALAR = 'scalar'
  COMPOUND = 'compound'
  LIST = 'list'
  DICT = 'dict'
  ONEOF = 'oneof'
  UNION = 'union'
  REF = 'ref'
  CONSTANT = 'constant'
  EXTENSION = 'extension'
  UNKNOWN = 'unknown'
  ANY = 'any'
  BYTE = 'byte'
  BYTES = 'bytes'
  BOOLEAN = 'boolean'
  INTEGER = 'integer'
  NUMBER = 'number'
  STRING = 'string'
  NULL = 'null'

  #----------------------------------------------------------------------------
  def __init__(self, base=None, name=None, doc=None, value=None, meta=None, *args, **kw):
    # TODO: enable this...
    # if not base:
    #   raise ValueError('type "base" parameter is required')
    # if not name:
    #   raise ValueError('type "name" parameter is required')
    # note: falsy attributes are simply not stored (`_aadict` drops
    # `None` assignments anyway); `meta` is always normalized to aadict
    if base:
      kw['base'] = base
    if name:
      kw['name'] = name
    if doc:
      kw['doc'] = doc
    if value:
      kw['value'] = value
    kw['meta'] = aadict(meta or {})
    super(Type, self).__init__(*args, **kw)

  #----------------------------------------------------------------------------
  def clone(self):
    '''
    Returns a copy of this Type; `value` and `meta` are deep-copied,
    scalar attributes are shared.
    '''
    value = copy.deepcopy(self.value)
    meta = copy.deepcopy(self.meta)
    return Type(
      base=self.base, name=self.name, doc=self.doc, value=value, meta=meta)

  #----------------------------------------------------------------------------
  @property
  def children(self):
    '''
    Generator over this type's sub-Type/TypeRef objects (see class
    docstring); yields nothing for non-compound types.
    '''
    if self.value is None or self.value == []:
      return
    # NOTE: the original implementation nested an identical, redundant
    # `isinstance(self.value, (Type, TypeRef))` check here; collapsed.
    if isinstance(self.value, (Type, TypeRef)):
      # single-child types (e.g. list/ref/extension)
      yield self.value
      return
    if isinstance(self.value, (list, tuple)):
      for item in self.value:
        if isinstance(item, (Type, TypeRef)):
          yield item
      return

  #----------------------------------------------------------------------------
  def is_dict(self):
    # true for both `compound:dict` and custom-dict (`dict:<Name>`) types
    return ( self.base == Type.COMPOUND and self.name == Type.DICT ) \
      or self.base == Type.DICT

  #----------------------------------------------------------------------------
  def is_list(self):
    return ( self.base == Type.COMPOUND and self.name == Type.LIST ) \
      or self.base == Type.LIST

  #----------------------------------------------------------------------------
  def is_constant(self):
    return self.base == Type.CONSTANT

  #----------------------------------------------------------------------------
  def is_scalar(self):
    return self.base == Type.SCALAR

  #----------------------------------------------------------------------------
  # todo: why does this not get called??? __setattr__ is called instead. ugh.
  #       hence the reason that it is overridden here... fix!
  #       ugh. solve this `.children` thing...:
  #         a) using generators seems to cause more problems than
  #            it is worth... move .children to return a list.
  #         b) make setting `.children` work.
  @children.setter
  def children(self, value):
    self.setChildren(value)
  def __setattr__(self, key, value):
    # route `children` assignments explicitly (see todo above)
    if key == 'children':
      return self.setChildren(value)
    return super(Type, self).__setattr__(key, value)
  def setChildren(self, value):
    '''
    Replaces this type's child types with `value` (an iterable of
    Type/TypeRef); raises TypeError if this type cannot have children
    or is given too many. Returns `self`.
    '''
    if not value:
      self.value = None
      return self
    value = list(value)
    # single-child types...
    if self.base == Type.EXTENSION \
        or ( self.base == Type.COMPOUND and self.name in (Type.LIST, Type.REF) ) \
        or ( self.base in (Type.LIST, Type.REF) ):
      if len(value) > 1:
        raise TypeError('type %r only supports one child' % (self,))
      if len(value) < 1:
        self.value = None
      else:
        self.value = value[0]
      return self
    # multi-child types...
    elif ( self.base == Type.COMPOUND and self.name in (Type.ONEOF, Type.UNION, Type.DICT) )\
        or ( self.base in (Type.ONEOF, Type.UNION, Type.DICT) ):
      self.value = value
    else:
      raise TypeError('type %r does not support children' % (self,))
    return self

  #----------------------------------------------------------------------------
  def __cmp__(self, target):
    # python-2 style three-way comparison; note that `meta` is
    # deliberately excluded from equality (see class docstring)
    if not isinstance(target, self.__class__):
      return cmp(self.__class__, target.__class__)
    for attr in ('base', 'name', 'doc', 'value'):
      cur = cmp(getattr(self, attr), getattr(target, attr))
      if cur != 0:
        return cur
    return 0

  #----------------------------------------------------------------------------
  def tostruct(self, ref=False):
    '''
    Returns a JSONifiable structural representation of this Type.
    Note that all `meta` information is lost. When `ref` is truthy,
    this is being rendered as a referenced (nested) type, and less
    detail is emitted.
    '''
    ret = dict(name=self.name)
    if not ref and self.doc:
      ret['doc'] = self.doc
    if self.base == Type.CONSTANT:
      ret['params'] = dict(constant=True, value=self.value)
    elif self.value:
      # `gen` selects how `value` is serialized: as a single item or a list
      gen = None
      if self.base == Type.COMPOUND and self.name in (Type.LIST, Type.REF):
        gen = 'item'
      elif self.base == Type.COMPOUND and self.name in (Type.ONEOF, Type.UNION, Type.DICT):
        gen = 'list'
      elif not ref and self.base in (Type.LIST, Type.REF):
        gen = 'item'
      elif not ref and self.base in (Type.ONEOF, Type.UNION, Type.DICT):
        gen = 'list'
      if gen == 'item':
        ret['params'] = dict(value=self.value.tostruct(ref=True))
      elif gen == 'list':
        ret['params'] = dict(value=[v.tostruct(ref=True) for v in self.value])
    if not ref and self.base in (Type.LIST, Type.REF, Type.ONEOF, Type.UNION, Type.DICT):
      ret['base'] = self.base
    return ret

  #----------------------------------------------------------------------------
  def __repr__(self):
    ret = '<Type ' + self.base + ':' + self.name
    if ( self.base == Type.CONSTANT and self.name == Type.NULL ) or \
       ( self.value and \
         ( self.base in (Type.CONSTANT, Type.DICT, Type.EXTENSION) or \
           ( self.base == Type.COMPOUND \
             and self.name in (Type.ONEOF, Type.UNION, Type.LIST, Type.DICT, Type.REF) ) ) ):
      ret += ' value='
      # sort plain-dict values by key so the repr is deterministic
      if isinstance(self.value, dict) and not isinstance(self.value, (Type, TypeRef)):
        ret += '{' + ', '.join(
          repr(k) + ': ' + repr(self.value[k])
          for k in sorted(self.value.keys())) + '}'
      else:
        ret += repr(self.value)
    if self.doc:
      ret += ' doc=%r' % (self.doc,)
    return ret + '>'
#------------------------------------------------------------------------------
class TypeRef(_aadict):
  '''
  A `TypeRef` object represents a reference to a `Type` object. This
  is typically used within dict-like Types that have named references
  to other Types. A TypeRef can have the following attributes:

  :Attributes:

  type : Type
    The `Type` object being referenced.

  name : str, optional
    The symbolic name attributed to this reference (may be None if
    unnamed).

  doc : str, optional
    Any additional documentation associated with this reference
    instance.

  params : dict, optional
    A lookup table of optional parameters, eg:
      optional : bool
      default  : any
  '''

  #----------------------------------------------------------------------------
  def __init__(self, type=None, name=None, doc=None, params=None, *args, **kw):
    # TODO: enable this...
    # if not type:
    #   raise ValueError('TypeRef "type" parameter is required')
    # only truthy attributes are stored
    for attr, val in (('type', type), ('name', name),
                      ('doc', doc), ('params', params)):
      if val:
        kw[attr] = val
    super(TypeRef, self).__init__(*args, **kw)

  #----------------------------------------------------------------------------
  def clone(self):
    # `type` and `params` are deep-copied; `name` and `doc` are shared
    return TypeRef(
      type   = copy.deepcopy(self.type),
      name   = self.name,
      doc    = self.doc,
      params = copy.deepcopy(self.params))

  #----------------------------------------------------------------------------
  @property
  def children(self):
    # yields the single referenced Type, if any
    if self.type:
      yield self.type

  #----------------------------------------------------------------------------
  # a TypeRef is never itself a dict/list/constant/scalar -- the
  # referenced `type` is what carries that classification
  def is_dict(self):
    return False
  def is_list(self):
    return False
  def is_constant(self):
    return False
  def is_scalar(self):
    return False

  #----------------------------------------------------------------------------
  def tostruct(self, ref=False):
    '''
    Returns a JSONifiable structural representation of this reference.
    '''
    ret = {'type': self.type.tostruct(ref=True)}
    if self.params:
      ret['params'] = dict(self.params)
    if self.name:
      ret['name'] = self.name
    if self.doc:
      ret['doc'] = self.doc
    return ret

  #----------------------------------------------------------------------------
  def __cmp__(self, target):
    # python-2 style three-way comparison over all reference attributes
    if not isinstance(target, self.__class__):
      return cmp(self.__class__, target.__class__)
    for attr in ('type', 'name', 'doc', 'params'):
      ret = cmp(getattr(self, attr), getattr(target, attr))
      if ret:
        return ret
    return 0

  #----------------------------------------------------------------------------
  def __repr__(self):
    parts = ['<TypeRef ']
    if self.name:
      parts.append(self.name + '=')
    parts.append(repr(self.type))
    if self.params:
      parts.append(' params=%r' % (self.params,))
    if self.doc:
      parts.append(' doc=%r' % (self.doc,))
    parts.append('>')
    return ''.join(parts)
#------------------------------------------------------------------------------
whitespace_cre = re.compile(r'\s+')
hex_cre = re.compile(r'([a-f0-9][a-f0-9])+', re.IGNORECASE)
# characters that may start a constant literal: digit, quote, mapping/list
# opener, backslash, or minus sign.
# NOTE: the previous pattern, r'[0-9"\'-{\\[]', placed an unescaped `-`
# between `'` and `{`, accidentally creating the character *range*
# 0x27-0x7B (which matched all ASCII letters and much punctuation); the
# `-` is now placed last in the class so it matches literally.
constant_cre = re.compile(r'[0-9"\'{\[\\-]')
# todo: move this into TypeRegistry as a configurable option...
#       ==> and make it depend on declared aliases, etc...
symbol_cre = re.compile(r'[a-z_][a-z0-9_.]*', re.IGNORECASE)
#------------------------------------------------------------------------------
class StringWalker(object):
  '''
  A light-weight cursor over a string: tracks the current `index` into
  the wrapped string and exposes peek/read/seek primitives used by the
  type parser. The walker is truthy while unconsumed input remains.
  '''

  #----------------------------------------------------------------------------
  def __init__(self, string, *args, **kw):
    super(StringWalker, self).__init__(*args, **kw)
    self._string = string
    self.index = 0

  #----------------------------------------------------------------------------
  @property
  def string(self):
    # the not-yet-consumed remainder of the wrapped string
    return self._string[self.index:]

  #----------------------------------------------------------------------------
  @property
  def length(self):
    # number of unconsumed characters; clamped at zero in case `seek`
    # moved the index past the end of the string
    ret = len(self._string) - self.index
    if ret <= 0:
      return 0
    return ret

  #----------------------------------------------------------------------------
  def __bool__(self):
    # truthy while there is unconsumed input
    return self.length > 0
  # NOTE: python 2 uses `__nonzero__` (not `__bool__`) for truth testing;
  # without this alias, checks such as `if not source.eatws():` in
  # TypeRegistry would always see a truthy walker under python 2, even
  # when the input is exhausted.
  __nonzero__ = __bool__

  #----------------------------------------------------------------------------
  def eatws(self):
    '''
    Consumes any leading whitespace; returns `self` (chainable).
    '''
    match = whitespace_cre.match(self.string)
    if match:
      self.index += len(match.group(0))
    return self

  #----------------------------------------------------------------------------
  def peek(self, length=1):
    '''
    Returns (without consuming) the next `length` characters (or fewer,
    if near the end of input).
    '''
    if length <= 0:
      raise ValueError('length must be positive')
    return self.string[:length]

  #----------------------------------------------------------------------------
  def startswith(self, string):
    # true if the unconsumed input begins with `string`
    return self.peek(len(string)) == string

  #----------------------------------------------------------------------------
  def read(self, length=1):
    '''
    Consumes and returns the next `length` characters (or fewer, if
    near the end of input).
    '''
    if length <= 0:
      raise ValueError('length must be positive')
    ret = self.string[:length]
    self.index += length
    return ret

  #----------------------------------------------------------------------------
  def seek(self, position):
    # moves the cursor to an absolute `position`; returns `self`
    self.index = position
    return self
#------------------------------------------------------------------------------
class TypeRegistry(object):
  '''
  Parses, registers, and resolves the `Type` definitions used by
  pyramid-describe, including alias resolution and loading of
  extension type definitions.
  '''

  DEFAULT_OPTIONS = {
    'extensions'       : None,
    'commentToken'     : '##',
    'closure_open'     : '(',
    'closure_close'    : ')',
    'oneof_sep'        : '|',
    'union_sep'        : '&',
    'customDictTypeRE' : r'^([a-zA-Z_][a-zA-Z0-9_]*\.)*[A-Z][a-zA-Z0-9_]*$',
    'unknownTypeRE'    : r'^([a-zA-Z_][a-zA-Z0-9_]*\.)*[a-zA-Z0-9_]+$',
  }

  DEFAULT_ALIASES = {
    # constants:
    'null'    : ['nil', 'none', 'None', 'NULL', 'NIL', 'NONE'],
    'true'    : ['True', 'TRUE'],
    'false'   : ['False', 'FALSE'],
    # scalars:
    'byte'    : [],
    'bytes'   : [],
    'boolean' : ['bool'],
    'integer' : ['int'],
    'number'  : ['num', 'float', 'decimal', 'real'],
    'string'  : ['str'],
    # meta / compounds types:
    'any'     : [],
    'oneof'   : ['choice', 'enum', 'enumeration', 'select', 'option'],
    'union'   : ['allof'],
    'list'    : ['array', 'vector'],
    'dict'    : ['dictionary', 'hash', 'map', 'hashmap', 'table', 'hashtable'],
    'ref'     : ['reference'],
  }

  #----------------------------------------------------------------------------
  def __init__(self, options=None, aliases=None, _hack=False):
    '''
    Creates a registry; `options` override `DEFAULT_OPTIONS`, `aliases`
    (if provided) replace `DEFAULT_ALIASES`. `_hack` is internal-only:
    it skips initialization so that `clone` can populate attributes
    directly.
    '''
    if _hack:
      return
    self.options = aadict(self.DEFAULT_OPTIONS).update(options or {})
    self._types = dict()
    self._autotypes = dict()
    self._aliases = dict()
    self._dictType_cre = re.compile(self.options.customDictTypeRE)
    self._unknownType_cre = re.compile(self.options.unknownTypeRE)
    aliases = aliases or self.options.aliases
    self.addAliases(self.DEFAULT_ALIASES if aliases is None else aliases)
    # the HTTP-exception aliases/types are only auto-loaded when the
    # caller did not supply an explicit alias table
    if aliases is None:
      self.addHttpAliases()
    # `alias.<target> = <source>[, <source>...]` option entries
    for target, val in morph.pick(self.options, prefix='alias.').items():
      for source in morph.tolist(val):
        self.addAlias(source, target)
    if self.options.extensions:
      self.loadExtensions(self.options.extensions)

  #----------------------------------------------------------------------------
  def clone(self):
    '''
    Creates a copy of this TypeRegistry, where the types are
    deep-copied.
    '''
    ret = TypeRegistry(_hack=True)
    ret.options = aadict(self.options)
    ret._dictType_cre = self._dictType_cre
    ret._unknownType_cre = self._unknownType_cre
    ret._aliases = {k : set(v) for k, v in self._aliases.items()}
    ret._types = {k : v.clone() for k, v in self._types.items()}
    ret._autotypes = {k : v.clone() for k, v in self._autotypes.items()}
    return ret

  #----------------------------------------------------------------------------
  def addAliases(self, aliases):
    # `aliases` maps target type name => list of source (alias) names
    for target, sources in (aliases or {}).items():
      for source in sources:
        self.addAlias(source, target)

  #----------------------------------------------------------------------------
  def addHttpAliases(self):
    '''
    Loads the HTTP error response codes from pyramid.httpexceptions as
    aliases so that they can be used as output or error response types
    without needing to define them. Several aliases are loaded per
    response code - for example, all of the following will resolve to
    a ``403`` response code:

    * ``HTTPForbidden``
    * ``pyramid.httpexceptions.HTTPForbidden``
    '''
    for name in dir(pyramid.httpexceptions):
      if name.startswith('_'):
        continue
      try:
        sym = getattr(pyramid.httpexceptions, name)
        if inspect.isclass(sym) \
            and issubclass(sym, pyramid.httpexceptions.WSGIHTTPException):
          self.addAlias('pyramid.httpexceptions.' + name, name)
          self.registerAutoType(
            Type(
              base  = Type.DICT,
              name  = name,
              doc   = _('{error.explanation}', error=sym),
              value = [
                TypeRef(
                  name = 'code',
                  type = Type(base=Type.CONSTANT, name=Type.INTEGER, value=sym.code)),
                TypeRef(
                  name = 'message',
                  type = Type(base=Type.CONSTANT, name=Type.STRING, value=sym.title)),
              ],
              meta  = {
                'source'  : 'pyramid.httpexceptions',
                'classes' : ['source-pyramid-httpexceptions'],
              },
            ))
      except Exception:
        # deliberate best-effort: skip any pyramid.httpexceptions symbol
        # that cannot be introspected/registered
        pass

  #----------------------------------------------------------------------------
  def addAlias(self, source, target):
    '''
    Declares `source` as an alias for the type name `target`; raises
    ValueError if `source` is already a standalone type or is already
    aliased to a different target.
    '''
    # todo: check for cyclical references...
    if source in self._types or source in self._aliases:
      raise ValueError(
        'cannot alias %r to %r: already declared as standalone type' %
        (source, target))
    for ctarget, csources in self._aliases.items():
      if source in csources and target != ctarget:
        raise ValueError(
          'cannot alias %r to %r: already aliased to %r' %
          (source, target, ctarget))
    if target not in self._aliases:
      self._aliases[target] = set()
    self._aliases[target].add(source)

  #----------------------------------------------------------------------------
  def loadExtensions(self, specs):
    for ext in morph.tolist(specs):
      self.loadExtension(ext)

  #----------------------------------------------------------------------------
  def loadExtension(self, spec):
    '''
    Loads an extension `spec`, which is tried, in order, as: a python
    symbol (callable taking this registry), an asset specification
    whose content is parsed, or a literal extension-definition string.
    '''
    log.debug('loading type registry extensions from: %r', spec)
    try:
      sym = asset.symbol(spec)
      return sym(self)
    except (ImportError, AttributeError):
      pass
    try:
      return self.loadExtensionString(asset.load(spec).read(), source=spec)
    except (ImportError, AttributeError, ValueError):
      pass
    return self.loadExtensionString(spec)

  #----------------------------------------------------------------------------
  def loadExtensionString(self, text, source=None):
    # parse `text` as numpydoc-style type declarations and register
    # each resulting type as an "auto" type
    from .syntax.numpydoc.parser import Parser
    parser = Parser(comment=self.options.commentToken)
    for doc, typ in parser.parseMulti(text):
      if doc:
        log.debug('ignoring unbound extension documentation text: %r', doc)
      if not typ:
        continue
      if isinstance(typ, TypeRef):
        # promote a bare TypeRef to a full extension Type
        typ = Type(base=Type.EXTENSION, name=typ.name, doc=typ.doc, value=
                   TypeRef(type=typ.type, params=typ.params))
      if source:
        typ.meta.source = source
      log.debug('registering extension type "%s"', typ.name)
      self.registerAutoType(typ)

  #----------------------------------------------------------------------------
  def registerType(self, type):
    '''
    Registers `type` as an explicitly-declared type; an empty
    declaration (no doc, no value) falls back to the auto-type of the
    same name, if any. Returns the registered type.
    '''
    if not type.doc and not type.value:
      type = self.getAuto(type.name) or type
    else:
      type = self.dereference(type)
    # todo: should this check for collision?...
    self._types[type.name] = type
    return type

  #----------------------------------------------------------------------------
  def registerAutoType(self, type):
    # "auto" types are lazily promoted to real types on first `get()`
    type = self.dereference(type, auto=True)
    # todo: should this check for collision?...
    self._autotypes[type.name] = type
    return type

  #----------------------------------------------------------------------------
  def dereference(self, type, auto=False):
    '''
    Recursively resolves `unknown` type references within `type` to
    their registered definitions; raises ValueError for references to
    undefined types.
    '''
    # TODO: in the end, this is just resolving `unknown` types, but
    #       should really do a more "complete" deref. the core problem
    #       is that `pyramid_describe/syntax/numpydoc/merger.py` needs
    #       to "work well" with this... and it currently does not.
    if isinstance(type, TypeRef):
      if type.type:
        type.type = self.dereference(type.type, auto=auto)
      return type
    if type.is_constant() or type.is_scalar():
      return type
    if type.base == Type.UNKNOWN:
      typ = self.getAuto(type.name) if auto else self.get(type.name)
      if not typ:
        raise ValueError(
          'invalid reference to unknown/undefined type "%s"' % (type.name,))
      type = typ
      return type
    type.setChildren(
      self.dereference(typ, auto=auto) for typ in type.children)
    return type

  #----------------------------------------------------------------------------
  def resolveAliases(self, symbol):
    # follows alias chains until a canonical type name is reached
    for key, val in self._aliases.items():
      if symbol in val:
        return self.resolveAliases(key)
    return symbol

  #----------------------------------------------------------------------------
  def get(self, symbol):
    '''
    Returns the registered type for `symbol` (after alias resolution),
    promoting a matching auto-type if necessary; None if not found.
    '''
    symbol = self.resolveAliases(symbol)
    if symbol not in self._types and symbol in self._autotypes:
      # TODO: what about promoting other auto types that are
      #       referenced by self._autotypes[symbol]???
      self._types[symbol] = self._autotypes[symbol]
    return self._types.get(symbol)

  #----------------------------------------------------------------------------
  def getAuto(self, symbol):
    # like `get`, but looks only at auto-types and does not promote
    symbol = self.resolveAliases(symbol)
    return self._autotypes.get(symbol)

  #----------------------------------------------------------------------------
  def typeNames(self):
    # registered type names, case-insensitively sorted
    return sorted(self._types.keys(), key=str.lower)

  #----------------------------------------------------------------------------
  def types(self):
    # registered Type objects, case-insensitively sorted by name
    return sorted(self._types.values(), key=lambda typ: typ.name.lower())

  #----------------------------------------------------------------------------
  def prepareParams(self, type):
    from . import params
    return params.prepare(type.params)

  #----------------------------------------------------------------------------
  # TODO: MOVE THIS INTO pyramid_describe/syntax/numpydoc/parser.py
  #----------------------------------------------------------------------------

  #----------------------------------------------------------------------------
  def parseType(self, spec, complete=True):
    '''
    Parses the type declaration `spec` into a Type. When `complete` is
    truthy (the default), the entire string must be consumed and only
    the Type is returned; otherwise a `(type, remainder)` tuple is
    returned. Raises ValueError on unparsable input.
    '''
    src = StringWalker(spec)
    typ = self._parseType(src)
    # a trailing comment consumes the rest of the input
    if src.string and src.string.startswith(self.options.commentToken):
      src.read(len(src.string))
    if not typ or src.index <= 0:
      raise ValueError('could not parse %r' % (spec,))
    rem = src.string
    if not complete:
      return (typ, rem)
    if not rem:
      return typ
    raise ValueError(
      'Extra data after position %d (%r)' % (src.index, src.string))

  #----------------------------------------------------------------------------
  def _parseType(self, source):
    # parses one type, then checks for a trailing oneof/union separator
    typ = self._parseType_next(source)
    if not typ:
      return typ
    if not source.eatws():
      return typ
    for seqtyp, seqtok in [
        (Type.ONEOF, self.options.oneof_sep),
        (Type.UNION, self.options.union_sep),
      ]:
      if source.startswith(seqtok):
        return self._parseType_sequence(source, typ, seqtyp, seqtok)
    return typ

  #----------------------------------------------------------------------------
  def _parseType_sequence(self, source, current, base, token):
    # folds `A | B | C` (or `&`) into a single compound oneof/union type
    while True:
      if not source.eatws():
        return current
      if not source.startswith(token):
        return current
        # raise ValueError(
        #   'cannot parse after %s separator (%r) at position %d (%r)'
        #   % (base, token, source.index, source.string))
      idx = source.index
      source.read(len(token))
      if current.name != base:
        current = Type(base=Type.COMPOUND, name=base, value=[current])
      styp = self._parseType_next(source)
      if not styp:
        source.seek(idx)
        raise ValueError(
          'cannot parse after %s separator (%r) at position %d (%r)'
          % (base, token, source.index, source.string))
      current.value.append(styp)

  #----------------------------------------------------------------------------
  def _peekSymbol(self, source):
    # returns the leading symbol token without consuming it, or None
    match = symbol_cre.match(source.string)
    if not match:
      return None
    return match.group(0)

  #----------------------------------------------------------------------------
  def _parseType_next(self, source):
    # parses one "atom": a parenthesized group, a symbol, or a constant
    if not source.eatws():
      return None
    if source.startswith(self.options.closure_open):
      idx = source.index
      source.read(1)
      typ = self._parseType(source)
      if not typ:
        source.seek(idx)
        raise ValueError(
          'cannot parse content of grouping (%r) at position %d (%r)' \
          % (self.options.closure_open, source.index, source.string))
      source.eatws()
      if source.read(1) != self.options.closure_close:
        source.seek(idx)
        raise ValueError(
          'unterminated %r at position %d (%r)' \
          % (self.options.closure_open, source.index, source.string))
      return typ
    token = self._peekSymbol(source)
    if token:
      typ = self._parseType_token(source, token)
      if typ:
        return typ
    if constant_cre.match(source.string):
      typ = self._parseType_constant(source)
      if typ:
        return typ
    return None

  #----------------------------------------------------------------------------
  def _parseType_token(self, source, token):
    '''
    Parses a symbol `token`, trying in order: well-known symbols,
    compound constructors, registered types, custom dict types, and
    finally unknown (forward-referenced) types.
    '''
    symbol = self.resolveAliases(token)
    if hasattr(self, '_parseType_symbol_' + symbol):
      typ = getattr(self, '_parseType_symbol_' + symbol)(source, token)
      if typ:
        return typ
    if hasattr(self, '_parseType_compound_' + symbol):
      value = None
      source.read(len(token))
      source.eatws()
      if source.startswith(self.options.closure_open):
        value = self._parseType(source)
      # BUGFIX: this previously dispatched on `token` instead of `symbol`,
      # raising AttributeError when a compound type was referenced via an
      # alias (e.g. "array" for "list"): the `hasattr` check above resolved
      # the alias but the `getattr` call here did not.
      typ = getattr(self, '_parseType_compound_' + symbol)(source, token, value)
      if typ:
        return typ
    typ = self._parseType_registered(source, token)
    if typ:
      return typ
    typ = self._parseType_custom(source, token)
    if typ:
      return typ
    typ = self._parseType_unknown(source, token)
    if typ:
      return typ
    return None

  #----------------------------------------------------------------------------
  # factory for the trivial "consume token, return fixed Type" handlers below
  def _makeParseType(**kw):
    def _method(self, source, token):
      source.read(len(token))
      return Type(**kw)
    return _method
  _parseType_symbol_any     = _makeParseType(base=Type.SCALAR, name=Type.ANY)
  _parseType_symbol_byte    = _makeParseType(base=Type.SCALAR, name=Type.BYTE)
  _parseType_symbol_bytes   = _makeParseType(base=Type.SCALAR, name=Type.BYTES)
  _parseType_symbol_boolean = _makeParseType(base=Type.SCALAR, name=Type.BOOLEAN)
  _parseType_symbol_integer = _makeParseType(base=Type.SCALAR, name=Type.INTEGER)
  _parseType_symbol_number  = _makeParseType(base=Type.SCALAR, name=Type.NUMBER)
  _parseType_symbol_string  = _makeParseType(base=Type.SCALAR, name=Type.STRING)
  _parseType_symbol_null    = _makeParseType(base=Type.CONSTANT, name=Type.NULL, value=None)
  _parseType_symbol_true    = _makeParseType(base=Type.CONSTANT, name=Type.BOOLEAN, value=True)
  _parseType_symbol_false   = _makeParseType(base=Type.CONSTANT, name=Type.BOOLEAN, value=False)
  # this is a "parseType_symbol" not "parseType_compound" because
  # you can't declare it as "dict(...)"...
  # todo: is this *absolutely* true?...
  _parseType_symbol_dict    = _makeParseType(base=Type.COMPOUND, name=Type.DICT)

  #----------------------------------------------------------------------------
  def _parseType_constant(self, source):
    # TODO: convert this to use constant.parse()!...
    #       (the issue is that constant.parse auto-converts hex values,
    #       which i want to detect somehow to set the type to BYTE or BYTES)
    if source.startswith('0x'):
      return self._parseType_constant_hex(source)
    if source.peek() in '01234567890-':
      return self._parseType_constant_num(source)
    if source.peek() in '\'"{[':
      return self._parseType_constant_yaml(source)
    return None

  #----------------------------------------------------------------------------
  def _parseType_native(self, target):
    # maps a parsed python value to the corresponding CONSTANT Type.
    # NOTE: uses the python-2-only `types.NoneType` et al. -- this file
    # is python-2 targeted throughout.
    if isinstance(target, types.NoneType):
      return Type(base=Type.CONSTANT, name=Type.NULL, value=None)
    if isinstance(target, types.BooleanType):
      return Type(base=Type.CONSTANT, name=Type.BOOLEAN, value=target)
    if isinstance(target, (types.IntType, types.LongType)):
      return Type(base=Type.CONSTANT, name=Type.INTEGER, value=target)
    if isinstance(target, types.FloatType):
      return Type(base=Type.CONSTANT, name=Type.NUMBER, value=target)
    if isinstance(target, six.string_types) \
        or isinstance(target, six.text_type):
      return Type(base=Type.CONSTANT, name=Type.STRING, value=target)
    if isinstance(target, six.binary_type):
      return Type(base=Type.CONSTANT, name=Type.BYTES, value=target)
    if isinstance(target, (types.TupleType, types.ListType)):
      return Type(base=Type.CONSTANT, name=Type.LIST, value=list(target))
    if isinstance(target, types.DictType):
      return Type(base=Type.CONSTANT, name=Type.DICT, value=dict(target))
    raise ValueError('unknown constant type: %r' % (target,))

  #----------------------------------------------------------------------------
  def _parseType_constant_hex(self, source):
    # parses `0x...` hex literals; a single byte-pair yields BYTE,
    # anything longer yields BYTES
    if not source.string.startswith('0x'):
      return None
    match = hex_cre.match(source.string[2:])
    if not match:
      return None
    data = match.group(0)
    source.read(len(data) + 2)
    # NOTE: str.decode('hex') is python-2-only
    if len(data) == 2:
      return Type(base=Type.CONSTANT, name=Type.BYTE, value=data.decode('hex'))
    return Type(base=Type.CONSTANT, name=Type.BYTES, value=data.decode('hex'))

  #----------------------------------------------------------------------------
  def _parseType_constant_num(self, source):
    # NOTE: using json, not yaml, because yaml is far too lenient.
    #       for example ``78 !foo~`` would be interpreted as the entire
    #       *string* "78 !foo~", not the number 78 + plus extra stuff...
    ret, rem = util.jsonParse(source.string, partial=True)
    source.read(source.length - len(rem))
    return self._parseType_native(ret)

  #----------------------------------------------------------------------------
  _yaml_error_cre = re.compile(
    r'^ in "<string>", line 1, column (\d+):$', flags=re.MULTILINE)
  def _parseType_constant_yaml(self, source):
    '''
    Parses a quoted-string/mapping/list constant with YAML; on a parse
    error, retries on the prefix up to each error column reported by
    YAML (longest first), so that trailing non-constant input is left
    unconsumed.
    '''
    # SECURITY NOTE: `yaml.load` without an explicit safe Loader can
    # construct arbitrary python objects from the input -- if the input
    # is ever untrusted, this should be `yaml.safe_load`.
    try:
      ret = yaml.load(source.string)
      source.read(source.length)
      return self._parseType_native(ret)
    except (yaml.parser.ParserError, yaml.parser.ScannerError) as exc:
      idxs = [
        val for val in [
          int(m.group(1)) - 1
          for m in self._yaml_error_cre.finditer(str(exc))]
        if val > 0]
      if not idxs:
        raise
      for idx in reversed(sorted(idxs)):
        try:
          ret = yaml.load(source.string[:idx])
        except Exception:
          continue
        source.read(idx)
        return self._parseType_native(ret)
      raise

  #----------------------------------------------------------------------------
  def _parseType_compound_oneof(self, source, token, value):
    kw = {} if value is None else {'value': value}
    return Type(base=Type.COMPOUND, name=Type.ONEOF, **kw)

  #----------------------------------------------------------------------------
  def _parseType_compound_union(self, source, token, value):
    kw = {} if value is None else {'value': value}
    return Type(base=Type.COMPOUND, name=Type.UNION, **kw)

  #----------------------------------------------------------------------------
  def _parseType_compound_list(self, source, token, value):
    return Type(base=Type.COMPOUND, name=Type.LIST, value=value)

  #----------------------------------------------------------------------------
  def _parseType_compound_ref(self, source, token, value):
    return Type(base=Type.COMPOUND, name=Type.REF, value=value)

  #----------------------------------------------------------------------------
  def _parseType_compound_dict(self, source, token, value):
    # dict children must be *named* TypeRef's (e.g. ``dict(foo: int)``)
    for val in value or []:
      if not isinstance(val, TypeRef) or not val.name:
        raise ValueError(
          'dict-type children must be named type references, not %r' % (val,))
    return Type(base=Type.COMPOUND, name=Type.DICT, value=value)

  #----------------------------------------------------------------------------
  def _parseType_registered(self, source, token):
    # matches a previously registered (or auto-registered) type name
    target = self.resolveAliases(token)
    target = self._types.get(target) or self._autotypes.get(target)
    if not target:
      return None
    source.read(len(token))
    return Type(base=target.base, name=token)

  #----------------------------------------------------------------------------
  def _parseType_custom(self, source, token):
    if not self.isCustomDictType(token):
      return None
    source.read(len(token))
    # todo: how to detect non-dict custom types?...
    #       perhaps simple: NOT the parser's job.
    token = self.resolveAliases(token)
    return Type(base=Type.DICT, name=token)

  #----------------------------------------------------------------------------
  def _parseType_unknown(self, source, token):
    if not self.isUnknownType(token):
      return None
    source.read(len(token))
    # todo: how to detect invalid types?...
    #       perhaps simple: NOT the parser's job.
    token = self.resolveAliases(token)
    return Type(base=Type.UNKNOWN, name=token)

  #----------------------------------------------------------------------------
  def isType(self, name):
    # todo: this is a hack to allow sharing of the regex used to
    #       determine whether or not a symbol is an acceptable
    #       type in pyramid_describe/syntax/numpydoc/extractor.py
    #       fix!
    match = symbol_cre.match(name)
    if match and match.group(0) == name:
      return True
    if self.isCustomDictType(name):
      return True
    return self.isUnknownType(name)

  #----------------------------------------------------------------------------
  def isCustomDictType(self, name):
    # e.g. ``Shipment`` or ``pkg.mod.Shipment`` (leading-capital last part)
    return bool(self._dictType_cre.match(name))

  #----------------------------------------------------------------------------
  def isUnknownType(self, name):
    return bool(self._unknownType_cre.match(name))
#------------------------------------------------------------------------------
# end of $Id$
# $ChangeLog$
#------------------------------------------------------------------------------
| |
#!/usr/bin/env python
import sys, os, time
import difflib
import unittest
import sublime

# Detect SublimeText version to deal w/ v2 vs v3 deltas.
# sublime.version() returns a numeric string such as "2221" or "3126".
version = sublime.version()
print("Testing with SublimeText version: %s" % version)

# Load the plugin module so tests can call its helper functions directly.
# ST2 and ST3 register plugin modules under different names.
# FIX: compare as integers -- lexicographic string comparison only works by
# accident while versions stay four digits.
if int(version) < 3000:
    clipboard_diff = sys.modules["clipboard_diff"]
else:
    clipboard_diff = sys.modules["sublime-clipboard-diff.clipboard_diff"]

# Python 2 vs Python 3 compatibility: reduce() moved to functools.reduce.
try:
    # Python 2
    _reduce = reduce
except NameError:
    # Python 3
    import functools
    _reduce = functools.reduce
class TestSelectionDiffPlugin(unittest.TestCase):
    """
    Unit tests to validate sublime-selection-diff plugin methods
    """
    # Two small fixtures to diff: they share "line 1" and differ elsewhere.
    test_lines_0 = "line 0\nline 1\nline 2"
    test_lines_1 = "line A\nline 1"

    """
    Helper functions
    """
    def getDiffExpectedResult(self, diff_fn, str_a, str_b, file_a, file_b):
        # Compute the expected diff text by calling difflib directly with the
        # same line-splitting helper the plugin uses, then joining the
        # generator output into a single string.
        result = diff_fn(
            clipboard_diff.getLinesHelper(str_a),
            clipboard_diff.getLinesHelper(str_b),
            file_a, file_b)
        return _reduce(lambda acc, x: acc + x, result, "")

    def runSimpleViewCommand(self, cmd):
        # Run a parameterless view command against the scratch test view.
        if self.test_view:
            self.test_view.run_command(cmd)

    def insertTextToTestView(self, text):
        # Insert `text` at the current cursor position of the test view.
        self.test_view.run_command("insert", {"characters": text})

    """
    Setup / Teardown
    """
    def setUp(self):
        """
        Common setUp() for TestSelectionDiffPlugin
        """
        # Fresh scratch view per test so closing it never prompts to save.
        self.test_view = sublime.active_window().new_file()
        self.test_view.set_name("Test View")
        self.test_view.set_scratch(True)
        self.settings = sublime.load_settings("clipboard_diff.sublime-settings")

    def tearDown(self):
        """
        Common tearDown() for TestSelectionDiffPlugin
        """
        test_window = self.test_view.window()
        test_window.focus_view(self.test_view)
        test_window.run_command("close_file")

    """
    Helper Function Tests:
    """
    def test_get_selection_str(self):
        """
        Validate the selectionToString helper
        """
        self.insertTextToTestView(self.test_lines_0)
        self.runSimpleViewCommand("select_all")
        selection_txt = clipboard_diff.selectionToString(self.test_view)
        self.assertEqual(self.test_lines_0, selection_txt)

    def test_line_helper(self):
        """
        Validates the line helper
        """
        # getLinesHelper should split and re-append '\n' to every line.
        lines = clipboard_diff.getLinesHelper(self.test_lines_0)
        self.assertEqual(len(lines), 3)
        self.assertEqual(lines[0], "line 0\n")
        self.assertEqual(lines[1], "line 1\n")
        self.assertEqual(lines[2], "line 2\n")

    # ST2 and ST3 expose different editing APIs, so some tests only exist on
    # one major version; the class body picks the methods at import time.
    if version < "3000":
        """
        Tests ST2 only stuff
        """
        def test_write_to_view(self):
            """
            Validates writing to a view
            This is limited to < ST3 because the edit object is only
            passed into the TextCommand, and view.begin_edit() does
            not exist anymore.
            """
            lines = clipboard_diff.getLinesHelper(self.test_lines_0)
            edit = self.test_view.begin_edit()
            clipboard_diff.writeLinesToViewHelper(self.test_view, edit, lines)
            self.test_view.end_edit(edit)
            self.runSimpleViewCommand("select_all")
            current_selection = self.test_view.sel()
            selected_text = clipboard_diff.selectionToString(self.test_view)
            self.assertEqual(self.test_lines_0 + "\n", selected_text)
    else:
        """
        > ST 2 Tests
        """
        def test_clipboard_diff_view_syntax(self):
            """
            Validates that the newly opened tab is of the `Diff` syntax
            """
            self.runSimpleViewCommand("clipboard_diff")
            diff_view = sublime.active_window().active_view()
            diff_syntax = diff_view.settings().get('syntax')
            diff_view.window().run_command("close_file")
            self.assertEqual("Packages/Diff/Diff.tmLanguage", diff_syntax)

    """
    Plugin Tests:
    """
    def test_clipboard_unified_diff(self):
        """
        Validates the `clipboard_diff` command
        """
        # Cut fixture 0 (lands on the clipboard), then select fixture 1.
        self.insertTextToTestView(self.test_lines_0)
        self.runSimpleViewCommand("select_all")
        self.runSimpleViewCommand("cut")
        self.insertTextToTestView(self.test_lines_1)
        self.runSimpleViewCommand("select_all")
        self.runSimpleViewCommand("clipboard_diff")
        diff_view = sublime.active_window().active_view()
        diff_text = diff_view.substr(sublime.Region(0, diff_view.size()))
        diff_view.window().run_command("close_file")
        expected_diff_text = self.getDiffExpectedResult(difflib.unified_diff,
                                                        self.test_lines_0, self.test_lines_1,
                                                        "Clipboard", "Selection")
        self.assertEqual(expected_diff_text, diff_text)

    def test_clipboard_unified_diff_same_selection(self):
        """
        Validates the `clipboard_diff` command when run
        with the same selection
        """
        self.insertTextToTestView(self.test_lines_0)
        self.runSimpleViewCommand("select_all")
        self.runSimpleViewCommand("copy")
        self.runSimpleViewCommand("clipboard_diff")
        diff_view = sublime.active_window().active_view()
        diff_text = diff_view.substr(sublime.Region(0, diff_view.size()))
        diff_view.window().run_command("close_file")
        self.assertEqual("\n Selection and Clipboard Match!\n", diff_text)

    def test_clipboard_context_diff(self):
        """
        Validates that we can change the diff type to context
        diff (from difflib)
        """
        self.insertTextToTestView(self.test_lines_0)
        self.runSimpleViewCommand("select_all")
        self.runSimpleViewCommand("cut")
        self.insertTextToTestView(self.test_lines_1)
        self.runSimpleViewCommand("select_all")
        # Flip the diff_type setting around the command, restoring it after.
        old_type = self.settings.get("diff_type")
        self.settings.set("diff_type", "context")
        self.runSimpleViewCommand("clipboard_diff")
        self.settings.set("diff_type", old_type)
        diff_view = sublime.active_window().active_view()
        diff_text = diff_view.substr(sublime.Region(0, diff_view.size()))
        diff_view.window().run_command("close_file")
        expected_diff_text = self.getDiffExpectedResult(difflib.context_diff,
                                                        self.test_lines_0, self.test_lines_1,
                                                        "Clipboard", "Selection")
        self.assertEqual(expected_diff_text, diff_text)

    def test_clipboard_setting_file_names(self):
        """
        Validates that clipboard_file_name and selection_file_name
        can be set to success
        """
        self.insertTextToTestView(self.test_lines_0)
        self.runSimpleViewCommand("select_all")
        self.runSimpleViewCommand("cut")
        self.insertTextToTestView(self.test_lines_1)
        self.runSimpleViewCommand("select_all")
        # Override the header names used in the diff output, restore after.
        old_clipboard_file_name = self.settings.get("clipboard_file_name")
        old_selection_file_name = self.settings.get("selection_file_name")
        self.settings.set("clipboard_file_name", "CLIPBOARD IS FUN")
        self.settings.set("selection_file_name", "SELECTION IS BETTER")
        self.runSimpleViewCommand("clipboard_diff")
        self.settings.set("clipboard_file_name", old_clipboard_file_name)
        self.settings.set("selection_file_name", old_selection_file_name)
        diff_view = sublime.active_window().active_view()
        diff_text = diff_view.substr(sublime.Region(0, diff_view.size()))
        diff_view.window().run_command("close_file")
        expected_diff_text = self.getDiffExpectedResult(difflib.unified_diff,
                                                        self.test_lines_0, self.test_lines_1,
                                                        "CLIPBOARD IS FUN", "SELECTION IS BETTER")
        self.assertEqual(expected_diff_text, diff_text)
| |
# -*- coding: utf-8 -*-
from contextlib import closing
from pyramid import testing
import pytest
from journal import connect_db
from journal import DB_SCHEMA
import datetime
from journal import INSERT_ENTRY
import os
from cryptacular.bcrypt import BCRYPTPasswordManager
from webtest import AppError
# Connection settings for the throwaway test database.
TEST_DSN = 'dbname=test_learning_journal user=aabulota'
# Submit-button markup; rendered only for authenticated users.
INPUT_BTN = '<input type="submit" value="Share" name="Share"/>'
# Fetch every entry row (id, title, text, created).
READ_ENTRY = """SELECT * FROM entries
"""
def init_db(settings):
    """Create the entries schema in the test database."""
    with closing(connect_db(settings)) as connection:
        connection.cursor().execute(DB_SCHEMA)
        connection.commit()
def clear_db(settings):
    """Drop the entries table entirely (session teardown)."""
    with closing(connect_db(settings)) as connection:
        connection.cursor().execute("DROP TABLE entries")
        connection.commit()
def clear_entries(settings):
    """Delete every row from entries while keeping the schema."""
    with closing(connect_db(settings)) as connection:
        connection.cursor().execute("DELETE FROM entries")
        connection.commit()
def run_query(db, query, params=(), get_results=True):
    """Execute `query` with `params` on `db`, commit, optionally fetch rows.

    Returns the fetched rows, or None when `get_results` is False.
    """
    cur = db.cursor()
    cur.execute(query, params)
    db.commit()
    if not get_results:
        return None
    return cur.fetchall()
@pytest.fixture(scope='session')
def db(request):
    """set up and tear down a database"""
    settings = {'db': TEST_DSN}
    init_db(settings)

    def cleanup():
        # drop the table once the whole test session finishes
        clear_db(settings)
    request.addfinalizer(cleanup)
    return settings
@pytest.yield_fixture(scope='function')
def req_context(db, request):
    """mock a request with a database attached"""
    settings = db
    req = testing.DummyRequest()
    # NOTE: the local name `db` is rebound here from the fixture value
    # (settings dict) to an open connection for the duration of the test.
    with closing(connect_db(settings)) as db:
        req.db = db
        req.exception = None
        yield req
        # after a test has run, we clear out entries for isolation
        clear_entries(settings)
@pytest.fixture(scope='function')
def app(db, request):
    """provide a webtest TestApp wrapping the journal WSGI app"""
    from journal import main
    from webtest import TestApp
    # point the application at the test database before constructing it
    os.environ['DATABASE_URL'] = TEST_DSN
    app = main()

    def cleanup():
        settings = {'db': TEST_DSN}
        clear_entries(settings)
    request.addfinalizer(cleanup)
    return TestApp(app)
@pytest.fixture(scope='function')
def entry(db, request):
    """provide a single entry in the database"""
    settings = db
    now = datetime.datetime.utcnow()
    # (title, text, created) tuple that tests compare against
    expected = ('Test Title', 'Test Text', now)
    with closing(connect_db(settings)) as db:
        run_query(db, INSERT_ENTRY, expected, False)
        db.commit()

    def cleanup():
        clear_entries(settings)
    request.addfinalizer(cleanup)
    return expected
@pytest.fixture(scope='function')
def auth_req(request):
    """provide a DummyRequest with admin credentials configured"""
    manager = BCRYPTPasswordManager()
    settings = {
        'auth.username': 'admin',
        # store only the bcrypt hash, as the app itself would
        'auth.password': manager.encode('secret'),
    }
    testing.setUp(settings=settings)
    req = testing.DummyRequest()

    def cleanup():
        testing.tearDown()
    request.addfinalizer(cleanup)
    return req
def test_empty_listing(app):
    """An empty journal renders the placeholder message."""
    response = app.get('/')
    assert response.status_code == 200
    assert 'No entries here so far' in response.body
def test_listing(app, entry):
    """The home page shows the title and text of an existing entry."""
    response = app.get('/')
    assert response.status_code == 200
    body = response.body
    for fragment in entry[:2]:
        assert fragment in body
def test_detail_listing(app, entry, req_context):
    """The detail page shows the entry's title and text."""
    rows = run_query(req_context.db, READ_ENTRY)
    response = app.get('/detail/{}'.format(rows[0][0]))
    assert response.status_code == 200
    body = response.body
    for fragment in entry[:2]:
        assert fragment in body
def test_read_entries_empty(req_context):
    """read_entries returns an empty entries collection for a fresh DB."""
    from journal import read_entries
    result = read_entries(req_context)
    assert 'entries' in result
    assert len(result['entries']) == 0
def test_read_entries(req_context):
    """read_entries returns one markdown-rendered entry after an insert."""
    now = datetime.datetime.utcnow()
    expected = ('Test Title', 'Test Text', now)
    run_query(req_context.db, INSERT_ENTRY, expected, False)
    from journal import read_entries
    result = read_entries(req_context)
    assert 'entries' in result
    assert len(result['entries']) == 1
    for row in result['entries']:
        assert expected[0] == row['title']
        # text comes back wrapped in a paragraph by the markdown renderer
        assert '<p>{}</p>'.format(expected[1]) == row['text']
        for key in ('id', 'created'):
            assert key in row
def test_read_entry(req_context):
    """read_entry returns the full four-key record for one entry."""
    now = datetime.datetime.utcnow()
    expected = ('Test Title', 'Test Text', now)
    run_query(req_context.db, INSERT_ENTRY, expected, False)
    rows = run_query(req_context.db, READ_ENTRY)
    req_context.matchdict = {'id': rows[0][0]}
    from journal import read_entry
    result = read_entry(req_context)
    assert 'entry' in result
    record = result['entry']
    assert len(record) == 4
    assert expected[0] == record['title']
    assert '<p>{}</p>'.format(expected[1]) == record['text']
    for key in ('id', 'created'):
        assert key in record
def test_write_entry(req_context):
    """write_entry persists the posted title and text."""
    from journal import write_entry
    expected = ('Test Title', 'Test Text')
    req_context.params = dict(zip(('title', 'text'), expected))
    # the table must start empty
    assert len(run_query(req_context.db, READ_ENTRY)) == 0
    write_entry(req_context)
    # manually commit so we can see the entry on query
    req_context.db.commit()
    rows = run_query(req_context.db, "SELECT title, text FROM entries")
    assert len(rows) == 1
    for idx, val in enumerate(expected):
        assert val == rows[0][idx]
def test_edit_entry(req_context):
    """edit_entry replaces an existing entry's title and text."""
    from journal import edit_entry
    from journal import write_entry
    fields = ('title', 'text')
    original = ('Test Title', 'Test Text')
    req_context.params = dict(zip(fields, original))
    write_entry(req_context)
    req_context.db.commit()
    rows = run_query(req_context.db, READ_ENTRY)
    assert len(rows) == 1
    stored = rows[0][1:3]
    for idx, val in enumerate(original):
        assert val == stored[idx]
    # now edit the same row and verify the new values replaced the old
    req_context.matchdict = {'id': rows[0][0]}
    expected = ('New Title', 'New Text')
    req_context.params = dict(zip(fields, expected))
    edit_entry(req_context)
    req_context.db.commit()
    rows = run_query(req_context.db, "SELECT title, text FROM entries")
    assert len(rows) == 1
    for idx, val in enumerate(expected):
        assert val == rows[0][idx]
def test_post_to_add_view(app):
    """An authenticated POST to /add redirects and shows the new entry."""
    entry_data = {
        'title': 'Hello there',
        'text': 'This is a post',
    }
    login_helper('admin', 'secret', app)
    response = app.post('/add', params=entry_data, status='3*')
    body = response.follow().body
    for expected in entry_data.values():
        assert expected in body
def test_post_to_add_view_unauthorized(app):
    """POSTing to /add without logging in is rejected."""
    entry_data = {
        'title': 'Hello there',
        'text': 'This is a post',
    }
    with pytest.raises(AppError):
        # expecting a redirect fails because the server refuses the post
        app.post('/add', params=entry_data, status='3*')
def test_post_to_edit_view(app, entry, req_context):
    """An authenticated POST to /editview updates the entry."""
    entry_data = {
        'title': 'Hello there',
        'text': 'This is a post',
    }
    login_helper('admin', 'secret', app)
    rows = run_query(req_context.db, READ_ENTRY)
    response = app.post('/editview/{}'.format(rows[0][0]),
                        params=entry_data, status='3*')
    body = response.follow().body
    for expected in entry_data.values():
        assert expected in body
def test_post_to_edit_view_unauthorized(app, entry, req_context):
    """POSTing to /editview with bad credentials is rejected."""
    entry_data = {
        'title': 'Hello there',
        'text': 'This is a post',
    }
    login_helper('admin', 'wrong', app)
    rows = run_query(req_context.db, READ_ENTRY)
    with pytest.raises(AppError):
        app.post('/editview/{}'.format(rows[0][0]),
                 params=entry_data, status='3*')
def test_do_login_success(auth_req):
    """do_login accepts the configured username/password pair."""
    from journal import do_login
    auth_req.params = {'username': 'admin', 'password': 'secret'}
    assert do_login(auth_req)


def test_do_login_bad_pass(auth_req):
    """do_login rejects a wrong password for a known user."""
    from journal import do_login
    auth_req.params = {'username': 'admin', 'password': 'wrong'}
    assert not do_login(auth_req)


def test_do_login_bad_user(auth_req):
    """do_login rejects an unknown username."""
    from journal import do_login
    auth_req.params = {'username': 'bad', 'password': 'secret'}
    assert not do_login(auth_req)
def test_do_login_missing_params(auth_req):
    """do_login raises ValueError when either credential is missing."""
    from journal import do_login
    for params in ({'username': 'admin'}, {'password': 'secret'}):
        auth_req.params = params
        with pytest.raises(ValueError):
            do_login(auth_req)
def login_helper(username, password, app):
    """encapsulate app login for reuse in tests

    Accept all status codes so that we can make assertions in tests
    """
    payload = {'username': username, 'password': password}
    return app.post('/login', params=payload, status='*')
def test_start_as_anonymous(app):
    """Anonymous visitors never see the submit button."""
    response = app.get('/', status=200)
    assert INPUT_BTN not in response.body
def test_login_success(app):
    """A good login redirects home and reveals the submit button."""
    redirect = login_helper('admin', 'secret', app)
    assert redirect.status_code == 302
    response = redirect.follow()
    assert response.status_code == 200
    assert INPUT_BTN in response.body
def test_login_fails(app):
    """A bad login re-renders the page with a failure notice."""
    response = login_helper('admin', 'wrong', app)
    assert response.status_code == 200
    assert "Login Failed" in response.body
    assert INPUT_BTN not in response.body
def test_logout(app):
    """Logging out hides the submit button again."""
    # re-use existing code to ensure we are logged in when we begin
    test_login_success(app)
    redirect = app.get('/logout', status="3*")
    response = redirect.follow()
    assert response.status_code == 200
    assert INPUT_BTN not in response.body
def test_post_with_markdown(app):
    """Markdown in a post's text is rendered to HTML."""
    entry_data = {
        'title': 'Hello there',
        'text': '###Header',
    }
    login_helper('admin', 'secret', app)
    response = app.post('/add', params=entry_data, status='3*')
    assert '<h3>Header</h3>' in response.follow().body
def test_post_with_codeblock(app):
    """Fenced code blocks are syntax-highlighted via codehilite."""
    entry_data = {
        'title': 'Hello there',
        'text': '```python\nfor i in list:\nx = y**2\nprint(x)\n```',
    }
    login_helper('admin', 'secret', app)
    response = app.post('/add', params=entry_data, status='3*')
    assert '<div class="codehilite"><pre>' in response.follow().body
| |
import uuid
import math
import traceback
from flask import Blueprint, request, make_response, jsonify, g
from flask_restful import Api, Resource, url_for, reqparse
from helpers import recipe_to_json, json_to_recipe
from api import bcrypt, db
from api.models.user import User
from api.models.recipe import Recipe
from api.models.rating import Rating
from api.models.ingredient import Ingredient, RecipeIngredient, PantryIngredient
from api.decorators import is_logged_in
# Blueprint wiring: every recipe endpoint is registered on this Api below.
recipe_blueprint = Blueprint('recipe', __name__)
recipe_api = Api(recipe_blueprint)
class SearchResource(Resource):
    """
    Recipe search resource.

    GET /api/recipe/search?terms=<query>&filter=<true|false>
    """
    decorators = [is_logged_in]

    def get(self):
        """
        Search recipes matching the free-text `terms`, optionally
        (filter=true) restricted to recipes the user's pantry can satisfy.
        """
        query = request.args.get('terms')
        filter_ = request.args.get('filter')
        if filter_ == 'true':
            user = User.query.get(g.user_id)
            # Map pantry ingredient id (hex) -> quantity the user owns.
            pantry = PantryIngredient.query.filter(
                PantryIngredient.user_id == user.id).all()
            owned = {p.ingredient_id.hex: p.value for p in pantry}
            # Recipe-ingredient rows the user owns in sufficient quantity.
            usable = [
                ri for ri in RecipeIngredient.query.filter(
                    RecipeIngredient.ingredient_id.in_(owned.keys())).all()
                if ri.value <= owned[ri.ingredient_id.hex]
            ]
            # Count satisfied ingredients per recipe, and remember how many
            # ingredients each of those recipes requires in total.
            matches = {}
            expected = {}
            for ri in usable:
                rid = ri.recipe_id.hex
                if rid not in matches:
                    expected[rid] = len(RecipeIngredient.query.filter(
                        RecipeIngredient.recipe_id == rid).all())
                    matches[rid] = 0
                matches[rid] += 1
            # Keep only recipes whose every ingredient was satisfied.
            recipe_ids = [rid for rid in matches
                          if expected[rid] <= matches[rid]]
            if query:
                recipes = Recipe.query \
                    .filter(Recipe.id.in_(recipe_ids)) \
                    .search(query.replace('+', ' ')) \
                    .all()
            else:
                recipes = Recipe.query.filter(Recipe.id.in_(recipe_ids)).all()
        else:
            if query:
                recipes = Recipe.query.search(query.replace('+', ' ')).all()
            else:
                recipes = Recipe.query.all()
        # FIX: the old code serialized the filtered result list twice and
        # duplicated the response construction in both branches.
        responseObject = {
            'status': 'success',
            'data': {
                'recipes': [recipe_to_json(r, make_json=False, verbose=False)
                            for r in recipes]
            }
        }
        return make_response(jsonify(responseObject), 200)
class RateResource(Resource):
    """
    Recipe rating resource.

    PUT /api/recipe/<recipe_id>/rate with JSON body {"value": <number>}.
    """
    decorators = [is_logged_in]

    def put(self, recipe_id):
        """Create or update the calling user's rating for `recipe_id`."""
        try:
            put_data = request.get_json()
            recipe = Recipe.query.get(recipe_id)
            if not recipe:
                responseObject = {
                    'status': 'fail',
                    'message': 'Recipe %s not found.' % recipe_id
                }
                return make_response(jsonify(responseObject), 404)
            value = float(put_data['value'])
            rating = Rating.query.filter(Rating.user_id == g.user_id,
                                         Rating.recipe_id == recipe.id).first()
            if rating:
                # Re-rating: replace this user's previous value within the
                # running average.  BUG FIX: the old code subtracted the
                # current average (recipe.rating) instead of the user's
                # previous rating (rating.value), skewing the average.
                new_value = (recipe.rating * recipe.num_ratings
                             - rating.value + value) / recipe.num_ratings
                rating.value = value
                recipe.rating = new_value
                db.session.commit()
            else:
                rating = Rating(user=g.user, recipe=recipe, value=value)
                db.session.add(rating)
                #### POTENTIAL RACE CONDITIONS ###
                if recipe.num_ratings == 0:
                    recipe.num_ratings = 1
                    recipe.rating = value
                else:
                    new_rating = (recipe.rating * recipe.num_ratings + value) \
                        / (recipe.num_ratings + 1)
                    recipe.rating = new_rating
                    recipe.num_ratings = recipe.num_ratings + 1
                db.session.commit()
            responseObject = {
                'status': 'success'
            }
            return make_response(jsonify(responseObject), 200)
        except KeyError:
            responseObject = {
                'status': 'fail',
                'message': 'No rating specified'
            }
            return make_response(jsonify(responseObject), 400)
        except ValueError:
            responseObject = {
                'status': 'fail',
                'message': 'Invalid rating value'
            }
            return make_response(jsonify(responseObject), 400)
        except Exception:
            # FIX: previously this printed and fell through, returning None
            # (an opaque 500 from Flask); now respond explicitly.
            traceback.print_exc()
            responseObject = {
                'status': 'error',
                'message': 'Internal server error'
            }
            return make_response(jsonify(responseObject), 500)
class CreateResource(Resource):
    """
    Recipe creation resource.
    """
    decorators = [is_logged_in]

    def post(self):
        """Create a recipe from the JSON body, rejecting duplicate names."""
        try:
            post_data = request.get_json()
            # Never trust a client-supplied rating on creation.
            if 'rating' in post_data:
                post_data['rating'] = None
            existing = Recipe.query.filter(
                Recipe.name == post_data['name']).first()
            if existing:
                payload = {
                    'status': 'fail',
                    'message': 'Recipe with name %s already exists.' % existing.name
                }
                return make_response(jsonify(payload), 202)
            recipe = json_to_recipe(post_data, creator=g.user)
            db.session.commit()
            payload = {
                'status': 'success',
                'recipe_id': recipe.id.hex
            }
            return make_response(jsonify(payload), 200)
        except KeyError:
            payload = {
                'status': 'fail',
                'message': 'Invalid recipe information provided.'
            }
            return make_response(jsonify(payload), 400)
class PrepareResource(Resource):
    """
    Preparing a recipe deducts its ingredients from the User's pantry.
    """
    decorators = [is_logged_in]

    def put(self, recipe_id):
        """Deduct the recipe's ingredient amounts from the user's pantry."""
        user_id = g.user_id
        recipe = Recipe.query.get(recipe_id)
        # BUG FIX: the old code tested `recipe_id is None`, which is never
        # true for a routed request, so a missing recipe crashed below
        # when `recipe.id` was accessed; test the lookup result instead.
        if recipe is None:
            responseObject = {
                'status': 'fail',
                'message': 'Recipe does not exist.'
            }
            return make_response(jsonify(responseObject), 404)
        recipe_ingredients = RecipeIngredient.query.filter(
            RecipeIngredient.recipe_id == recipe.id).all()
        user_ingredients = PantryIngredient.query.filter(
            PantryIngredient.user_id == user_id).all()
        # Map ingredient id -> the user's pantry row for quick lookup.
        user_dict = {ui.ingredient_id: ui for ui in user_ingredients}
        has_all_ingredients = True
        changed = []
        for ri in recipe_ingredients:
            ui = user_dict.get(ri.ingredient_id)
            if ui is None:
                has_all_ingredients = False
                continue
            if math.isclose(ui.value, 0.0):
                # NOTE(review): an empty pantry entry is skipped without
                # clearing has_all_ingredients -- behavior preserved, but
                # it looks like it should also mark the recipe unsatisfied.
                continue
            # Deduct, clamping at zero so quantities never go negative.
            difference = ui.value - ri.value
            ui.value = difference if difference > 0 else 0
            changed.append(ui)
        db.session.commit()
        if not changed:
            responseObject = {
                'status': 'fail',
                'message': 'User has no ingredients in recipe'
            }
            return make_response(jsonify(responseObject), 400)
        responseObject = {
            'status': 'success',
            'has_all_ingredients': has_all_ingredients
        }
        return make_response(jsonify(responseObject), 200)
class ModifyResource(Resource):
    """
    Recipe modification resource
    """
    decorators = [is_logged_in]

    def patch(self, recipe_id):
        # TODO: not implemented -- currently accepts the request and does nothing.
        pass

    def delete(self, recipe_id):
        # TODO: not implemented -- currently accepts the request and does nothing.
        pass
class DetailsResource(Resource):
    """
    Recipe details resource.
    """

    def get(self, recipe_id):
        """Return the full JSON representation of one recipe."""
        try:
            # Validates the id format; malformed ids raise ValueError.
            uuid.UUID(hex=recipe_id)
            recipe = Recipe.query.get(recipe_id)
            if not recipe:
                payload = {
                    'status': 'fail',
                    'message': 'Recipe %s not found.' % recipe_id
                }
                return make_response(jsonify(payload), 404)
            payload = {
                'status': 'success',
                'data': recipe_to_json(recipe, make_json=False)
            }
            return make_response(jsonify(payload), 200)
        except ValueError:
            payload = {
                'status': 'fail',
                'message': '%s is not a valid recipe id.' % recipe_id
            }
            return make_response(jsonify(payload), 400)
# URL routing for the recipe endpoints.
recipe_api.add_resource(SearchResource, '/api/recipe/search')
recipe_api.add_resource(CreateResource, '/api/recipe')
recipe_api.add_resource(ModifyResource, '/api/recipe/<string:recipe_id>')
recipe_api.add_resource(DetailsResource, '/api/recipe/<string:recipe_id>')
recipe_api.add_resource(RateResource, '/api/recipe/<string:recipe_id>/rate')
recipe_api.add_resource(PrepareResource, '/api/recipe/<string:recipe_id>/prepare')
| |
# -*- coding: utf-8 -*-
u"""
sphinxcontrib.ros.message
~~~~~~~~~~~~~~~~~~~~~~~~~
:ros:msg: directive.
:copyright: Copyright 2015 by Tamaki Nishino.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import codecs
import re
from sphinx.locale import l_
from docutils import nodes
from docutils.statemachine import StringList
from docutils.parsers.rst import directives
from sphinx.util.docfields import TypedField, GroupedField
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import (Punctuation, Literal,
Text, Comment, Operator, Name, Number, Keyword)
from .base import ROSObjectDescription
# ROS builtin field types ('Header' included to support the legacy bare form).
BUILTIN_TYPES = ('bool', 'byte',
                 'int8', 'uint8', 'int16', 'uint16',
                 'int32', 'uint32', 'int64', 'uint64',
                 'float32', 'float64', 'string', 'time', 'duration', 'Header')
# Suffixes used to build docfield names for a field's type and constant value.
TYPE_SUFFIX = 'type'
VALUE_SUFFIX = 'value'
def split_blocks(strings):
    u"""Split a StringList into blocks separated by blank lines."""
    blocks = [StringList()]
    for source, offset, value in strings.xitems():
        if value.strip():
            blocks[-1].append(value, source=source, offset=offset)
        elif len(blocks[-1]):
            # a blank line terminates the current (non-empty) block
            blocks.append(StringList())
    # drop a trailing empty block
    if len(blocks[-1]) == 0:
        del blocks[-1]
    return blocks
def join_blocks(blocks):
    u"""Concatenate blocks into one StringList, blank-line separated."""
    joined = StringList()
    for index, block in enumerate(blocks):
        if index:
            # single blank line between consecutive blocks
            joined.extend(StringList([u'']))
        joined.extend(block)
    return joined
def align_strings(strings, header=''):
    u"""Left-align `strings` in place, prefixing each line with `header`.

    The common leading indentation is removed from every line; blank
    lines still receive the header.
    """
    indents = [len(line) - len(line.lstrip()) for line in strings.data if line]
    margin = min(indents) if indents else 0
    if margin > 0 or header:
        for index, line in enumerate(strings.data):
            strings.data[index] = header + line[margin:]
class ROSField(object):
    u"""A field or constant in a message file with comments.

    Attributes are only populated when the line matches `matcher`;
    otherwise `name` is None and the line should be treated as unparsable.
    """
    # <type>[size] <name>[ = value][spaces][# comment]
    # FIX: use raw strings -- the second fragment previously used '\s',
    # '\[' etc. in a plain literal, which are invalid escape sequences
    # (DeprecationWarning, then SyntaxError) on modern Python.
    matcher = re.compile(r'([\w/]+)(\s*\[\s*\d*\s*\])?'
                         r'\s+(\w+)(\s*=\s*[^#]+)?(\s*)(#.*)?$')

    def __init__(self, line, source=None, offset=0,
                 pre_comments='', package_name=''):
        self.source = source
        self.offset = offset
        result = self.matcher.match(line)
        if result is None:
            # leave name as None so callers can detect the parse failure
            self.name = None
            return
        self.name = result.group(3)
        self.type = result.group(1)
        self.size = result.group(2).replace(' ', '') if result.group(2) else ''
        self.value = result.group(4).lstrip()[1:] if result.group(4) else ''
        comment = result.group(6) if result.group(6) else ''
        if self.type == 'string' and self.value:
            # a string constant runs to end of line: '#' is part of the value
            self.value += result.group(5) + comment
            comment = ''
        else:
            self.value = self.value.strip()
            comment = comment[1:]
        if self.type not in BUILTIN_TYPES:
            if '/' not in self.type:
                # if the type is not builtin type and misses the package name
                self.type = package_name + '/' + self.type
        elif self.type == 'Header':
            # bare 'Header' is shorthand for std_msgs/Header
            self.type = 'std_msgs/Header'
        self.comment = StringList([comment], items=[(source, offset)])
        self.pre_comments = pre_comments
        self.post_comments = StringList()

    def get_description(self, field_comment_option):
        u"""Select this field's description per the field-comment option."""
        desc = StringList()
        pre_blocks = split_blocks(self.pre_comments)
        post_blocks = split_blocks(self.comment + self.post_comments)
        if 'up-all' in field_comment_option:
            # every comment block above the field
            desc = join_blocks(pre_blocks)
        elif 'up' in field_comment_option:
            # only the comment block directly above the field
            if pre_blocks:
                desc = pre_blocks[-1]
        elif 'right1' in field_comment_option:
            # only the same-line comment
            desc = self.comment
        elif 'right-down' in field_comment_option:
            # same-line comment plus the first following block
            if post_blocks:
                desc = post_blocks[0]
        elif 'right-down-all' in field_comment_option:
            # same-line comment plus all following blocks
            if post_blocks:
                desc = join_blocks(post_blocks)
        return desc
class ROSFieldGroup(object):
    u"""A group of fields and constants.

    Renders parsed ROSField objects as reST docfields and declares the
    matching Sphinx doc_field_types for one message section.
    """

    def __init__(self, field_name=None, field_label=None,
                 constant_name=None, constant_label=None):
        # docfield role names and display labels for fields vs constants
        self.field_name = field_name
        self.field_label = field_label
        self.constant_name = constant_name
        self.constant_label = constant_label

    def make_docfields(self, fields, field_comment_option):
        # Render each field as ':<role> <name>: <desc>' lines plus companion
        # ':<role>-type:' and (for constants) ':<role>-value:' lines.
        docfields = StringList([u''])
        for field in fields:
            # a field carrying a value is a constant
            field_type = self.constant_name if field.value else self.field_name
            name = field.name + field.size
            desc = field.get_description(field_comment_option)
            if len(desc) == 0:
                docfields.append(u':{0} {1}:'.format(field_type, name),
                                 source=field.source, offset=field.offset)
            elif len(desc) == 1:
                docfields.append(u':{0} {1}: {2}'.format(field_type,
                                                         name,
                                                         desc[0].strip()),
                                 source=desc.source(0), offset=desc.offset(0))
            elif len(desc) > 1:
                # multi-line description: indent continuation lines, as a
                # reST line block when 'quote' is requested
                if 'quote' in field_comment_option:
                    align_strings(desc, ' | ')
                else:
                    align_strings(desc, ' ')
                docfields.append(u':{0} {1}: {2}'.format(field_type,
                                                         name, desc[0]),
                                 source=desc.source(0), offset=desc.offset(0))
                docfields.extend(desc[1:])
            docfields.append(u':{0}-{1} {2}: {3}'.format(field_type,
                                                         TYPE_SUFFIX,
                                                         name,
                                                         field.type),
                             source=field.source, offset=field.offset)
            if field.value:
                docfields.append(u':{0}-{1} {2}: {3}'.format(field_type,
                                                             VALUE_SUFFIX,
                                                             name,
                                                             field.value),
                                 source=field.source, offset=field.offset)
        return docfields

    def get_doc_field_types(self):
        # Sphinx field descriptors: typed fields, typed constants, and a
        # grouped list of constant values.
        return [
            TypedField(self.field_name,
                       label=l_(self.field_label),
                       names=(self.field_name,),
                       typerolename='msg',
                       typenames=('{0}-{1}'.format(self.field_name,
                                                   TYPE_SUFFIX),)),
            TypedField(self.constant_name,
                       label=l_(self.constant_label),
                       names=(self.constant_name,),
                       typerolename='msg',
                       typenames=('{0}-{1}'.format(self.constant_name,
                                                   TYPE_SUFFIX),)),
            GroupedField('{0}-{1}'.format(self.constant_name,
                                          VALUE_SUFFIX),
                         label=l_('{0} (Value)'.format(self.constant_label)),
                         names=('{0}-{1}'.format(self.constant_name,
                                                 VALUE_SUFFIX),)),
        ]

    def get_doc_merge_fields(self):
        # Merge '<constant>-value' docfields into the constant field group.
        return {'{0}-{1}'.format(self.constant_name, VALUE_SUFFIX):
                self.constant_name}
class ROSTypeFile(object):
    u"""Reader/parser for one ROS type-definition file (.msg/.srv/.action)."""

    def __init__(self, ext=None, groups=None):
        if groups is None:
            groups = []
        self.ext = ext          # file extension, e.g. 'msg'
        self.groups = groups    # one ROSFieldGroup per '---'-separated section

    def get_doc_field_types(self):
        """Collect the Sphinx doc_field_types of every group."""
        return [doc_field_type
                for field_group in self.groups
                for doc_field_type in field_group.get_doc_field_types()]

    def get_doc_merge_fields(self):
        """Collect the merged docfield mapping of every group."""
        doc_merge_fields = {}
        for field_group in self.groups:
            doc_merge_fields.update(field_group.get_doc_merge_fields())
        return doc_merge_fields

    def read(self, package_path, ros_type):
        """Read '<package_path>/<ext>/<ros_type>.<ext>'.

        Returns (path, StringList) or (path, None) when the file is missing.
        """
        type_file = os.path.join(package_path,
                                 self.ext,
                                 ros_type + '.' + self.ext)
        if not os.path.exists(type_file):
            file_content = None
        else:
            # BUG FIX: the file handle was previously opened and never closed.
            with codecs.open(type_file, 'r', 'utf-8') as stream:
                raw_content = stream.read()
            file_content = StringList(raw_content.splitlines(),
                                      source=type_file)
        return type_file, file_content

    def parse(self, file_content, package_name):
        u"""Split `file_content` into per-section lists of ROSField objects.

        Sections are separated by '---' lines (service request/response,
        action goal/result/feedback).
        """
        all_fields = []
        fields = []
        pre_comments = StringList()
        for item in file_content.xitems():  # (source, offset, value)
            line = item[2].strip()
            if line and not [c for c in line if not c == '-']:
                # a line consisting only of dashes starts a new section
                all_fields.append(fields)
                fields = []
            elif line == '' or line[0] == '#':
                if line:
                    line = line[1:]  # drop the leading '#'
                if fields:
                    # also record as a trailing comment of the previous field
                    fields[-1].post_comments.append(line,
                                                    source=item[0],
                                                    offset=item[1])
                pre_comments.append(line, source=item[0], offset=item[1])
            else:
                new_field = ROSField(line, source=item[0], offset=item[1],
                                     pre_comments=pre_comments,
                                     package_name=package_name)
                # if successfully parsed
                if new_field.name:
                    fields.append(new_field)
                    pre_comments = StringList()
                else:
                    # todo: report via the Sphinx reporter instead of stdout
                    print("?? <%s>" % line)
        all_fields.append(fields)
        return all_fields

    def make_docfields(self, all_fields, field_comment_option):
        """Render every parsed section through its matching field group."""
        docfields = StringList()
        for field_group, fields in zip(self.groups, all_fields):
            docfields.extend(field_group.make_docfields(fields,
                                                        field_comment_option))
        return docfields
class ROSType(ROSObjectDescription):
    # Base directive for ROS type descriptions (message/service/action).
    has_arguments = True
    option_spec = {
        'noindex': directives.flag,
    }

    def merge_field(self, src_node, dest_node):
        # Append ': <value>' onto the destination field entry when merging a
        # constant's value docfield into its constant entry.
        # NOTE(review): insert positions 4/5 assume a fixed child layout of
        # the docfield-transform node -- confirm before changing.
        dest_node.insert(4, nodes.Text(':'))
        dest_node.insert(5, nodes.literal('', src_node[2].astext()))
class ROSAutoType(ROSType):
option_spec = {
'noindex': directives.flag,
'base': directives.path,
'description': directives.unchanged,
'raw': lambda x: directives.choice(x, ('head', 'tail')),
'field-comment': directives.unchanged,
}
def update_content(self):
package_name, type_name = self.arguments[0].split('/', 1)
package = self.find_package(package_name)
if not package:
return
file_path, file_content \
= self.type_file.read(os.path.dirname(package.filename),
type_name)
if file_content is None:
self.state_machine.reporter.warning(
'cannot find file {0}'.format(file_path),
line=self.lineno)
return
type_relfile = os.path.relpath(file_path, self.env.srcdir)
self.env.note_dependency(type_relfile)
fields = self.type_file.parse(file_content, package_name)
# fields
options = self.options.get('field-comment', '')
field_comment_option = options.encode('ascii').lower().split()
content = self.type_file.make_docfields(fields, field_comment_option)
# description
if fields[0] and fields[0][0]:
desc = fields[0][0].pre_comments
desc_blocks = split_blocks(desc)
if desc_blocks:
description_option = [x.strip() for x in
self.options.get('description', '').
encode('ascii').lower().split(',')]
first = second = None
for option in description_option:
if not option: # ignore empty option
pass
elif ':' in option:
first, second = option.split(':', 1)
elif option == 'quote':
pass
else:
raise ValueError(
"unkonwn option {0} in "
"the description option".format(option))
blocks = desc_blocks[(int(first) if first else None):
(int(second) if second else None)]
if blocks:
description = join_blocks(blocks)
if 'quote' in description_option:
align_strings(description, '| ')
else:
align_strings(description)
content = content + StringList([u'']) + description
content = content + self.content
# raw file content
raw_option = self.options.get('raw', None)
#
if raw_option is not None:
code_block = StringList([u'', u'.. code-block:: rostype', u''])
code_block.extend(StringList([' '+l for l in file_content.data],
items=file_content.items))
if raw_option == 'head':
content = code_block + StringList([u'']) + content
elif raw_option == 'tail':
content = content + code_block
return content
def run(self):
    """Directive entry point: drop the 'auto' marker from the directive
    name (e.g. 'automsg' -> 'msg') and delegate to ROSType.run."""
    self.name = self.name.replace('auto', '')
    return ROSType.run(self)
class ROSMessageBase(object):
    """Mixin describing ROS .msg files: a single field/constant group."""

    # Parser/formatter for .msg type files.
    type_file = ROSTypeFile(
        ext='msg',
        groups=[
            ROSFieldGroup(field_name='field',
                          field_label='Field',
                          constant_name='constant',
                          constant_label='Constant'),
        ])
    doc_field_types = type_file.get_doc_field_types()
    doc_merge_fields = type_file.get_doc_merge_fields()
class ROSMessage(ROSMessageBase, ROSType):
    """Directive for manually written ROS message documentation."""
    pass
class ROSAutoMessage(ROSMessageBase, ROSAutoType):
    """Directive that auto-extracts ROS message documentation from .msg files."""
    pass
class ROSServiceBase(object):
    """Mixin describing ROS .srv files: request and response groups."""

    # Parser/formatter for .srv type files (request + response sections).
    type_file = ROSTypeFile(
        ext='srv',
        groups=[
            ROSFieldGroup(field_name='req-field',
                          field_label='Field (Request)',
                          constant_name='req-constant',
                          constant_label='Constant (Request)'),
            ROSFieldGroup(field_name='res-field',
                          field_label='Field (Response)',
                          constant_name='res-constant',
                          constant_label='Constant (Response)')
        ])
    doc_field_types = type_file.get_doc_field_types()
    doc_merge_fields = type_file.get_doc_merge_fields()
class ROSService(ROSServiceBase, ROSType):
    """Directive for manually written ROS service documentation."""
    pass
class ROSAutoService(ROSServiceBase, ROSAutoType):
    """Directive that auto-extracts ROS service documentation from .srv files."""
    pass
class ROSActionBase(object):
    """Mixin describing ROS .action files: goal, result, feedback groups."""

    # Parser/formatter for .action type files (three sections).
    type_file = ROSTypeFile(
        ext='action',
        groups=[
            ROSFieldGroup(field_name='goal-field',
                          field_label='Field (Goal)',
                          constant_name='goal-constant',
                          constant_label='Constant (Goal)'),
            ROSFieldGroup(field_name='result-field',
                          field_label='Field (Result)',
                          constant_name='result-constant',
                          constant_label='Constant (Result)'),
            ROSFieldGroup(field_name='feedback-field',
                          field_label='Field (Feedback)',
                          constant_name='feedback-constant',
                          constant_label='Constant (Feedback)')
        ])
    doc_field_types = type_file.get_doc_field_types()
    doc_merge_fields = type_file.get_doc_merge_fields()
class ROSAction(ROSActionBase, ROSType):
    """Directive for manually written ROS action documentation."""
    pass
class ROSAutoAction(ROSActionBase, ROSAutoType):
    """Directive that auto-extracts ROS action documentation from .action files."""
    pass
class ROSTypeLexer(RegexLexer):
    """Pygments lexer for ROS interface files (.msg, .srv, .action)."""

    name = 'ROSTYPE'
    aliases = ['rostype']
    filenames = ['*.msg', '*.srv', '*.action']

    tokens = {
        # Rules shared by every state: whitespace, line comments, array
        # brackets, '=' for constants, and numeric literals.
        'common': [
            (r'[ \t]+', Text),
            (r'#.*$', Comment.Single),
            (r'[\[\]]', Punctuation),
            (r'=', Operator),
            (r'\-?(\d+\.\d*|\.\d+)', Number.Float),
            (r'\-?\d+', Number.Integer),
        ],
        # Entered after a type name: lex the field identifier, then pop
        # back to 'root'.
        'field': [
            include('common'),
            (r'\n', Text, '#pop'),
            (r'\w+', Name.Property, '#pop'),
        ],
        'root': [
            include('common'),
            (r'\n', Text),
            # '---' separates srv request/response and action sections.
            (r'---\n', Keyword),
            # String constants take the rest of the line as their value.
            (r'(string)(\s+)([a-zA-Z_]\w*)(\s*)(=)(\s*)(.*)(\s*\n)',
             bygroups(Name.Builtin, Text,
                      Name.Property, Text,
                      Operator, Text,
                      Literal.String, Text)),
            ('(' + '|'.join(BUILTIN_TYPES) + ')', Name.Builtin, 'field'),
            (r'[\w/]+', Name.Class, 'field'),
        ],
    }
| |
"""
"PYSTONE" Benchmark Program
Version: Python/1.1 (corresponds to C/1.1 plus 2 Pystone fixes)
Author: Reinhold P. Weicker, CACM Vol 27, No 10, 10/84 pg. 1013.
Translated from ADA to C by Rick Richardson.
Every method to preserve ADA-likeness has been used,
at the expense of C-ness.
Translated from C to Python by Guido van Rossum.
Version History:
Version 1.1 corrects two bugs in version 1.0:
First, it leaked memory: in Proc1(), NextRecord ends
up having a pointer to itself. I have corrected this
by zapping NextRecord.PtrComp at the end of Proc1().
Second, Proc3() used the operator != to compare a
record to None. This is rather inefficient and not
true to the intention of the original benchmark (where
a pointer comparison to None is intended; the !=
operator attempts to find a method __cmp__ to do value
comparison of the record). Version 1.1 runs 5-10
percent faster than version 1.0, so benchmark figures
of different versions can't be compared directly.
"""
LOOPS = 50000
from time import time as clock
__version__ = "1.1"
[Ident1, Ident2, Ident3, Ident4, Ident5] = range(1, 6)
class Record:
    """Mutable record holding the five components the benchmark shuffles."""

    _FIELDS = ('PtrComp', 'Discr', 'EnumComp', 'IntComp', 'StringComp')

    def __init__(self, PtrComp=None, Discr=0, EnumComp=0,
                 IntComp=0, StringComp=0):
        self.PtrComp = PtrComp
        self.Discr = Discr
        self.EnumComp = EnumComp
        self.IntComp = IntComp
        self.StringComp = StringComp

    def copy(self):
        """Return a shallow duplicate of this record."""
        return Record(*[getattr(self, name) for name in self._FIELDS])
TRUE = 1
FALSE = 0
def main(loops=LOOPS):
    """Run the benchmark for `loops` passes and print the results."""
    benchtime, stones = pystones(loops)
    print("Pystone(%s) time for %d passes = %g" % \
        (__version__, loops, benchtime))
    print("This machine benchmarks at %g pystones/second" % stones)
def pystones(loops=LOOPS):
    """Return (benchtime, pystones_per_second) for `loops` iterations."""
    return Proc0(loops)
# Global benchmark state; (re)initialized by Proc0 at the start of a run.
IntGlob = 0
BoolGlob = FALSE
Char1Glob = '\0'
Char2Glob = '\0'
Array1Glob = [0]*51
# 51x51 matrix of independent row copies of Array1Glob.
Array2Glob = list(map(lambda x: x[:], [Array1Glob]*51))
PtrGlb = None
PtrGlbNext = None
def Proc0(loops=LOOPS):
    """Main benchmark driver.

    Runs `loops` passes of the Dhrystone-derived workload and returns
    (benchtime, loops_per_benchtime), i.e. elapsed seconds (minus empty
    loop overhead) and pystones/second.
    """
    global IntGlob
    global BoolGlob
    global Char1Glob
    global Char2Glob
    global Array1Glob
    global Array2Glob
    global PtrGlb
    global PtrGlbNext

    # Time an empty loop so its overhead can be subtracted at the end.
    starttime = clock()
    for i in range(loops):
        pass
    nulltime = clock() - starttime

    # Set up the linked pair of records and the array fixture.
    PtrGlbNext = Record()
    PtrGlb = Record()
    PtrGlb.PtrComp = PtrGlbNext
    PtrGlb.Discr = Ident1
    PtrGlb.EnumComp = Ident3
    PtrGlb.IntComp = 40
    PtrGlb.StringComp = "DHRYSTONE PROGRAM, SOME STRING"
    String1Loc = "DHRYSTONE PROGRAM, 1'ST STRING"
    Array2Glob[8][7] = 10

    starttime = clock()
    for i in range(loops):
        Proc5()
        Proc4()
        IntLoc1 = 2
        IntLoc2 = 3
        String2Loc = "DHRYSTONE PROGRAM, 2'ND STRING"
        EnumLoc = Ident2
        BoolGlob = not Func2(String1Loc, String2Loc)
        while IntLoc1 < IntLoc2:
            IntLoc3 = 5 * IntLoc1 - IntLoc2
            IntLoc3 = Proc7(IntLoc1, IntLoc2)
            IntLoc1 = IntLoc1 + 1
        Proc8(Array1Glob, Array2Glob, IntLoc1, IntLoc3)
        PtrGlb = Proc1(PtrGlb)
        CharIndex = 'A'
        while CharIndex <= Char2Glob:
            if EnumLoc == Func1(CharIndex, 'C'):
                EnumLoc = Proc6(Ident1)
            CharIndex = chr(ord(CharIndex)+1)
        IntLoc3 = IntLoc2 * IntLoc1
        # NOTE(review): '/' is float division on Python 3; the original
        # C/Python-2 benchmark used integer semantics here -- confirm
        # intent before "fixing", as it would change the workload.
        IntLoc2 = IntLoc3 / IntLoc1
        IntLoc2 = 7 * (IntLoc3 - IntLoc2) - IntLoc1
        IntLoc1 = Proc2(IntLoc1)
    benchtime = clock() - starttime - nulltime
    if benchtime == 0.0:
        # Guard against a clock resolution too coarse to measure the run.
        loopsPerBenchtime = 0.0
    else:
        loopsPerBenchtime = (loops / benchtime)
    return benchtime, loopsPerBenchtime
def Proc1(PtrParIn):
    """Copy PtrGlb into PtrParIn.PtrComp and exercise Proc3/Proc6/Proc7.

    Zaps NextRecord.PtrComp before returning; per the module docstring
    this prevents the version-1.0 memory leak where NextRecord ended up
    pointing to itself.
    """
    PtrParIn.PtrComp = NextRecord = PtrGlb.copy()
    PtrParIn.IntComp = 5
    NextRecord.IntComp = PtrParIn.IntComp
    NextRecord.PtrComp = PtrParIn.PtrComp
    NextRecord.PtrComp = Proc3(NextRecord.PtrComp)
    if NextRecord.Discr == Ident1:
        NextRecord.IntComp = 6
        NextRecord.EnumComp = Proc6(PtrParIn.EnumComp)
        NextRecord.PtrComp = PtrGlb.PtrComp
        NextRecord.IntComp = Proc7(NextRecord.IntComp, 10)
    else:
        PtrParIn = NextRecord.copy()
    NextRecord.PtrComp = None
    return PtrParIn
def Proc2(IntParIO):
    """Return IntParIO + 9 - IntGlob when Char1Glob == 'A'.

    The loop runs exactly once in the benchmark: Proc5 sets Char1Glob to
    'A' beforehand, so EnumLoc is assigned Ident1 and the loop breaks.
    """
    IntLoc = IntParIO + 10
    while 1:
        if Char1Glob == 'A':
            IntLoc = IntLoc - 1
            IntParIO = IntLoc - IntGlob
            EnumLoc = Ident1
        if EnumLoc == Ident1:
            break
    return IntParIO
def Proc3(PtrParOut):
    """Return PtrGlb.PtrComp (when PtrGlb is set) and refresh PtrGlb.IntComp.

    NOTE(review): the else-branch dereferences PtrGlb while it is None,
    which would raise; in the benchmark Proc0 always initializes PtrGlb
    first, so that branch never runs.
    """
    global IntGlob
    if PtrGlb is not None:
        PtrParOut = PtrGlb.PtrComp
    else:
        IntGlob = 100
    PtrGlb.IntComp = Proc7(10, IntGlob)
    return PtrParOut
def Proc4():
    """Set Char2Glob to 'B'; the BoolLoc computation is deliberate busywork."""
    global Char2Glob
    BoolLoc = Char1Glob == 'A'
    BoolLoc = BoolLoc or BoolGlob
    Char2Glob = 'B'
def Proc5():
    """Reset Char1Glob to 'A' and BoolGlob to FALSE for the next pass."""
    global Char1Glob
    global BoolGlob
    Char1Glob = 'A'
    BoolGlob = FALSE
def Proc6(EnumParIn):
    """Map an Ident* enum value to another per the Dhrystone table.

    Func3 is true only for Ident3; otherwise the result is first forced
    to Ident4 before the per-value mapping below possibly overrides it.
    """
    EnumParOut = EnumParIn
    if not Func3(EnumParIn):
        EnumParOut = Ident4
    if EnumParIn == Ident1:
        EnumParOut = Ident1
    elif EnumParIn == Ident2:
        if IntGlob > 100:
            EnumParOut = Ident1
        else:
            EnumParOut = Ident4
    elif EnumParIn == Ident3:
        EnumParOut = Ident2
    elif EnumParIn == Ident4:
        pass
    elif EnumParIn == Ident5:
        EnumParOut = Ident3
    return EnumParOut
def Proc7(IntParI1, IntParI2):
    """Return IntParI2 + IntParI1 + 2, staged in two steps like the C original."""
    increment = IntParI1 + 2
    return IntParI2 + increment
def Proc8(Array1Par, Array2Par, IntParI1, IntParI2):
    """Write fixed slots of the 1-D and 2-D arrays around index IntParI1 + 5.

    Mutates both array arguments in place and sets the IntGlob global to 5.
    """
    global IntGlob
    base = IntParI1 + 5
    Array1Par[base] = IntParI2
    Array1Par[base + 1] = Array1Par[base]
    Array1Par[base + 30] = base
    row = Array2Par[base]
    for col in (base, base + 1):
        row[col] = base
    row[base - 1] += 1
    Array2Par[base + 20][base] = Array1Par[base]
    IntGlob = 5
def Func1(CharPar1, CharPar2):
    """Return Ident1 when the two characters differ, Ident2 when they match."""
    return Ident2 if CharPar1 == CharPar2 else Ident1
def Func2(StrParI1, StrParI2):
    """Compare characters/substrings of the two benchmark strings.

    NOTE(review): CharLoc is only bound inside the while-loop branch; if
    Func1 ever returned Ident2 on the first comparison this would raise
    NameError. The fixed benchmark strings differ at those positions, so
    it never triggers -- preserved as-is to keep the workload identical.
    """
    IntLoc = 1
    while IntLoc <= 1:
        if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
            CharLoc = 'A'
            IntLoc = IntLoc + 1
    if CharLoc >= 'W' and CharLoc <= 'Z':
        IntLoc = 7
    if CharLoc == 'X':
        return TRUE
    else:
        if StrParI1 > StrParI2:
            IntLoc = IntLoc + 7
            return TRUE
        else:
            return FALSE
def Func3(EnumParIn):
    """Return TRUE iff the enum argument equals Ident3."""
    return TRUE if EnumParIn == Ident3 else FALSE
main(LOOPS)
| |
# coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AccessCodeFormat(object):
    """Swagger model for DocuSign access-code format settings.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
      swagger_types (dict): The key is attribute name
          and the value is attribute type.
      attribute_map (dict): The key is attribute name
          and the value is json key in definition.
    """

    swagger_types = {
        'format_required': 'str',
        'format_required_metadata': 'SettingsMetadata',
        'letter_required': 'str',
        'letter_required_metadata': 'SettingsMetadata',
        'minimum_length': 'str',
        'minimum_length_metadata': 'SettingsMetadata',
        'number_required': 'str',
        'number_required_metadata': 'SettingsMetadata',
        'special_character_required': 'str',
        'special_character_required_metadata': 'SettingsMetadata'
    }

    attribute_map = {
        'format_required': 'formatRequired',
        'format_required_metadata': 'formatRequiredMetadata',
        'letter_required': 'letterRequired',
        'letter_required_metadata': 'letterRequiredMetadata',
        'minimum_length': 'minimumLength',
        'minimum_length_metadata': 'minimumLengthMetadata',
        'number_required': 'numberRequired',
        'number_required_metadata': 'numberRequiredMetadata',
        'special_character_required': 'specialCharacterRequired',
        'special_character_required_metadata': 'specialCharacterRequiredMetadata'
    }

    def __init__(self, format_required=None, format_required_metadata=None,
                 letter_required=None, letter_required_metadata=None,
                 minimum_length=None, minimum_length_metadata=None,
                 number_required=None, number_required_metadata=None,
                 special_character_required=None,
                 special_character_required_metadata=None):  # noqa: E501
        """AccessCodeFormat - a model defined in Swagger"""  # noqa: E501
        ctor_args = dict(locals())
        ctor_args.pop('self')
        # Every backing slot starts as None; non-None constructor args are
        # then routed through the property setters, exactly as the
        # generated code does.
        for attr_name in self.swagger_types:
            setattr(self, '_' + attr_name, None)
        self.discriminator = None
        for attr_name, attr_value in ctor_args.items():
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    # Each model attribute is a plain property over its '_'-prefixed
    # backing slot; the accessors carry no validation, matching the
    # generated getters/setters.
    def _accessor(attr_name):  # noqa: N805 - class-body helper, deleted below
        def _get(self):
            return getattr(self, '_' + attr_name)

        def _set(self, value):
            setattr(self, '_' + attr_name, value)

        doc = "The %s of this AccessCodeFormat." % attr_name
        return property(_get, _set, doc=doc)

    format_required = _accessor('format_required')
    format_required_metadata = _accessor('format_required_metadata')
    letter_required = _accessor('letter_required')
    letter_required_metadata = _accessor('letter_required_metadata')
    minimum_length = _accessor('minimum_length')
    minimum_length_metadata = _accessor('minimum_length_metadata')
    number_required = _accessor('number_required')
    number_required_metadata = _accessor('number_required_metadata')
    special_character_required = _accessor('special_character_required')
    special_character_required_metadata = _accessor(
        'special_character_required_metadata')

    del _accessor

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        if issubclass(AccessCodeFormat, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, AccessCodeFormat)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.spanner_v1.types import keys
from google.cloud.spanner_v1.types import mutation
from google.cloud.spanner_v1.types import result_set
from google.cloud.spanner_v1.types import transaction as gs_transaction
from google.cloud.spanner_v1.types import type as gs_type
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
# proto-plus module registry: declares every message type defined in this
# module under the google.spanner.v1 package.
__protobuf__ = proto.module(
    package="google.spanner.v1",
    manifest={
        "CreateSessionRequest",
        "BatchCreateSessionsRequest",
        "BatchCreateSessionsResponse",
        "Session",
        "GetSessionRequest",
        "ListSessionsRequest",
        "ListSessionsResponse",
        "DeleteSessionRequest",
        "RequestOptions",
        "ExecuteSqlRequest",
        "ExecuteBatchDmlRequest",
        "ExecuteBatchDmlResponse",
        "PartitionOptions",
        "PartitionQueryRequest",
        "PartitionReadRequest",
        "Partition",
        "PartitionResponse",
        "ReadRequest",
        "BeginTransactionRequest",
        "CommitRequest",
        "RollbackRequest",
    },
)
class CreateSessionRequest(proto.Message):
    r"""The request for
    [CreateSession][google.spanner.v1.Spanner.CreateSession].

    Attributes:
        database (str):
            Required. The database in which the new
            session is created.
        session (google.cloud.spanner_v1.types.Session):
            Required. The session to create.
    """

    # Wire field numbers must not change; they define the proto layout.
    database = proto.Field(proto.STRING, number=1,)
    session = proto.Field(proto.MESSAGE, number=2, message="Session",)
class BatchCreateSessionsRequest(proto.Message):
    r"""The request for
    [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].

    Attributes:
        database (str):
            Required. The database in which the new
            sessions are created.
        session_template (google.cloud.spanner_v1.types.Session):
            Parameters to be applied to each created
            session.
        session_count (int):
            Required. The number of sessions to be created in this batch
            call. The API may return fewer than the requested number of
            sessions. If a specific number of sessions are desired, the
            client can make additional calls to BatchCreateSessions
            (adjusting
            [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count]
            as necessary).
    """

    database = proto.Field(proto.STRING, number=1,)
    session_template = proto.Field(proto.MESSAGE, number=2, message="Session",)
    session_count = proto.Field(proto.INT32, number=3,)
class BatchCreateSessionsResponse(proto.Message):
    r"""The response for
    [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions].

    Attributes:
        session (Sequence[google.cloud.spanner_v1.types.Session]):
            The freshly created sessions.
    """

    session = proto.RepeatedField(proto.MESSAGE, number=1, message="Session",)
class Session(proto.Message):
    r"""A session in the Cloud Spanner API.

    Attributes:
        name (str):
            Output only. The name of the session. This is
            always system-assigned.
        labels (Sequence[google.cloud.spanner_v1.types.Session.LabelsEntry]):
            The labels for the session.

            -  Label keys must be between 1 and 63 characters long and
               must conform to the following regular expression:
               ``[a-z]([-a-z0-9]*[a-z0-9])?``.
            -  Label values must be between 0 and 63 characters long and
               must conform to the regular expression
               ``([a-z]([-a-z0-9]*[a-z0-9])?)?``.
            -  No more than 64 labels can be associated with a given
               session.

            See https://goo.gl/xmQnxf for more information on and
            examples of labels.
        create_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The timestamp when the session
            is created.
        approximate_last_use_time (google.protobuf.timestamp_pb2.Timestamp):
            Output only. The approximate timestamp when
            the session is last used. It is typically
            earlier than the actual last use time.
    """

    name = proto.Field(proto.STRING, number=1,)
    # string -> string map of user labels attached to the session.
    labels = proto.MapField(proto.STRING, proto.STRING, number=2,)
    create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
    approximate_last_use_time = proto.Field(
        proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,
    )
class GetSessionRequest(proto.Message):
    r"""The request for [GetSession][google.spanner.v1.Spanner.GetSession].

    Attributes:
        name (str):
            Required. The name of the session to
            retrieve.
    """

    name = proto.Field(proto.STRING, number=1,)
class ListSessionsRequest(proto.Message):
    r"""The request for
    [ListSessions][google.spanner.v1.Spanner.ListSessions].

    Attributes:
        database (str):
            Required. The database in which to list
            sessions.
        page_size (int):
            Number of sessions to be returned in the
            response. If 0 or less, defaults to the server's
            maximum allowed page size.
        page_token (str):
            If non-empty, ``page_token`` should contain a
            [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token]
            from a previous
            [ListSessionsResponse][google.spanner.v1.ListSessionsResponse].
        filter (str):
            An expression for filtering the results of the request.
            Filter rules are case insensitive. The fields eligible for
            filtering are:

            -  ``labels.key`` where key is the name of a label

            Some examples of using filters are:

            -  ``labels.env:*`` --> The session has the label "env".
            -  ``labels.env:dev`` --> The session has the label "env"
               and the value of the label contains the string "dev".
    """

    database = proto.Field(proto.STRING, number=1,)
    page_size = proto.Field(proto.INT32, number=2,)
    page_token = proto.Field(proto.STRING, number=3,)
    # Named to mirror the proto field; intentionally shadows the builtin.
    filter = proto.Field(proto.STRING, number=4,)
class ListSessionsResponse(proto.Message):
    r"""The response for
    [ListSessions][google.spanner.v1.Spanner.ListSessions].

    Attributes:
        sessions (Sequence[google.cloud.spanner_v1.types.Session]):
            The list of requested sessions.
        next_page_token (str):
            ``next_page_token`` can be sent in a subsequent
            [ListSessions][google.spanner.v1.Spanner.ListSessions] call
            to fetch more of the matching sessions.
    """

    @property
    def raw_page(self):
        # Pagination hook: the raw (unwrapped) page is this message itself.
        return self

    sessions = proto.RepeatedField(proto.MESSAGE, number=1, message="Session",)
    next_page_token = proto.Field(proto.STRING, number=2,)
class DeleteSessionRequest(proto.Message):
    r"""The request for
    [DeleteSession][google.spanner.v1.Spanner.DeleteSession].

    Attributes:
        name (str):
            Required. The name of the session to delete.
    """

    name = proto.Field(proto.STRING, number=1,)
class RequestOptions(proto.Message):
    r"""Common request options for various APIs.

    Attributes:
        priority (google.cloud.spanner_v1.types.RequestOptions.Priority):
            Priority for the request.
        request_tag (str):
            A per-request tag which can be applied to queries or reads,
            used for statistics collection. Both request_tag and
            transaction_tag can be specified for a read or query that
            belongs to a transaction. This field is ignored for requests
            where it's not applicable (e.g. CommitRequest). Legal
            characters for ``request_tag`` values are all printable
            characters (ASCII 32 - 126) and the length of a request_tag
            is limited to 50 characters. Values that exceed this limit
            are truncated. Any leading underscore (_) characters will be
            removed from the string.
        transaction_tag (str):
            A tag used for statistics collection about this transaction.
            Both request_tag and transaction_tag can be specified for a
            read or query that belongs to a transaction. The value of
            transaction_tag should be the same for all requests
            belonging to the same transaction. If this request doesn't
            belong to any transaction, transaction_tag will be ignored.
            Legal characters for ``transaction_tag`` values are all
            printable characters (ASCII 32 - 126) and the length of a
            transaction_tag is limited to 50 characters. Values that
            exceed this limit are truncated. Any leading underscore (_)
            characters will be removed from the string.
    """

    class Priority(proto.Enum):
        r"""The relative priority for requests. Note that priority is not
        applicable for
        [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].

        The priority acts as a hint to the Cloud Spanner scheduler and does
        not guarantee priority or order of execution. For example:

        -  Some parts of a write operation always execute at
           ``PRIORITY_HIGH``, regardless of the specified priority. This may
           cause you to see an increase in high priority workload even when
           executing a low priority request. This can also potentially cause
           a priority inversion where a lower priority request will be
           fulfilled ahead of a higher priority request.
        -  If a transaction contains multiple operations with different
           priorities, Cloud Spanner does not guarantee to process the
           higher priority operations first. There may be other constraints
           to satisfy, such as order of operations.
        """
        PRIORITY_UNSPECIFIED = 0
        PRIORITY_LOW = 1
        PRIORITY_MEDIUM = 2
        PRIORITY_HIGH = 3

    priority = proto.Field(proto.ENUM, number=1, enum=Priority,)
    request_tag = proto.Field(proto.STRING, number=2,)
    transaction_tag = proto.Field(proto.STRING, number=3,)
class ExecuteSqlRequest(proto.Message):
r"""The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]
and
[ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql].
Attributes:
session (str):
Required. The session in which the SQL query
should be performed.
transaction (google.cloud.spanner_v1.types.TransactionSelector):
The transaction to use.
For queries, if none is provided, the default is
a temporary read-only transaction with strong
concurrency.
Standard DML statements require a read-write
transaction. To protect against replays,
single-use transactions are not supported. The
caller must either supply an existing
transaction ID or begin a new transaction.
Partitioned DML requires an existing Partitioned
DML transaction ID.
sql (str):
Required. The SQL string.
params (google.protobuf.struct_pb2.Struct):
Parameter names and values that bind to placeholders in the
SQL string.
A parameter placeholder consists of the ``@`` character
followed by the parameter name (for example,
``@firstName``). Parameter names must conform to the naming
requirements of identifiers as specified at
https://cloud.google.com/spanner/docs/lexical#identifiers.
Parameters can appear anywhere that a literal value is
expected. The same parameter name can be used more than
once, for example:
``"WHERE id > @msg_id AND id < @msg_id + 100"``
It is an error to execute a SQL statement with unbound
parameters.
param_types (Sequence[google.cloud.spanner_v1.types.ExecuteSqlRequest.ParamTypesEntry]):
It is not always possible for Cloud Spanner to infer the
right SQL type from a JSON value. For example, values of
type ``BYTES`` and values of type ``STRING`` both appear in
[params][google.spanner.v1.ExecuteSqlRequest.params] as JSON
strings.
In these cases, ``param_types`` can be used to specify the
exact SQL type for some or all of the SQL statement
parameters. See the definition of
[Type][google.spanner.v1.Type] for more information about
SQL types.
resume_token (bytes):
If this request is resuming a previously interrupted SQL
statement execution, ``resume_token`` should be copied from
the last
[PartialResultSet][google.spanner.v1.PartialResultSet]
yielded before the interruption. Doing this enables the new
SQL statement execution to resume where the last one left
off. The rest of the request parameters must exactly match
the request that yielded this token.
query_mode (google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryMode):
Used to control the amount of debugging information returned
in [ResultSetStats][google.spanner.v1.ResultSetStats]. If
[partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token]
is set,
[query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode]
can only be set to
[QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL].
partition_token (bytes):
If present, results will be restricted to the specified
partition previously created using PartitionQuery(). There
must be an exact match for the values of fields common to
this message and the PartitionQueryRequest message used to
create this partition_token.
seqno (int):
A per-transaction sequence number used to
identify this request. This field makes each
request idempotent such that if the request is
received multiple times, at most one will
succeed.
The sequence number must be monotonically
increasing within the transaction. If a request
arrives for the first time with an out-of-order
sequence number, the transaction may be aborted.
Replays of previously handled requests will
yield the same response as the first execution.
Required for DML statements. Ignored for
queries.
query_options (google.cloud.spanner_v1.types.ExecuteSqlRequest.QueryOptions):
Query optimizer configuration to use for the
given query.
request_options (google.cloud.spanner_v1.types.RequestOptions):
Common options for this request.
"""
class QueryMode(proto.Enum):
r"""Mode in which the statement must be processed."""
NORMAL = 0
PLAN = 1
PROFILE = 2
    class QueryOptions(proto.Message):
        r"""Query optimizer configuration.
        Attributes:
            optimizer_version (str):
                An option to control the selection of optimizer version.
                This parameter allows individual queries to pick different
                query optimizer versions.
                Specifying ``latest`` as a value instructs Cloud Spanner to
                use the latest supported query optimizer version. If not
                specified, Cloud Spanner uses the optimizer version set at
                the database level options. Any other positive integer (from
                the list of supported optimizer versions) overrides the
                default optimizer version for query execution.
                The list of supported optimizer versions can be queried from
                SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS.
                Executing a SQL statement with an invalid optimizer version
                fails with an ``INVALID_ARGUMENT`` error.
                See
                https://cloud.google.com/spanner/docs/query-optimizer/manage-query-optimizer
                for more information on managing the query optimizer.
                The ``optimizer_version`` statement hint has precedence over
                this setting.
            optimizer_statistics_package (str):
                An option to control the selection of optimizer statistics
                package.
                This parameter allows individual queries to use a different
                query optimizer statistics package.
                Specifying ``latest`` as a value instructs Cloud Spanner to
                use the latest generated statistics package. If not
                specified, Cloud Spanner uses the statistics package set at
                the database level options, or the latest package if the
                database option is not set.
                The statistics package requested by the query has to be
                exempt from garbage collection. This can be achieved with
                the following DDL statement:
                ::
                    ALTER STATISTICS <package_name> SET OPTIONS (allow_gc=false)
                The list of available statistics packages can be queried
                from ``INFORMATION_SCHEMA.SPANNER_STATISTICS``.
                Executing a SQL statement with an invalid optimizer
                statistics package or with a statistics package that allows
                garbage collection fails with an ``INVALID_ARGUMENT`` error.
        """
        # Generated proto-plus field declarations; the field numbers are part
        # of the protobuf wire format and must not be changed.
        optimizer_version = proto.Field(proto.STRING, number=1,)
        optimizer_statistics_package = proto.Field(proto.STRING, number=2,)
session = proto.Field(proto.STRING, number=1,)
transaction = proto.Field(
proto.MESSAGE, number=2, message=gs_transaction.TransactionSelector,
)
sql = proto.Field(proto.STRING, number=3,)
params = proto.Field(proto.MESSAGE, number=4, message=struct_pb2.Struct,)
param_types = proto.MapField(
proto.STRING, proto.MESSAGE, number=5, message=gs_type.Type,
)
resume_token = proto.Field(proto.BYTES, number=6,)
query_mode = proto.Field(proto.ENUM, number=7, enum=QueryMode,)
partition_token = proto.Field(proto.BYTES, number=8,)
seqno = proto.Field(proto.INT64, number=9,)
query_options = proto.Field(proto.MESSAGE, number=10, message=QueryOptions,)
request_options = proto.Field(proto.MESSAGE, number=11, message="RequestOptions",)
class ExecuteBatchDmlRequest(proto.Message):
    r"""The request for
    [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml].
    Attributes:
        session (str):
            Required. The session in which the DML
            statements should be performed.
        transaction (google.cloud.spanner_v1.types.TransactionSelector):
            Required. The transaction to use. Must be a
            read-write transaction.
            To protect against replays, single-use
            transactions are not supported. The caller must
            either supply an existing transaction ID or
            begin a new transaction.
        statements (Sequence[google.cloud.spanner_v1.types.ExecuteBatchDmlRequest.Statement]):
            Required. The list of statements to execute in this batch.
            Statements are executed serially, such that the effects of
            statement ``i`` are visible to statement ``i+1``. Each
            statement must be a DML statement. Execution stops at the
            first failed statement; the remaining statements are not
            executed.
            Callers must provide at least one statement.
        seqno (int):
            Required. A per-transaction sequence number
            used to identify this request. This field makes
            each request idempotent such that if the request
            is received multiple times, at most one will
            succeed.
            The sequence number must be monotonically
            increasing within the transaction. If a request
            arrives for the first time with an out-of-order
            sequence number, the transaction may be aborted.
            Replays of previously handled requests will
            yield the same response as the first execution.
        request_options (google.cloud.spanner_v1.types.RequestOptions):
            Common options for this request.
    """
    class Statement(proto.Message):
        r"""A single DML statement.
        Attributes:
            sql (str):
                Required. The DML string.
            params (google.protobuf.struct_pb2.Struct):
                Parameter names and values that bind to placeholders in the
                DML string.
                A parameter placeholder consists of the ``@`` character
                followed by the parameter name (for example,
                ``@firstName``). Parameter names can contain letters,
                numbers, and underscores.
                Parameters can appear anywhere that a literal value is
                expected. The same parameter name can be used more than
                once, for example:
                ``"WHERE id > @msg_id AND id < @msg_id + 100"``
                It is an error to execute a SQL statement with unbound
                parameters.
            param_types (Sequence[google.cloud.spanner_v1.types.ExecuteBatchDmlRequest.Statement.ParamTypesEntry]):
                It is not always possible for Cloud Spanner to infer the
                right SQL type from a JSON value. For example, values of
                type ``BYTES`` and values of type ``STRING`` both appear in
                [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params]
                as JSON strings.
                In these cases, ``param_types`` can be used to specify the
                exact SQL type for some or all of the SQL statement
                parameters. See the definition of
                [Type][google.spanner.v1.Type] for more information about
                SQL types.
        """
        # Generated field declarations; numbers are wire-format contract.
        sql = proto.Field(proto.STRING, number=1,)
        params = proto.Field(proto.MESSAGE, number=2, message=struct_pb2.Struct,)
        param_types = proto.MapField(
            proto.STRING, proto.MESSAGE, number=3, message=gs_type.Type,
        )
    # Generated field declarations; numbers are wire-format contract.
    session = proto.Field(proto.STRING, number=1,)
    transaction = proto.Field(
        proto.MESSAGE, number=2, message=gs_transaction.TransactionSelector,
    )
    statements = proto.RepeatedField(proto.MESSAGE, number=3, message=Statement,)
    seqno = proto.Field(proto.INT64, number=4,)
    # "RequestOptions" is a string forward reference resolved by proto-plus.
    request_options = proto.Field(proto.MESSAGE, number=5, message="RequestOptions",)
class ExecuteBatchDmlResponse(proto.Message):
    r"""The response for
    [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml].
    Contains a list of [ResultSet][google.spanner.v1.ResultSet]
    messages, one for each DML statement that has successfully executed,
    in the same order as the statements in the request. If a statement
    fails, the status in the response body identifies the cause of the
    failure.
    To check for DML statements that failed, use the following approach:
    1. Check the status in the response message. The
       [google.rpc.Code][google.rpc.Code] enum value ``OK`` indicates
       that all statements were executed successfully.
    2. If the status was not ``OK``, check the number of result sets in
       the response. If the response contains ``N``
       [ResultSet][google.spanner.v1.ResultSet] messages, then statement
       ``N+1`` in the request failed.
    Example 1:
    -  Request: 5 DML statements, all executed successfully.
    -  Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages,
       with the status ``OK``.
    Example 2:
    -  Request: 5 DML statements. The third statement has a syntax
       error.
    -  Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages,
       and a syntax error (``INVALID_ARGUMENT``) status. The number of
       [ResultSet][google.spanner.v1.ResultSet] messages indicates that
       the third statement failed, and the fourth and fifth statements
       were not executed.
    Attributes:
        result_sets (Sequence[google.cloud.spanner_v1.types.ResultSet]):
            One [ResultSet][google.spanner.v1.ResultSet] for each
            statement in the request that ran successfully, in the same
            order as the statements in the request. Each
            [ResultSet][google.spanner.v1.ResultSet] does not contain
            any rows. The
            [ResultSetStats][google.spanner.v1.ResultSetStats] in each
            [ResultSet][google.spanner.v1.ResultSet] contain the number
            of rows modified by the statement.
            Only the first [ResultSet][google.spanner.v1.ResultSet] in
            the response contains valid
            [ResultSetMetadata][google.spanner.v1.ResultSetMetadata].
        status (google.rpc.status_pb2.Status):
            If all DML statements are executed successfully, the status
            is ``OK``. Otherwise, the error status of the first failed
            statement.
    """
    # Generated field declarations; numbers are wire-format contract.
    result_sets = proto.RepeatedField(
        proto.MESSAGE, number=1, message=result_set.ResultSet,
    )
    status = proto.Field(proto.MESSAGE, number=2, message=status_pb2.Status,)
class PartitionOptions(proto.Message):
    r"""Options for a PartitionQueryRequest and
    PartitionReadRequest.
    Attributes:
        partition_size_bytes (int):
            **Note:** This hint is currently ignored by PartitionQuery
            and PartitionRead requests.
            The desired data size for each partition generated. The
            default for this option is currently 1 GiB. This is only a
            hint. The actual size of each partition may be smaller or
            larger than this size request.
        max_partitions (int):
            **Note:** This hint is currently ignored by PartitionQuery
            and PartitionRead requests.
            The desired maximum number of partitions to return. For
            example, this may be set to the number of workers available.
            The default for this option is currently 10,000. The maximum
            value is currently 200,000. This is only a hint. The actual
            number of partitions returned may be smaller or larger than
            this maximum count request.
    """
    # Generated field declarations; numbers are wire-format contract.
    partition_size_bytes = proto.Field(proto.INT64, number=1,)
    max_partitions = proto.Field(proto.INT64, number=2,)
class PartitionQueryRequest(proto.Message):
    r"""The request for
    [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery]
    Attributes:
        session (str):
            Required. The session used to create the
            partitions.
        transaction (google.cloud.spanner_v1.types.TransactionSelector):
            Read only snapshot transactions are
            supported, read/write and single use
            transactions are not.
        sql (str):
            Required. The query request to generate partitions for. The
            request will fail if the query is not root partitionable.
            The query plan of a root partitionable query has a single
            distributed union operator. A distributed union operator
            conceptually divides one or more tables into multiple
            splits, remotely evaluates a subquery independently on each
            split, and then unions all results.
            This must not contain DML commands, such as INSERT, UPDATE,
            or DELETE. Use
            [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]
            with a PartitionedDml transaction for large,
            partition-friendly DML operations.
        params (google.protobuf.struct_pb2.Struct):
            Parameter names and values that bind to placeholders in the
            SQL string.
            A parameter placeholder consists of the ``@`` character
            followed by the parameter name (for example,
            ``@firstName``). Parameter names can contain letters,
            numbers, and underscores.
            Parameters can appear anywhere that a literal value is
            expected. The same parameter name can be used more than
            once, for example:
            ``"WHERE id > @msg_id AND id < @msg_id + 100"``
            It is an error to execute a SQL statement with unbound
            parameters.
        param_types (Sequence[google.cloud.spanner_v1.types.PartitionQueryRequest.ParamTypesEntry]):
            It is not always possible for Cloud Spanner to infer the
            right SQL type from a JSON value. For example, values of
            type ``BYTES`` and values of type ``STRING`` both appear in
            [params][google.spanner.v1.PartitionQueryRequest.params] as
            JSON strings.
            In these cases, ``param_types`` can be used to specify the
            exact SQL type for some or all of the SQL query parameters.
            See the definition of [Type][google.spanner.v1.Type] for
            more information about SQL types.
        partition_options (google.cloud.spanner_v1.types.PartitionOptions):
            Additional options that affect how many
            partitions are created.
    """
    # Generated field declarations; numbers are wire-format contract.
    session = proto.Field(proto.STRING, number=1,)
    transaction = proto.Field(
        proto.MESSAGE, number=2, message=gs_transaction.TransactionSelector,
    )
    sql = proto.Field(proto.STRING, number=3,)
    params = proto.Field(proto.MESSAGE, number=4, message=struct_pb2.Struct,)
    param_types = proto.MapField(
        proto.STRING, proto.MESSAGE, number=5, message=gs_type.Type,
    )
    # "PartitionOptions" is a string forward reference resolved by proto-plus.
    partition_options = proto.Field(
        proto.MESSAGE, number=6, message="PartitionOptions",
    )
class PartitionReadRequest(proto.Message):
    r"""The request for
    [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
    Attributes:
        session (str):
            Required. The session used to create the
            partitions.
        transaction (google.cloud.spanner_v1.types.TransactionSelector):
            Read only snapshot transactions are
            supported, read/write and single use
            transactions are not.
        table (str):
            Required. The name of the table in the
            database to be read.
        index (str):
            If non-empty, the name of an index on
            [table][google.spanner.v1.PartitionReadRequest.table]. This
            index is used instead of the table primary key when
            interpreting
            [key_set][google.spanner.v1.PartitionReadRequest.key_set]
            and sorting result rows. See
            [key_set][google.spanner.v1.PartitionReadRequest.key_set]
            for further information.
        columns (Sequence[str]):
            The columns of
            [table][google.spanner.v1.PartitionReadRequest.table] to be
            returned for each row matching this request.
        key_set (google.cloud.spanner_v1.types.KeySet):
            Required. ``key_set`` identifies the rows to be yielded.
            ``key_set`` names the primary keys of the rows in
            [table][google.spanner.v1.PartitionReadRequest.table] to be
            yielded, unless
            [index][google.spanner.v1.PartitionReadRequest.index] is
            present. If
            [index][google.spanner.v1.PartitionReadRequest.index] is
            present, then
            [key_set][google.spanner.v1.PartitionReadRequest.key_set]
            instead names index keys in
            [index][google.spanner.v1.PartitionReadRequest.index].
            It is not an error for the ``key_set`` to name rows that do
            not exist in the database. Read yields nothing for
            nonexistent rows.
        partition_options (google.cloud.spanner_v1.types.PartitionOptions):
            Additional options that affect how many
            partitions are created.
    """
    # Generated field declarations; numbers are wire-format contract.
    # Note the gap in numbering: partition_options deliberately uses
    # field number 9, matching the proto definition.
    session = proto.Field(proto.STRING, number=1,)
    transaction = proto.Field(
        proto.MESSAGE, number=2, message=gs_transaction.TransactionSelector,
    )
    table = proto.Field(proto.STRING, number=3,)
    index = proto.Field(proto.STRING, number=4,)
    columns = proto.RepeatedField(proto.STRING, number=5,)
    key_set = proto.Field(proto.MESSAGE, number=6, message=keys.KeySet,)
    partition_options = proto.Field(
        proto.MESSAGE, number=9, message="PartitionOptions",
    )
class Partition(proto.Message):
    r"""Information returned for each partition returned in a
    PartitionResponse.
    Attributes:
        partition_token (bytes):
            This token can be passed to Read,
            StreamingRead, ExecuteSql, or
            ExecuteStreamingSql requests to restrict the
            results to those identified by this partition
            token.
    """
    # Generated field declaration; the number is wire-format contract.
    partition_token = proto.Field(proto.BYTES, number=1,)
class PartitionResponse(proto.Message):
    r"""The response for
    [PartitionQuery][google.spanner.v1.Spanner.PartitionQuery] or
    [PartitionRead][google.spanner.v1.Spanner.PartitionRead]
    Attributes:
        partitions (Sequence[google.cloud.spanner_v1.types.Partition]):
            Partitions created by this request.
        transaction (google.cloud.spanner_v1.types.Transaction):
            Transaction created by this request.
    """
    # Generated field declarations; numbers are wire-format contract.
    # "Partition" is a string forward reference resolved by proto-plus.
    partitions = proto.RepeatedField(proto.MESSAGE, number=1, message="Partition",)
    transaction = proto.Field(
        proto.MESSAGE, number=2, message=gs_transaction.Transaction,
    )
class ReadRequest(proto.Message):
    r"""The request for [Read][google.spanner.v1.Spanner.Read] and
    [StreamingRead][google.spanner.v1.Spanner.StreamingRead].
    Attributes:
        session (str):
            Required. The session in which the read
            should be performed.
        transaction (google.cloud.spanner_v1.types.TransactionSelector):
            The transaction to use. If none is provided,
            the default is a temporary read-only transaction
            with strong concurrency.
        table (str):
            Required. The name of the table in the
            database to be read.
        index (str):
            If non-empty, the name of an index on
            [table][google.spanner.v1.ReadRequest.table]. This index is
            used instead of the table primary key when interpreting
            [key_set][google.spanner.v1.ReadRequest.key_set] and sorting
            result rows. See
            [key_set][google.spanner.v1.ReadRequest.key_set] for further
            information.
        columns (Sequence[str]):
            Required. The columns of
            [table][google.spanner.v1.ReadRequest.table] to be returned
            for each row matching this request.
        key_set (google.cloud.spanner_v1.types.KeySet):
            Required. ``key_set`` identifies the rows to be yielded.
            ``key_set`` names the primary keys of the rows in
            [table][google.spanner.v1.ReadRequest.table] to be yielded,
            unless [index][google.spanner.v1.ReadRequest.index] is
            present. If [index][google.spanner.v1.ReadRequest.index] is
            present, then
            [key_set][google.spanner.v1.ReadRequest.key_set] instead
            names index keys in
            [index][google.spanner.v1.ReadRequest.index].
            If the
            [partition_token][google.spanner.v1.ReadRequest.partition_token]
            field is empty, rows are yielded in table primary key order
            (if [index][google.spanner.v1.ReadRequest.index] is empty)
            or index key order (if
            [index][google.spanner.v1.ReadRequest.index] is non-empty).
            If the
            [partition_token][google.spanner.v1.ReadRequest.partition_token]
            field is not empty, rows will be yielded in an unspecified
            order.
            It is not an error for the ``key_set`` to name rows that do
            not exist in the database. Read yields nothing for
            nonexistent rows.
        limit (int):
            If greater than zero, only the first ``limit`` rows are
            yielded. If ``limit`` is zero, the default is no limit. A
            limit cannot be specified if ``partition_token`` is set.
        resume_token (bytes):
            If this request is resuming a previously interrupted read,
            ``resume_token`` should be copied from the last
            [PartialResultSet][google.spanner.v1.PartialResultSet]
            yielded before the interruption. Doing this enables the new
            read to resume where the last read left off. The rest of the
            request parameters must exactly match the request that
            yielded this token.
        partition_token (bytes):
            If present, results will be restricted to the specified
            partition previously created using PartitionRead(). There
            must be an exact match for the values of fields common to
            this message and the PartitionReadRequest message used to
            create this partition_token.
        request_options (google.cloud.spanner_v1.types.RequestOptions):
            Common options for this request.
    """
    # Generated field declarations; numbers are wire-format contract.
    # Note the gap in numbering: there is no field 7, matching the proto.
    session = proto.Field(proto.STRING, number=1,)
    transaction = proto.Field(
        proto.MESSAGE, number=2, message=gs_transaction.TransactionSelector,
    )
    table = proto.Field(proto.STRING, number=3,)
    index = proto.Field(proto.STRING, number=4,)
    columns = proto.RepeatedField(proto.STRING, number=5,)
    key_set = proto.Field(proto.MESSAGE, number=6, message=keys.KeySet,)
    limit = proto.Field(proto.INT64, number=8,)
    resume_token = proto.Field(proto.BYTES, number=9,)
    partition_token = proto.Field(proto.BYTES, number=10,)
    # "RequestOptions" is a string forward reference resolved by proto-plus.
    request_options = proto.Field(proto.MESSAGE, number=11, message="RequestOptions",)
class BeginTransactionRequest(proto.Message):
    r"""The request for
    [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction].
    Attributes:
        session (str):
            Required. The session in which the
            transaction runs.
        options (google.cloud.spanner_v1.types.TransactionOptions):
            Required. Options for the new transaction.
        request_options (google.cloud.spanner_v1.types.RequestOptions):
            Common options for this request. Priority is ignored for
            this request. Setting the priority in this request_options
            struct will not do anything. To set the priority for a
            transaction, set it on the reads and writes that are part of
            this transaction instead.
    """
    # Generated field declarations; numbers are wire-format contract.
    session = proto.Field(proto.STRING, number=1,)
    options = proto.Field(
        proto.MESSAGE, number=2, message=gs_transaction.TransactionOptions,
    )
    # "RequestOptions" is a string forward reference resolved by proto-plus.
    request_options = proto.Field(proto.MESSAGE, number=3, message="RequestOptions",)
class CommitRequest(proto.Message):
    r"""The request for [Commit][google.spanner.v1.Spanner.Commit].
    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.
    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
    Attributes:
        session (str):
            Required. The session in which the
            transaction to be committed is running.
        transaction_id (bytes):
            Commit a previously-started transaction.
            This field is a member of `oneof`_ ``transaction``.
        single_use_transaction (google.cloud.spanner_v1.types.TransactionOptions):
            Execute mutations in a temporary transaction. Note that
            unlike commit of a previously-started transaction, commit
            with a temporary transaction is non-idempotent. That is, if
            the ``CommitRequest`` is sent to Cloud Spanner more than
            once (for instance, due to retries in the application, or in
            the transport library), it is possible that the mutations
            are executed more than once. If this is undesirable, use
            [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]
            and [Commit][google.spanner.v1.Spanner.Commit] instead.
            This field is a member of `oneof`_ ``transaction``.
        mutations (Sequence[google.cloud.spanner_v1.types.Mutation]):
            The mutations to be executed when this
            transaction commits. All mutations are applied
            atomically, in the order they appear in this
            list.
        return_commit_stats (bool):
            If ``true``, then statistics related to the transaction will
            be included in the
            [CommitResponse][google.spanner.v1.CommitResponse.commit_stats].
            Default value is ``false``.
        request_options (google.cloud.spanner_v1.types.RequestOptions):
            Common options for this request.
    """
    # Generated field declarations; numbers are wire-format contract.
    # transaction_id and single_use_transaction share the "transaction"
    # oneof: setting one clears the other.
    session = proto.Field(proto.STRING, number=1,)
    transaction_id = proto.Field(proto.BYTES, number=2, oneof="transaction",)
    single_use_transaction = proto.Field(
        proto.MESSAGE,
        number=3,
        oneof="transaction",
        message=gs_transaction.TransactionOptions,
    )
    mutations = proto.RepeatedField(proto.MESSAGE, number=4, message=mutation.Mutation,)
    return_commit_stats = proto.Field(proto.BOOL, number=5,)
    # "RequestOptions" is a string forward reference resolved by proto-plus.
    request_options = proto.Field(proto.MESSAGE, number=6, message="RequestOptions",)
class RollbackRequest(proto.Message):
    r"""The request for [Rollback][google.spanner.v1.Spanner.Rollback].
    Attributes:
        session (str):
            Required. The session in which the
            transaction to roll back is running.
        transaction_id (bytes):
            Required. The transaction to roll back.
    """
    # Generated field declarations; numbers are wire-format contract.
    session = proto.Field(proto.STRING, number=1,)
    transaction_id = proto.Field(proto.BYTES, number=2,)
# Export every message/enum name registered in this module's proto-plus
# manifest, sorted for a deterministic public API.
__all__ = tuple(sorted(__protobuf__.manifest))
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api import extensions as api_extensions
from neutron_lib import constants
from neutron.api import extensions
from neutron.api.v2 import base
from neutron.pecan_wsgi import controllers
from neutron.pecan_wsgi.controllers import utils as pecan_utils
class FakeSingularCollectionExtension(api_extensions.ExtensionDescriptor):
    """Fake extension whose collection name is an irregular plural."""

    COLLECTION = 'topologies'
    RESOURCE = 'topology'
    RAM = {COLLECTION: {'fake': {'is_visible': True}}}

    @classmethod
    def get_name(cls):
        return ""

    @classmethod
    def get_alias(cls):
        return "fake-sc"

    @classmethod
    def get_description(cls):
        return ""

    @classmethod
    def get_updated(cls):
        return "2099-07-23T10:00:00-00:00"

    def get_extended_resources(self, version):
        # The resource map is only exposed on API version 2.0.
        return self.RAM if version == "2.0" else {}

    def get_pecan_controllers(self):
        controller = controllers.CollectionsController(
            self.RESOURCE, self.RESOURCE)
        return [pecan_utils.PecanResourceExtension(self.RESOURCE, controller)]
class FakeSingularCollectionPlugin(object):
    """Minimal plugin backing the singular-collection fake extension."""

    supported_extension_aliases = ['fake-sc']

    def get_topology(self, context, id_, fields=None):
        # Echo the requested id so callers can verify request routing.
        return dict(fake=id_)

    def get_topologies(self, context, filters=None, fields=None):
        return [dict(fake='fake')]
def create_network(context, plugin):
    """Create a canned test network through *plugin* and return the result."""
    body = {'network': {'name': 'pecannet',
                        'tenant_id': 'tenid',
                        'shared': False,
                        'admin_state_up': True,
                        'status': 'ACTIVE'}}
    return plugin.create_network(context, body)
def create_subnet(context, plugin, network_id):
    """Create a canned IPv4 /24 test subnet on *network_id* via *plugin*."""
    subnet = {
        'tenant_id': 'tenid',
        'network_id': network_id,
        'name': 'pecansub',
        'ip_version': constants.IP_VERSION_4,
        'cidr': '10.20.30.0/24',
        'gateway_ip': '10.20.30.1',
        'enable_dhcp': True,
        # One pool covering the whole /24 minus network/gateway/broadcast.
        'allocation_pools': [{'start': '10.20.30.2',
                              'end': '10.20.30.254'}],
        'dns_nameservers': [],
        'host_routes': [],
    }
    return plugin.create_subnet(context, {'subnet': subnet})
def create_router(context, l3_plugin):
    """Create a canned test router through *l3_plugin* and return the result."""
    router = {'name': 'pecanrtr',
              'tenant_id': 'tenid',
              'admin_state_up': True}
    return l3_plugin.create_router(context, {'router': router})
class FakeExtension(api_extensions.ExtensionDescriptor):
    """Fake extension exposing a hyphenated resource plus sub-resources."""

    HYPHENATED_RESOURCE = 'meh_meh'
    HYPHENATED_COLLECTION = HYPHENATED_RESOURCE + 's'
    FAKE_PARENT_SUBRESOURCE_COLLECTION = 'fake_duplicates'
    FAKE_SUB_RESOURCE_COLLECTION = 'fake_subresources'

    RESOURCE_ATTRIBUTE_MAP = {
        'meh_mehs': {'fake': {'is_visible': True}},
        'fake_duplicates': {'fake': {'is_visible': True}},
    }

    SUB_RESOURCE_ATTRIBUTE_MAP = {
        'fake_subresources': {
            'parent': {'collection_name': 'meh_mehs',
                       'member_name': 'meh_meh'},
            'parameters': {'foo': {'is_visible': True},
                           'bar': {'is_visible': True}},
        },
        'fake_duplicates': {
            'parent': {'collection_name': 'meh_mehs',
                       'member_name': 'meh_meh'},
            'parameters': {'fake': {'is_visible': True}},
        },
    }

    @classmethod
    def get_name(cls):
        return "fake-ext"

    @classmethod
    def get_alias(cls):
        return "fake-ext"

    @classmethod
    def get_description(cls):
        return ""

    @classmethod
    def get_updated(cls):
        return "meh"

    def get_resources(self):
        """Returns Ext Resources."""
        plugin = FakePlugin()
        resources = []
        # Top-level resources, each with two custom member actions.
        for collection, attrs in self.RESOURCE_ATTRIBUTE_MAP.items():
            member = collection[:-1]
            actions = {'put_meh': 'PUT', 'boo_meh': 'GET'}
            exposed = collection
            if exposed == self.HYPHENATED_COLLECTION:
                # Expose the hyphenated alias on the wire.
                exposed = exposed.replace('_', '-')
            ctrl = base.create_resource(
                exposed, member, plugin, attrs,
                allow_bulk=True, allow_pagination=True,
                allow_sorting=True, member_actions=actions)
            resources.append(extensions.ResourceExtension(
                exposed, ctrl, attr_map=attrs))
        # Sub-resources nested under their declared parent collection.
        for collection, spec in self.SUB_RESOURCE_ATTRIBUTE_MAP.items():
            member = collection[:-1]
            parent = spec.get('parent')
            attrs = spec.get('parameters')
            ctrl = base.create_resource(
                collection, member, plugin, attrs,
                allow_bulk=True, parent=parent)
            resources.append(extensions.ResourceExtension(
                collection, ctrl, parent,
                path_prefix="", attr_map=attrs))
        return resources

    def get_extended_resources(self, version):
        # Only API v2.0 exposes the extended attribute map.
        return self.RESOURCE_ATTRIBUTE_MAP if version == "2.0" else {}
class FakePlugin(object):
    """Fake plugin serving the resources registered by the fake extension."""

    PLUGIN_TYPE = 'fake-ext-plugin'
    supported_extension_aliases = ['fake-ext']

    @classmethod
    def get_plugin_type(cls):
        return cls.PLUGIN_TYPE

    def get_meh_meh(self, context, id_, fields=None):
        # Echo the id so routing can be asserted.
        return dict(fake=id_)

    def get_meh_mehs(self, context, filters=None, fields=None):
        return [dict(fake='fake')]

    def get_fake_duplicate(self, context, id_, fields=None):
        return dict(fake=id_)

    def get_fake_duplicates(self, context, filters=None, fields=None):
        return [dict(fake='fakeduplicates')]

    def get_meh_meh_fake_duplicates(self, context, id_, fields=None,
                                    filters=None):
        return [dict(fake=id_)]

    def get_meh_meh_fake_subresources(self, context, id_, fields=None,
                                      filters=None):
        return dict(foo=id_)

    def put_meh(self, context, id_, data):
        # Custom PUT member action.
        return dict(poo_yah=id_)

    def boo_meh(self, context, id_):
        # Custom GET member action.
        return dict(boo_yah=id_)
| |
# -*- coding: utf-8 -*-
r"""
werkzeug.contrib.sessions
~~~~~~~~~~~~~~~~~~~~~~~~~
This module contains some helper classes that help one to add session
support to a python WSGI application. For full client-side session
storage see :mod:`~werkzeug.contrib.securecookie` which implements a
secure, client-side session storage.
Application Integration
=======================
::
from werkzeug.contrib.sessions import SessionMiddleware, \
FilesystemSessionStore
app = SessionMiddleware(app, FilesystemSessionStore())
The current session will then appear in the WSGI environment as
`werkzeug.session`. However it's recommended to not use the middleware
but the stores directly in the application. However for very simple
scripts a middleware for sessions could be sufficient.
This module does not implement methods or ways to check if a session is
expired. That should be done by a cronjob and storage specific. For
example to prune unused filesystem sessions one could check the modified
time of the files. If sessions are stored in the database, the new()
method should add an expiration timestamp for the session.
For better flexibility it's recommended to not use the middleware but the
store and session object directly in the application dispatching::
session_store = FilesystemSessionStore()
def application(environ, start_response):
request = Request(environ)
sid = request.cookie.get('cookie_name')
if sid is None:
request.session = session_store.new()
else:
request.session = session_store.get(sid)
response = get_the_response_object(request)
if request.session.should_save:
session_store.save(request.session)
response.set_cookie('cookie_name', request.session.sid)
return response(environ, start_response)
:copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import os
from os import path
from time import time
from random import random
try:
from hashlib import sha1
except ImportError:
from sha import new as sha1
from cPickle import dump, load, HIGHEST_PROTOCOL
from werkzeug.utils import ClosingIterator, dump_cookie, parse_cookie
from werkzeug.datastructures import CallbackDict
_sha1_re = re.compile(r'^[a-fA-F0-9]{40}$')
def _urandom():
if hasattr(os, 'urandom'):
return os.urandom(30)
return random()
def generate_key(salt=None):
return sha1('%s%s%s' % (salt, time(), _urandom())).hexdigest()
class ModificationTrackingDict(CallbackDict):
    """A dict that flips :attr:`modified` to ``True`` on direct mutation."""

    __slots__ = ('modified',)

    def __init__(self, *args, **kwargs):
        def on_update(d):
            # ``d`` is this dict instance, handed back by CallbackDict.
            d.modified = True
        self.modified = False
        CallbackDict.__init__(self, on_update=on_update)
        # Seed initial content via dict.update so the callback (and thus
        # the modified flag) is not triggered.
        dict.update(self, *args, **kwargs)

    def copy(self):
        """Create a flat copy of the dict."""
        # NOTE(review): only slot attributes are copied here; the dict items
        # themselves are not — confirm whether that is intentional.
        clone = object.__new__(self.__class__)
        missing = object()
        for slot in self.__slots__:
            value = getattr(self, slot, missing)
            if value is not missing:
                setattr(clone, slot, value)
        return clone

    def __copy__(self):
        return self.copy()
class Session(ModificationTrackingDict):
    """Subclass of a dict that keeps track of direct object changes.  Changes
    in mutable structures are not tracked, for those you have to set
    `modified` to `True` by hand.
    """
    __slots__ = ModificationTrackingDict.__slots__ + ('sid', 'new')

    def __init__(self, data, sid, new=False):
        ModificationTrackingDict.__init__(self, data)
        self.sid = sid
        self.new = new

    def __repr__(self):
        # A trailing '*' marks sessions that still need saving.
        flag = self.should_save and '*' or ''
        return '<%s %s%s>' % (type(self).__name__, dict.__repr__(self), flag)

    @property
    def should_save(self):
        """True if the session should be saved."""
        return self.modified or self.new
class SessionStore(object):
    """Baseclass for all session stores.  The Werkzeug contrib module does
    not implement any useful stores besides the filesystem store; application
    developers are encouraged to create their own stores.

    :param session_class: The session class to use.  Defaults to
                          :class:`Session`.
    """

    def __init__(self, session_class=None):
        # Fall back to the module-level Session class when none is given.
        self.session_class = Session if session_class is None else session_class

    def is_valid_key(self, key):
        """Check if a key has the correct format."""
        return _sha1_re.match(key) is not None

    def generate_key(self, salt=None):
        """Simple function that generates a new session key."""
        return generate_key(salt)

    def new(self):
        """Generate a new session."""
        return self.session_class({}, self.generate_key(), True)

    def save(self, session):
        """Save a session."""

    def save_if_modified(self, session):
        """Save if a session class wants an update."""
        if session.should_save:
            self.save(session)

    def delete(self, session):
        """Delete a session."""

    def get(self, sid):
        """Get a session for this sid or a new session object.  This method
        has to check if the session key is valid and create a new session if
        that wasn't the case.
        """
        return self.session_class({}, sid, True)
class FilesystemSessionStore(SessionStore):
    """Simple example session store that saves sessions in the filesystem like
    PHP does.

    :param path: the path to the folder used for storing the sessions.
                 If not provided the default temporary directory is used.
    :param filename_template: a string template used to give the session
                              a filename.  ``%s`` is replaced with the
                              session id.
    :param session_class: The session class to use.  Defaults to
                          :class:`Session`.
    """

    def __init__(self, path=None, filename_template='werkzeug_%s.sess',
                 session_class=None):
        SessionStore.__init__(self, session_class)
        if path is None:
            from tempfile import gettempdir
            path = gettempdir()
        self.path = path
        self.filename_template = filename_template

    def get_session_filename(self, sid):
        # Build the on-disk path for a given session id.
        return path.join(self.path, self.filename_template % sid)

    def save(self, session):
        # Fixed: the original used the Python-2-only 'file()' builtin, which
        # is a NameError on Python 3.  'with' also guarantees the handle is
        # closed even if pickling fails.
        with open(self.get_session_filename(session.sid), 'wb') as f:
            dump(dict(session), f, HIGHEST_PROTOCOL)

    def delete(self, session):
        fn = self.get_session_filename(session.sid)
        try:
            # Late import because Google Appengine won't allow os.unlink
            from os import unlink
            unlink(fn)
        except OSError:
            # Missing file means the session is already gone; that's fine.
            pass

    def get(self, sid):
        fn = self.get_session_filename(sid)
        # Invalid or unknown keys fall back to a brand new session.
        if not self.is_valid_key(sid) or not path.exists(fn):
            return self.new()
        with open(fn, 'rb') as f:
            data = load(f)
        return self.session_class(data, sid, False)
class SessionMiddleware(object):
    """A simple middleware that puts the session object of a store provided
    into the WSGI environ.  It automatically sets cookies and restores
    sessions.

    However a middleware is not the preferred solution because it won't be as
    fast as sessions managed by the application itself and will put a key into
    the WSGI environment only relevant for the application which is against
    the concept of WSGI.

    The cookie parameters are the same as for the :func:`~werkzeug.dump_cookie`
    function just prefixed with ``cookie_``.  Additionally `max_age` is
    called `cookie_age` and not `cookie_max_age` because of backwards
    compatibility.
    """

    def __init__(self, app, store, cookie_name='session_id',
                 cookie_age=None, cookie_expires=None, cookie_path='/',
                 cookie_domain=None, cookie_secure=None,
                 cookie_httponly=False, environ_key='werkzeug.session'):
        self.app = app
        self.store = store
        self.cookie_name = cookie_name
        self.cookie_age = cookie_age
        self.cookie_expires = cookie_expires
        self.cookie_path = cookie_path
        self.cookie_domain = cookie_domain
        self.cookie_secure = cookie_secure
        self.cookie_httponly = cookie_httponly
        self.environ_key = environ_key

    def __call__(self, environ, start_response):
        cookies = parse_cookie(environ.get('HTTP_COOKIE', ''))
        sid = cookies.get(self.cookie_name, None)
        # Restore an existing session or start a fresh one.
        session = self.store.new() if sid is None else self.store.get(sid)
        environ[self.environ_key] = session

        def injecting_start_response(status, headers, exc_info=None):
            # Persist and set the cookie only when there is something to save.
            if session.should_save:
                self.store.save(session)
                headers.append(('Set-Cookie', dump_cookie(
                    self.cookie_name, session.sid, self.cookie_age,
                    self.cookie_expires, self.cookie_path,
                    self.cookie_domain, self.cookie_secure,
                    self.cookie_httponly)))
            return start_response(status, headers, exc_info)

        return ClosingIterator(self.app(environ, injecting_start_response),
                               lambda: self.store.save_if_modified(session))
| |
"""
Control of SSH known_hosts entries
==================================
Manage the information stored in the known_hosts files.
.. code-block:: yaml
github.com:
ssh_known_hosts:
- present
- user: root
- fingerprint: 16:27:ac:a5:76:28:2d:36:63:1b:56:4d:eb:df:a6:48
- fingerprint_hash_type: md5
example.com:
ssh_known_hosts:
- absent
- user: root
"""
import os
import salt.utils.platform
from salt.exceptions import CommandNotFoundError
# Define the state's virtual name
__virtualname__ = "ssh_known_hosts"
def __virtual__():
    """
    Does not work on Windows, requires ssh module functions
    """
    if not salt.utils.platform.is_windows():
        return __virtualname__
    return False, "ssh_known_hosts: Does not support Windows"
def present(
    name,
    user=None,
    fingerprint=None,
    key=None,
    port=None,
    enc=None,
    config=None,
    hash_known_hosts=True,
    timeout=5,
    fingerprint_hash_type=None,
):
    """
    Verifies that the specified host is known by the specified user

    On many systems, specifically those running with openssh 4 or older, the
    ``enc`` option must be set, only openssh 5 and above can detect the key
    type.

    name
        The name of the remote host (e.g. "github.com")

        Note that only a single hostname is supported, if foo.example.com and
        bar.example.com have the same host you will need two separate Salt
        States to represent them.

    user
        The user who owns the ssh authorized keys file to modify

    fingerprint
        The fingerprint of the key which must be present in the known_hosts
        file (optional if key specified)

    key
        The public key which must be present in the known_hosts file
        (optional if fingerprint specified)

    port
        optional parameter, port which will be used to when requesting the
        public key from the remote host, defaults to port 22.

    enc
        Defines what type of key is being used, can be ed25519, ecdsa,
        ssh-rsa, ssh-dss or any other type as of openssh server version 8.7.

    config
        The location of the authorized keys file relative to the user's home
        directory, defaults to ".ssh/known_hosts". If no user is specified,
        defaults to "/etc/ssh/ssh_known_hosts". If present, must be an
        absolute path when a user is not specified.

    hash_known_hosts : True
        Hash all hostnames and addresses in the known hosts file.

    timeout : int
        Set the timeout for connection attempts. If ``timeout`` seconds have
        elapsed since a connection was initiated to a host or since the last
        time anything was read from that host, then the connection is closed
        and the host in question considered unavailable. Default is 5 seconds.

        .. versionadded:: 2016.3.0

    fingerprint_hash_type
        The public key fingerprint hash type that the public key fingerprint
        was originally hashed with. This defaults to ``sha256`` if not specified.

        .. versionadded:: 2016.11.4
        .. versionchanged:: 2017.7.0

            default changed from ``md5`` to ``sha256``
    """
    ret = {
        "name": name,
        "changes": {},
        # In test mode the outcome is not known yet, hence None.
        "result": None if __opts__["test"] else True,
        "comment": "",
    }

    # Resolve the known_hosts file: system-wide when no user is given,
    # otherwise relative to the user's home directory.
    if not user:
        config = config or "/etc/ssh/ssh_known_hosts"
    else:
        config = config or ".ssh/known_hosts"

    if not user and not os.path.isabs(config):
        comment = 'If not specifying a "user", specify an absolute "config".'
        ret["result"] = False
        return dict(ret, comment=comment)

    if __opts__["test"]:
        # Dry run: validate the argument combination and report what would
        # change without touching the known_hosts file.
        if key and fingerprint:
            comment = 'Specify either "key" or "fingerprint", not both.'
            ret["result"] = False
            return dict(ret, comment=comment)
        elif key and not enc:
            comment = 'Required argument "enc" if using "key" argument.'
            ret["result"] = False
            return dict(ret, comment=comment)

        try:
            result = __salt__["ssh.check_known_host"](
                user,
                name,
                key=key,
                fingerprint=fingerprint,
                config=config,
                port=port,
                fingerprint_hash_type=fingerprint_hash_type,
            )
        except CommandNotFoundError as err:
            # ssh-keygen (or similar) is missing on the minion.
            ret["result"] = False
            ret["comment"] = "ssh.check_known_host error: {}".format(err)
            return ret

        # check_known_host returns one of 'exists', 'add' or 'update'.
        if result == "exists":
            comment = "Host {} is already in {}".format(name, config)
            ret["result"] = True
            return dict(ret, comment=comment)
        elif result == "add":
            comment = "Key for {} is set to be added to {}".format(name, config)
            return dict(ret, comment=comment)
        else:  # 'update'
            comment = "Key for {} is set to be updated in {}".format(name, config)
            return dict(ret, comment=comment)

    # Apply mode: actually write the host key into the known_hosts file.
    result = __salt__["ssh.set_known_host"](
        user=user,
        hostname=name,
        fingerprint=fingerprint,
        key=key,
        port=port,
        enc=enc,
        config=config,
        hash_known_hosts=hash_known_hosts,
        timeout=timeout,
        fingerprint_hash_type=fingerprint_hash_type,
    )
    if result["status"] == "exists":
        return dict(ret, comment="{} already exists in {}".format(name, config))
    elif result["status"] == "error":
        return dict(ret, result=False, comment=result["error"])
    else:  # 'updated'
        # Report either the key or the fingerprint, matching what the
        # caller asked to manage.
        if key:
            new_key = result["new"][0]["key"]
            return dict(
                ret,
                changes={"old": result["old"], "new": result["new"]},
                comment="{}'s key saved to {} (key: {})".format(name, config, new_key),
            )
        else:
            fingerprint = result["new"][0]["fingerprint"]
            return dict(
                ret,
                changes={"old": result["old"], "new": result["new"]},
                comment="{}'s key saved to {} (fingerprint: {})".format(
                    name, config, fingerprint
                ),
            )
def absent(name, user=None, config=None):
    """
    Verifies that the specified host is not known by the given user

    name
        The host name

        Note that only single host names are supported.  If foo.example.com
        and bar.example.com are the same machine and you need to exclude both,
        you will need one Salt state for each.

    user
        The user who owns the ssh authorized keys file to modify

    config
        The location of the authorized keys file relative to the user's home
        directory, defaults to ".ssh/known_hosts".  If no user is specified,
        defaults to "/etc/ssh/ssh_known_hosts".  If present, must be an
        absolute path when a user is not specified.
    """
    ret = {"name": name, "changes": {}, "result": True, "comment": ""}

    # Pick the per-user file when a user is given, the system file otherwise.
    if user:
        config = config or ".ssh/known_hosts"
    else:
        config = config or "/etc/ssh/ssh_known_hosts"

    if not user and not os.path.isabs(config):
        return dict(
            ret,
            result=False,
            comment='If not specifying a "user", specify an absolute "config".',
        )

    known_host = __salt__["ssh.get_known_host_entries"](
        user=user, hostname=name, config=config
    )
    if not known_host:
        return dict(ret, comment="Host is already absent")

    if __opts__["test"]:
        return dict(
            ret,
            result=None,
            comment="Key for {} is set to be removed from {}".format(name, config),
        )

    rm_result = __salt__["ssh.rm_known_host"](user=user, hostname=name, config=config)
    if rm_result["status"] == "error":
        return dict(ret, result=False, comment=rm_result["error"])
    return dict(
        ret,
        changes={"old": known_host, "new": None},
        result=True,
        comment=rm_result["comment"],
    )
| |
"""
Build an AWS AMI for this service
"""
import os
import sys
import time
import subprocess
import operator
import pkg_resources
import json
from datetime import datetime, timedelta
import pytz
import random
import shlex
import tempfile
from click import echo, secho
import six
from six import print_
try:
# boto library is not a hard requirement for drift.
import boto3
except ImportError:
pass
from drift.management import get_app_version, create_deployment_manifest
from drift.management.gittools import get_branch, checkout
from drift.utils import get_tier_name
from driftconfig.util import get_drift_config
from driftconfig.config import get_redis_cache_backend
from drift.flaskfactory import load_flask_config
# AWS AMI name patterns for Canonical's official Ubuntu images, one per LTS.
UBUNTU_TRUSTY_IMAGE_NAME = 'ubuntu/images/hvm/ubuntu-trusty-14.04*'
UBUNTU_XENIAL_IMAGE_NAME = 'ubuntu/images/hvm-ssd/ubuntu-xenial-16.04*'
UBUNTU_BIONIC_IMAGE_NAME = 'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04*'
# The Ubuntu release used as base image when baking new AMIs.
UBUNTU_RELEASE = UBUNTU_BIONIC_IMAGE_NAME

# IAM instance profile name attached to launched/baked EC2 instances.
IAM_ROLE = "ec2"

# The 'Canonical' owner. This organization maintains the Ubuntu AMI's on AWS.
def _get_owner_id_for_canonical(region_id):
"""Returns region specific owner id for Canonical which is the maintainer of Ubuntu images."""
if region_id.startswith('cn-'):
return '837727238323'
else:
return '099720109477'
def get_options(parser):
    """Register the AMI management sub-commands on *parser*.

    Adds the 'bake', 'run' and 'copy-image' sub-commands with their
    arguments.  The parser is modified in place.

    :param parser: an ``argparse.ArgumentParser`` instance.
    """
    subparsers = parser.add_subparsers(
        title="AWS AMI Management and Deployment",
        description="These sets of commands help you with configuring, baking, running and "
        "deploying AWS AMIs' for your tier.",
        dest="command",
    )
    subparsers.required = True

    # The 'bake' command
    p = subparsers.add_parser(
        'bake',
        # Fixed duplicated word ("current current") in the help text.
        help='Bake a new AMI for the current service.',
    )
    p.add_argument(
        'tag', action='store', help='Git release tag to bake. (Run "git tag" to get available tags).',
        nargs='?', default=None)
    p.add_argument(
        "--preview", help="Show arguments only", action="store_true"
    )
    p.add_argument(
        "--skipcopy", help="Do not copy image to all regions", action="store_true"
    )
    p.add_argument(
        "--debug", help="Run Packer in debug mode", action="store_true"
    )
    p.add_argument(
        "--ami",
        help="Specify base image. Default is the latest Ubuntu image from Canonical."
    )
    p.add_argument(
        "--instance_type",
        help="The EC2 instance type to use to build. Default is 'm4.xlarge'.",
        default="m4.xlarge"
    )

    # The 'run' command
    p = subparsers.add_parser(
        'run',
        help='Launch an AMI for this service, or configure it for auto-scaling.',
    )
    p.add_argument(
        "--ami",
        help="An AMI built with the rest api service",
    )
    p.add_argument(
        "--instance_type",
        help="The EC2 instance type to use. Default is 't2.small'.",
        default="t2.small"
    )
    p.add_argument(
        "--launch", action="store_true",
        help="Launch the AMI. (Default unless \"autoscaling\" is configured for the tier and service.)",
    )
    p.add_argument(
        "--autoscale", action="store_true",
        help="Add the AMI to autoscaling group. (Default if \"autoscaling\" is configured for the tier and service.)",
    )
    p.add_argument(
        "--force", action="store_true",
        help="If --launch or --autoscale conflict with tier configuration, use --force to override.",
    )
    p.add_argument(
        "--preview", help="Show arguments only", action="store_true"
    )
    p.add_argument(
        "--verbose", help="Verbose output", action="store_true"
    )

    # The 'copy-image' command
    p = subparsers.add_parser(
        'copy-image',
        help='Copies AMI to all active regions.',
    )
    p.add_argument(
        "ami",
        help="The image id.",
    )
def run_command(args):
    """Dispatch to the module-level '_<command>_command' handler."""
    handler_name = "_{}_command".format(args.command.replace("-", "_"))
    handler = globals()[handler_name]
    handler(args)
def fold_tags(tags, key_name=None, value_name=None):
    """Fold boto3 resource tags array into a dictionary.

    Fixed: 'key_name' and 'value_name' were declared but silently ignored.
    They now select which fields of each tag dict to use, defaulting to the
    boto3 convention of 'Key'/'Value' so existing callers are unaffected.

    :param tags: iterable of tag dicts, e.g. ``[{'Key': k, 'Value': v}, ...]``.
    :param key_name: field holding the tag name (default ``'Key'``).
    :param value_name: field holding the tag value (default ``'Value'``).
    :returns: ``{name: value}`` mapping.
    """
    key_name = key_name or 'Key'
    value_name = value_name or 'Value'
    return {tag[key_name]: tag[value_name] for tag in tags}
def filterize(d):
    """
    Return dictionary 'd' as a boto3 "filters" object by unfolding it to a list of
    dict with 'Name' and 'Values' entries.
    """
    unfolded = []
    for filter_name, filter_value in d.items():
        unfolded.append({'Name': filter_name, 'Values': [filter_value]})
    return unfolded
def _bake_command(args):
    """Bake a new AMI for the current drift app using Packer.

    Flow: resolve config and base AMI, check out the requested git tag,
    build an sdist plus setup scripts, run 'packer build' and stream its
    output to find the resulting AMI id, tag the AMI with the deployment
    manifest and optionally copy it to all active regions.
    """
    conf = get_drift_config(drift_app=load_flask_config())
    name = conf.drift_app['name']
    domain = conf.domain.get()
    if 'aws' not in domain or 'ami_baking_region' not in domain['aws']:
        echo(
            "Missing configuration value in table 'domain'. Specify the AWS region in "
            "'aws.ami_baking_region'.")
        sys.exit(1)
    aws_region = domain['aws']['ami_baking_region']
    ec2 = boto3.resource('ec2', region_name=aws_region)

    # Do some compatibility checks
    if aws_region.startswith("cn-"):
        echo("NOTE!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        echo("If this command fails with some AWS access crap, you may need to switch packer version.")
        echo("More info: https://github.com/hashicorp/packer/issues/5447")
    echo("DOMAIN:")
    echo(json.dumps(domain, indent=4))
    echo("DEPLOYABLE: {!r}".format(name))
    echo("AWS REGION: {!r}".format(aws_region))

    # Clean up lingering Packer instances
    # NOTE(review): 'tag_name' is unused here -- the filter below repeats the
    # literal; the variable is reassigned later for manifest tags.
    tag_name = 'Packer Builder'
    ec2_client = boto3.client('ec2', region_name=aws_region)
    packers = ec2_client.describe_instances(
        Filters=[
            {'Name': 'tag:Name', 'Values': ['Packer Builder']},
            {'Name': 'instance-state-name', 'Values': ['running']}
        ]
    )
    terminate_ids = []
    if packers['Reservations']:
        for pc2 in packers['Reservations'][0]['Instances']:
            # Packer builds should finish well within 20 minutes; anything
            # older is assumed to be a leftover from a failed run.
            age = datetime.utcnow().replace(tzinfo=pytz.utc) - pc2['LaunchTime']
            if age > timedelta(minutes=20):
                echo("Cleaning up Packer instance {} as it has been active for {}.".format(
                    pc2['InstanceId'], age))
                terminate_ids.append(pc2['InstanceId'])
    if terminate_ids:
        ec2.instances.filter(InstanceIds=terminate_ids).terminate()

    if args.ami:
        # Use the explicitly requested base image.
        amis = list(ec2.images.filter(ImageIds=[args.ami]))
        ami = amis[0]
    else:
        # Get all Ubuntu images from the appropriate region and pick the most recent one.
        # The 'Canonical' owner. This organization maintains the Ubuntu AMI's on AWS.
        echo("Finding the latest AMI on AWS that matches {!r}".format(UBUNTU_RELEASE))
        filters = [
            {'Name': 'name', 'Values': [UBUNTU_RELEASE]},
            {'Name': 'architecture', 'Values': ['x86_64']},
        ]
        amis = list(ec2.images.filter(Owners=[_get_owner_id_for_canonical(aws_region)], Filters=filters))
        if not amis:
            echo("No AMI found matching {!r}. Not sure what to do now.".format(UBUNTU_RELEASE))
            sys.exit(1)
        ami = max(amis, key=operator.attrgetter("creation_date"))
    echo("Using source AMI:")
    echo("\tID:\t{!r}".format(ami.id))
    echo("\tName:\t{!r}".format(ami.name))
    echo("\tDate:\t{!r}".format(ami.creation_date))

    current_branch = get_branch()
    if not args.tag:
        args.tag = current_branch
    echo("Using branch/tag {!r}".format(args.tag))

    # Wrap git branch modification in RAII.
    checkout(args.tag)
    try:
        setup_script = ""
        setup_script_custom = ""
        with open(pkg_resources.resource_filename(__name__, "driftapp-packer.sh"), 'r') as f:
            setup_script = f.read()
        custom_script_name = os.path.join(conf.drift_app['app_root'], 'scripts', 'ami-bake.sh')
        if os.path.exists(custom_script_name):
            echo("Using custom bake shell script {!r}".format(custom_script_name))
            setup_script_custom = "echo Executing custom bake shell script from {}\n".format(custom_script_name)
            setup_script_custom += open(custom_script_name, 'r').read()
            setup_script_custom += "\necho Custom bake shell script completed\n"
        else:
            echo("Note: No custom ami-bake.sh script found for this application.")
        # custom setup needs to happen first because we might be installing some requirements for the regular setup
        setup_script = setup_script_custom + setup_script
        # Write the combined script to a temp file Packer can pick up.
        # delete=False so the file survives past this 'with' block.
        with tempfile.NamedTemporaryFile('w', delete=False) as tf:
            tf.write(setup_script)
            setup_script_filename = tf.name
        manifest = create_deployment_manifest('ami', comment=None, deployable_name=name)
        packer_vars = {
            'version': get_app_version(),
            'setup_script': setup_script_filename,
        }

        if not args.preview:
            # TODO: This code mirrors the one in ami.py. It's not DRY.
            cmd = [sys.executable, 'setup.py', 'sdist', '--formats=tar']
            ret = subprocess.call(cmd)
            if ret != 0:
                secho("Failed to execute build command: {!r}".format(cmd), fg="red")
                sys.exit(ret)
            import shutil
            shutil.make_archive("dist/aws", 'tar', "aws")
    finally:
        # Always restore the original git branch, even on failure.
        echo("Reverting to {!r}".format(current_branch))
        checkout(current_branch)

    packer_vars.update({
        "instance_type": args.instance_type,
        "service": name,
        "region": aws_region,
        "source_ami": ami.id,
        "user_name": boto3.client('sts').get_caller_identity()['Arn'],
        "domain_name": domain['domain_name'],
    })
    echo("Packer variables:")
    echo(pretty(packer_vars))

    # See if Packer is installed and generate sensible error code if something is off.
    # This will also write the Packer version to the terminal which is useful info.
    try:
        subprocess.call(['packer', 'version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except Exception as e:
        echo("Error: {}".format(e))
        echo("'packer version' command failed. Make sure it's installed.")
        if sys.platform == 'win32':
            echo("To install packer for windows: choco install packer")
        sys.exit(127)

    # Assemble the 'packer build' command line.
    cmd = "packer build "
    if args.debug:
        cmd += "-debug "
    cmd += "-only=amazon-ebs "
    for k, v in packer_vars.items():
        cmd += "-var {}=\"{}\" ".format(k, v)
    cmd = shlex.split(cmd)

    # Use generic packer script if project doesn't specify one
    pkg_resources.cleanup_resources()
    if os.path.exists("config/packer.json"):
        cmd.append("config/packer.json")
    else:
        scriptfile = pkg_resources.resource_filename(__name__, "driftapp-packer.json")
        cmd.append(scriptfile)
    echo("Baking AMI with: {}".format(' '.join(cmd)))

    if args.preview:
        echo("Manifest tags assigned to AMI:")
        echo(pretty(manifest))
        echo("Not building or packaging because --preview is on. Exiting now.")
        sys.exit(0)

    start_time = time.time()
    failure_line = None
    try:
        # Execute Packer command and parse the output to find the ami id.
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        while True:
            line = p.stdout.readline()
            # packer is streaming stuff from the remote which uses utf-8 encoding.
            # in py2, we just leave the line as it is, gobble it and print it.
            if six.PY3:
                line = line.decode()
            print_(line, end="")
            if line == '' and p.poll() is not None:
                break
            # The last lines from the packer execution look like this:
            # ==> Builds finished. The artifacts of successful builds are:
            # --> amazon-ebs: AMIs were created:
            #
            # eu-west-1: ami-0ee5eb68
            if 'ami-' in line:
                # Grab the trailing token starting at 'ami-' as the image id.
                ami_id = line[line.rfind('ami-'):].strip()
                ami = ec2.Image(ami_id)
                echo()
                echo("AMI ID: %s" % ami.id)
                echo()
            if "failed with error code" in line:
                failure_line = line
            # Surface common pipenv/manifest packaging mistakes as hard errors.
            if "Your Pipfile.lock" in line and "is out of date" in line:
                secho("ERROR: {}".format(line), fg="red")
                secho("Build failed! Consider running 'pipenv lock' before baking.", fg="red")
                sys.exit(1)
            if "Creating a Pipfile for this project" in line:
                secho("ERROR: No Pipfile in distribution! Check 'MANIFEST.in'.", fg="red")
                sys.exit(1)
            if "Pipfile.lock not found, creating" in line:
                secho("ERROR: No Pipfile.lock in distribution! Check 'MANIFEST.in'.", fg="red")
                sys.exit(1)
    finally:
        pkg_resources.cleanup_resources()
    if p.returncode != 0:
        secho("Failed to execute packer command: {!r}".format(cmd), fg="red")
        if failure_line:
            secho("Check this out: {!r}".format(failure_line), fg="yellow")
        sys.exit(p.returncode)

    duration = time.time() - start_time

    if manifest:
        # Record the deployment manifest on the AMI as 'drift:manifest:*' tags.
        echo("Adding manifest tags to AMI:")
        echo(pretty(manifest))
        prefix = "drift:manifest:"
        tags = []
        for k, v in manifest.items():
            tag_name = "{}{}".format(prefix, k)
            tags.append({'Key': tag_name, 'Value': v or ''})
        ami.create_tags(DryRun=False, Tags=tags)

    if not args.skipcopy:
        _copy_image(ami.id)
    secho("Done after %.0f seconds" % (duration,), fg="green")
class MyEncoder(json.JSONEncoder):
    """JSON encoder that never fails: unknown objects are emitted as str(o)."""

    def default(self, o):
        # Fallback for types json can't serialize natively (datetimes,
        # boto3 objects, ...): render them via str().
        return str(o)
def pretty(ob):
    """Returns a pretty representation of 'ob' (indented JSON; unknown
    types are stringified via MyEncoder)."""
    rendered = json.dumps(ob, indent=4, cls=MyEncoder)
    return rendered
def _find_latest_ami(service_name, release=None):
    """Locate the most recently created AMI baked for 'service_name'.

    When 'release' is given, only AMIs tagged with that git release are
    considered.  Exits the process if no matching AMI is found.
    """
    tier_name = get_tier_name()
    conf = get_drift_config(tier_name=tier_name, deployable_name=service_name)
    domain = conf.domain.get()
    aws_region = conf.tier['aws']['region']
    ec2 = boto3.resource('ec2', region_name=aws_region)

    search = [
        {'Name': 'tag:service-name', 'Values': [service_name]},
        {'Name': 'tag:domain-name', 'Values': [domain['domain_name']]},
    ]
    if release:
        search.append({'Name': 'tag:git-release', 'Values': [release]})

    candidates = list(ec2.images.filter(Owners=['self'], Filters=search))
    if not candidates:
        criteria = {f['Name']: f['Values'][0] for f in search}
        secho("No AMI found using the search criteria {}.".format(criteria), fg="red")
        sys.exit(1)
    # Newest creation timestamp wins.
    return max(candidates, key=operator.attrgetter("creation_date"))
def _run_command(args):
    """Launch this service's AMI on the current tier.

    Either registers the AMI with an autoscaling group (the default -- and
    currently forced on) or launches a single EC2 instance.  Reads tier and
    deployable settings from the drift config, resolves the AMI to run,
    builds instance tags and user-data, then creates/updates the launch
    configuration and autoscaling group and cycles its instances.

    Fix in this revision: corrected the user-facing typo "ovefrride" ->
    "override" in the --launch conflict message.
    """
    # Always autoscale!
    args.autoscale = True

    # NOTE: because autoscale is forced on above, --launch always errors here.
    if args.launch and args.autoscale:
        secho("Error: Can't use --launch and --autoscale together.", fg="red")
        sys.exit(1)

    tier_name = get_tier_name()
    conf = get_drift_config(tier_name=tier_name, drift_app=load_flask_config())
    name = conf.drift_app['name']

    if not conf.deployable:
        echo("The deployable '{}' is not registered and/or assigned to tier {!r}.".format(name, tier_name))
        echo("Run 'drift-admin register' to register this deployable.")
        echo("Run 'driftconfig assign-tier {}' to assign it to the tier.".format(name))
        sys.exit(1)

    aws_region = conf.tier['aws']['region']

    if args.verbose:
        echo("AWS REGION: {!r}".format(aws_region))
        echo("DOMAIN:")
        echo(json.dumps(conf.domain.get(), indent=4))
        echo("DEPLOYABLE:")
        echo(json.dumps(conf.deployable, indent=4))

    ec2_conn = boto3.resource('ec2', region_name=aws_region)

    if conf.tier['is_live']:
        secho("NOTE! This tier is marked as LIVE. Special restrictions may apply. Use --force to override.", fg="yellow")

    # Autoscaling defaults; the deployable config may override any of them.
    autoscaling = {
        "min": 1,
        "max": 1,
        "desired": 1,
        "instance_type": args.instance_type,
    }
    autoscaling.update(conf.deployable.get('autoscaling', {}))
    release = conf.deployable.get('release', '')

    if args.launch and autoscaling and not args.force:
        secho("--launch specified, but tier config specifies 'use_autoscaling'. Use --force to override.", fg="red")
        sys.exit(1)
    if args.autoscale and not autoscaling and not args.force:
        secho("--autoscale specified, but tier config doesn't specify 'use_autoscaling'. Use --force to override.", fg="red")
        sys.exit(1)

    echo("Launch an instance of {!r} on tier {!r}".format(name, tier_name))
    if release:
        echo("Using AMI with release tag: {!r}".format(release))
    else:
        echo("Using the newest AMI baked (which may not be what you expect).")

    ami = _find_latest_ami(name, release)
    echo("AMI: {} [{}]".format(ami.id, ami.name))

    if args.ami:
        echo("Using a specified AMI: {!r}".format(args.ami))
        ec2 = boto3.resource('ec2', region_name=aws_region)
        if ami.id != args.ami:
            secho("AMI found is different from AMI specified on command line.", fg="yellow")
            if conf.tier['is_live'] and not args.force:
                secho("This is a live tier. Can't run mismatched AMI unless --force is specified", fg="red")
                sys.exit(1)
        try:
            ami = ec2.Image(args.ami)
        except Exception as e:
            raise RuntimeError("Ami '%s' not found or broken: %s" % (args.ami, e))

    if not ami:
        sys.exit(1)

    ami_info = dict(
        ami_id=ami.id,
        ami_name=ami.name,
        ami_created=ami.creation_date,
        ami_tags={d['Key']: d['Value'] for d in ami.tags},
    )
    if args.verbose:
        echo("AMI Info:")
        echo(pretty(ami_info))

    if autoscaling:
        echo("Autoscaling group:")
        echo(pretty(autoscaling))
    else:
        echo("EC2:")
        echo("\tInstance Type:\t{}".format(args.instance_type))

    ec2 = boto3.resource('ec2', region_name=aws_region)

    # Get all 'private' subnets
    filters = {'tag:tier': tier_name, 'tag:realm': 'private'}
    subnets = list(ec2.subnets.filter(Filters=filterize(filters)))
    if not subnets:
        secho("Error: No subnet available matching filter {}".format(filters), fg="red")
        sys.exit(1)

    # Get the "one size fits all" security group
    filters = {'tag:tier': tier_name, 'tag:Name': '{}-private-sg'.format(tier_name)}
    security_group = list(ec2.security_groups.filter(Filters=filterize(filters)))[0]

    # The key pair name for SSH
    key_name = conf.tier['aws']['ssh_key']
    if "." in key_name:
        key_name = key_name.split(".", 1)[0]  # TODO: Distinguish between key name and .pem key file name

    # Example tag sets (was a no-op string literal; converted to comments):
    #   autoscaling group:
    #     Name          LIVENORTH-themachines-backend-auto
    #     api-port      10080
    #     api-target    themachines-backend
    #     service-name  themachines-backend
    #     service-type  rest-api
    #     tier          LIVENORTH
    #   ec2:
    #     Name          DEVNORTH-drift-base
    #     launched-by   nonnib
    #     api-port      10080
    #     api-target    drift-base
    #     service-name  drift-base
    #     service-type  rest-api
    #     tier          DEVNORTH

    target_name = "{}-{}".format(tier_name, name)
    if autoscaling:
        target_name += "-auto"

    # To auto-generate Redis cache url, we create the Redis backend using our config,
    # and then ask for a url representation of it:
    drift_config_url = get_redis_cache_backend(conf.table_store, tier_name).get_url()

    # Specify the app
    app_root = '/etc/opt/{service_name}'.format(service_name=name)

    tags = {
        "Name": target_name,
        "tier": tier_name,
        "service-name": name,
        "service-type": conf.drift_app.get('service_type', 'web-app'),
        "config-url": drift_config_url,
        "app-root": app_root,
        "launched-by": boto3.client('sts').get_caller_identity()['Arn'],
    }

    if tags['service-type'] == 'web-app':
        # Make instance part of api-router round-robin load balancing
        tags.update(
            {
                "api-target": name,
                "api-port": str(conf.drift_app.get('PORT', 10080)),
                "api-status": "online",
            }
        )

    tags.update(fold_tags(ami.tags))

    user_data = '''#!/bin/bash
# Environment variables set by drift-admin run command:
export DRIFT_CONFIG_URL={drift_config_url}
export DRIFT_TIER={tier_name}
export DRIFT_APP_ROOT={app_root}
export DRIFT_SERVICE={service_name}
export AWS_REGION={aws_region}
# Shell script from ami-run.sh:
'''.format(drift_config_url=drift_config_url, tier_name=tier_name, app_root=app_root, service_name=name, aws_region=aws_region)

    user_data += pkg_resources.resource_string(__name__, "ami-run.sh").decode()

    custom_script_name = os.path.join(conf.drift_app['app_root'], 'scripts', 'ami-run.sh')
    if os.path.exists(custom_script_name):
        echo("Using custom shell script {!r}".format(custom_script_name))
        user_data += "\n# Custom shell script from {}\n".format(custom_script_name)
        user_data += open(custom_script_name, 'r').read()
    else:
        echo("Note: No custom ami-run.sh script found for this application.")

    if args.verbose:
        echo("Subnets:")
        for subnet in subnets:
            echo("\t{} - {}".format(fold_tags(subnet.tags)['Name'], subnet.id))
        echo("Security Group:\n\t{} [{} {}]".format(fold_tags(security_group.tags)["Name"], security_group.id, security_group.vpc_id))
        echo("SSH Key:")
        echo(key_name)
        echo("Tags:")
        for k in sorted(tags.keys()):
            echo(" %s: %s" % (k, tags[k]))
        echo("user_data:")
        from drift.utils import pretty as poo
        echo(poo(user_data, 'bash'))

    if args.preview:
        echo("--preview specified, exiting now before actually doing anything.")
        sys.exit(0)

    # Normalize line endings so the shell script runs cleanly on Linux.
    user_data = user_data.replace('\r\n', '\n')

    if autoscaling:
        client = boto3.client('autoscaling', region_name=aws_region)
        launch_config_name = '{}-{}-launchconfig-{}-{}'.format(tier_name, name, datetime.utcnow(), release)
        # Colons (from the timestamp) are not valid in launch config names.
        launch_config_name = launch_config_name.replace(':', '.')

        kwargs = dict(
            LaunchConfigurationName=launch_config_name,
            ImageId=ami.id,
            KeyName=key_name,
            SecurityGroups=[security_group.id],
            InstanceType=args.instance_type or autoscaling['instance_type'],
            IamInstanceProfile=IAM_ROLE,
            InstanceMonitoring={'Enabled': True},
            UserData=user_data,
        )
        if args.verbose:
            echo("Creating launch configuration using params:")
            echo(pretty(kwargs))
        else:
            echo("Creating launch configuration: {}".format(launch_config_name))
        client.create_launch_configuration(**kwargs)

        # Update current autoscaling group or create a new one if it doesn't exist.
        groups = client.describe_auto_scaling_groups(AutoScalingGroupNames=[target_name])

        kwargs = dict(
            AutoScalingGroupName=target_name,
            LaunchConfigurationName=launch_config_name,
            MinSize=autoscaling['min'],
            MaxSize=autoscaling['max'],
            DesiredCapacity=autoscaling['desired'],
            VPCZoneIdentifier=','.join([subnet.id for subnet in subnets]),
        )
        if not groups['AutoScalingGroups']:
            echo("Creating a new autoscaling group using params:")
            echo(pretty(kwargs))
            client.create_auto_scaling_group(**kwargs)
        else:
            echo("Updating current autoscaling group {!r}".format(target_name))
            client.update_auto_scaling_group(**kwargs)

        # Prepare tags which get propagated to all new instances
        tagsarg = [
            {
                'ResourceId': tags['Name'],
                'ResourceType': 'auto-scaling-group',
                'Key': k,
                'Value': v,
                'PropagateAtLaunch': True,
            }
            for k, v in tags.items()
        ]
        echo("Updating tags on autoscaling group that get propagated to all new instances.")
        client.create_or_update_tags(Tags=tagsarg)

        # Define a 2 min termination cooldown so api-router can drain the connections.
        echo("Configuring lifecycle hook.")
        response = client.put_lifecycle_hook(
            LifecycleHookName='Wait-2-minutes-on-termination',
            AutoScalingGroupName=target_name,
            LifecycleTransition='autoscaling:EC2_INSTANCE_TERMINATING',
            HeartbeatTimeout=120,
            DefaultResult='CONTINUE'
        )

        echo("Terminating instances in autoscaling group. New ones will be launched.")
        echo("Old instances will linger for 2 minutes while connections are drained.")
        asg = client.describe_auto_scaling_groups(AutoScalingGroupNames=[target_name])
        for instance in asg['AutoScalingGroups'][0]['Instances']:
            response = client.terminate_instance_in_auto_scaling_group(
                InstanceId=instance['InstanceId'],
                ShouldDecrementDesiredCapacity=False
            )
            echo(" " + response['Activity']['Description'])
        secho("Done!", fg="green")
    else:
        # Pick a random subnet from list of available subnets
        subnet = random.choice(subnets)
        echo("Randomly picked this subnet to use: {!r}".format(subnet))
        echo("Launching EC2 instance...")
        # NOTE(review): run_instances/update/add_tag below use boto2-style
        # arguments; boto3's EC2 resource expects ImageId=..., MinCount=...
        # This non-autoscaling path looks stale -- verify before relying on it
        # (it is unreachable while autoscale is forced on above).
        reservation = ec2_conn.run_instances(
            ami.id,
            instance_type=args.instance_type,
            subnet_id=subnet.id,
            security_group_ids=[security_group.id],
            key_name=key_name,
            instance_profile_name=IAM_ROLE,
            user_data=user_data,
        )
        if len(reservation.instances) == 0:
            secho("No instances in reservation!", fg="red")
            sys.exit(1)
        instance = reservation.instances[0]
        echo("{} starting up...".format(instance))

        # Check up on its status every so often
        status = instance.update()
        while status == 'pending':
            time.sleep(10)
            status = instance.update()

        if status == 'running':
            for k, v in tags.items():
                instance.add_tag(k, v)
            echo("{} running at {}".format(instance, instance.private_ip_address))
        else:
            secho("Instance was not created correctly", fg="red")
            sys.exit(1)
def _copy_image_command(args):
_copy_image(args.ami)
def _copy_image(ami_id):
    """Distribute an AMI from the baking region to all other active tier regions.

    Starts one copy_image job per destination region (they run concurrently on
    AWS' side), then waits for each copy to become available and replicates the
    source AMI's tags onto it.

    :param ami_id: Id of the source AMI in the configured baking region.
    """
    conf = get_drift_config()
    domain = conf.domain.get()
    aws_region = domain['aws']['ami_baking_region']

    # Grab the source AMI
    source_ami = boto3.resource('ec2', region_name=aws_region).Image(ami_id)

    # Create a list of all regions that are active
    active_tiers = conf.table_store.get_table('tiers').find({'state': 'active'})
    regions = set([tier['aws']['region'] for tier in active_tiers if 'aws' in tier])
    if aws_region in regions:
        regions.remove(aws_region)  # This is the source region

    echo("Distributing {} to region(s) {}.".format(source_ami.id, ', '.join(regions)))

    # Kick off all copy jobs first so they progress in parallel.
    jobs = []
    for region_id in regions:
        ec2_client = boto3.client('ec2', region_name=region_id)
        ret = ec2_client.copy_image(
            SourceRegion=aws_region,
            SourceImageId=source_ami.id,
            Name=source_ami.name or "",
            Description=source_ami.description or "",
        )
        job = {
            'id': ret['ImageId'],
            'region_id': region_id,
            'client': ec2_client,
        }
        jobs.append(job)

    # Wait on jobs and copy tags
    for job in jobs:
        ami = boto3.resource('ec2', region_name=job['region_id']).Image(job['id'])
        echo("Waiting on {}...".format(ami.id))
        ami.wait_until_exists(Filters=[{'Name': 'state', 'Values': ['available']}])
        if ami.state != 'available':
            # Copy did not reach 'available'; skip tagging it.
            continue
        echo("AMI {id} in {region_id} is available. Copying tags...".format(**job))
        # Image.tags is None when the source AMI carries no tags, and
        # create_tags() rejects a missing/empty tag list - skip in that case.
        if source_ami.tags:
            job['client'].create_tags(Resources=[job['id']], Tags=source_ami.tags)

    secho("All done.", fg="green")
| |
import re
import lldb.formatters.Logger
# C++ STL formatters for LLDB
# These formatters are based upon the version of the GNU libstdc++
# as it ships with Mac OS X 10.6.8 thru 10.8.0
# You are encouraged to look at the STL implementation for your platform
# before relying on these formatters to do the right thing for your setup
class StdListSynthProvider:
    """LLDB synthetic-children provider for libstdc++ std::list.

    std::list is a circular doubly-linked list anchored at a sentinel node
    (_M_impl._M_node); elements are exposed as synthetic children '[i]'.
    """

    def __init__(self, valobj, dict):
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj
        # Cached child count; recomputed lazily after update() resets it.
        self.count = None
        logger >> "Providing synthetic children for a list named " + \
            str(valobj.GetName())

    def next_node(self, node):
        # Follow the forward link of a list node.
        logger = lldb.formatters.Logger.Logger()
        return node.GetChildMemberWithName('_M_next')

    def is_valid(self, node):
        # A node is "valid" while advancing from it does not wrap back around
        # to the sentinel node address (i.e. end of list not yet reached).
        logger = lldb.formatters.Logger.Logger()
        valid = self.value(self.next_node(node)) != self.node_address
        if valid:
            logger >> "%s is valid" % str(self.valobj.GetName())
        else:
            logger >> "synthetic value is not valid"
        return valid

    def value(self, node):
        # Pointer value of a node, used for identity comparisons while walking.
        logger = lldb.formatters.Logger.Logger()
        value = node.GetValueAsUnsigned()
        logger >> "synthetic value for {}: {}".format(
            str(self.valobj.GetName()), value)
        return value

    # Floyd's cycle-finding algorithm
    # try to detect if this list has a loop
    def has_loop(self):
        global _list_uses_loop_detector
        logger = lldb.formatters.Logger.Logger()
        if not _list_uses_loop_detector:
            logger >> "Asked not to use loop detection"
            return False
        slow = self.next
        fast1 = self.next
        fast2 = self.next
        while self.is_valid(slow):
            slow_value = self.value(slow)
            # Advance the fast cursor two hops for every one hop of 'slow'.
            fast1 = self.next_node(fast2)
            fast2 = self.next_node(fast1)
            if self.value(fast1) == slow_value or self.value(
                    fast2) == slow_value:
                return True
            slow = self.next_node(slow)
        return False

    def num_children(self):
        logger = lldb.formatters.Logger.Logger()
        if self.count is None:
            # libstdc++ 6.0.21 added dedicated count field.
            count_child = self.node.GetChildMemberWithName('_M_data')
            if count_child and count_child.IsValid():
                self.count = count_child.GetValueAsUnsigned(0)
            if self.count is None:
                # No count field in this libstdc++ version: walk the list.
                self.count = self.num_children_impl()
        return self.count

    def num_children_impl(self):
        # Count elements by traversal; returns 0 for any uninitialized,
        # looping or otherwise suspicious list.
        logger = lldb.formatters.Logger.Logger()
        try:
            next_val = self.next.GetValueAsUnsigned(0)
            prev_val = self.prev.GetValueAsUnsigned(0)
            # After a std::list has been initialized, both next and prev will
            # be non-NULL
            if next_val == 0 or prev_val == 0:
                return 0
            # Sentinel pointing at itself: empty list.
            if next_val == self.node_address:
                return 0
            # next == prev with a non-empty list: exactly one element.
            if next_val == prev_val:
                return 1
            if self.has_loop():
                return 0
            size = 2
            current = self.next
            while current.GetChildMemberWithName(
                    '_M_next').GetValueAsUnsigned(0) != self.node_address:
                size = size + 1
                current = current.GetChildMemberWithName('_M_next')
            return (size - 1)
        except:
            # Deliberately broad: any SB API failure means "no children".
            return 0

    def get_child_index(self, name):
        # Map a synthetic child name like '[3]' back to its index.
        logger = lldb.formatters.Logger.Logger()
        try:
            return int(name.lstrip('[').rstrip(']'))
        except:
            return -1

    def get_child_at_index(self, index):
        logger = lldb.formatters.Logger.Logger()
        logger >> "Fetching child " + str(index)
        if index < 0:
            return None
        if index >= self.num_children():
            return None
        try:
            # Walk 'index' hops from the first element node.
            offset = index
            current = self.next
            while offset > 0:
                current = current.GetChildMemberWithName('_M_next')
                offset = offset - 1
            # Payload sits after the node's two link pointers, hence the
            # 2 * pointer-size byte offset into the node.
            return current.CreateChildAtOffset(
                '[' + str(index) + ']',
                2 * current.GetType().GetByteSize(),
                self.data_type)
        except:
            return None

    def extract_type(self):
        # Element type = first template argument of the (deref'd) list type.
        logger = lldb.formatters.Logger.Logger()
        list_type = self.valobj.GetType().GetUnqualifiedType()
        if list_type.IsReferenceType():
            list_type = list_type.GetDereferencedType()
        if list_type.GetNumberOfTemplateArguments() > 0:
            data_type = list_type.GetTemplateArgumentType(0)
        else:
            data_type = None
        return data_type

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        # preemptively setting this to None - we might end up changing our mind
        # later
        self.count = None
        try:
            impl = self.valobj.GetChildMemberWithName('_M_impl')
            self.node = impl.GetChildMemberWithName('_M_node')
            self.node_address = self.valobj.AddressOf().GetValueAsUnsigned(0)
            self.next = self.node.GetChildMemberWithName('_M_next')
            self.prev = self.node.GetChildMemberWithName('_M_prev')
            self.data_type = self.extract_type()
            self.data_size = self.data_type.GetByteSize()
        except:
            # Leave stale/missing state in place; accessors guard with try.
            pass

    def has_children(self):
        return True
class StdVectorSynthProvider:
    """LLDB synthetic-children provider for libstdc++ std::vector.

    Dispatches to one of two implementations: the generic contiguous-buffer
    one, or the bit-packed one for std::vector<bool>.
    """

    class StdVectorImplementation(object):
        # Generic std::vector<T>: contiguous storage delimited by the
        # _M_start / _M_finish / _M_end_of_storage pointers.

        def __init__(self, valobj):
            self.valobj = valobj
            # Cached element count; reset by update().
            self.count = None

        def num_children(self):
            if self.count is None:
                self.count = self.num_children_impl()
            return self.count

        def num_children_impl(self):
            try:
                start_val = self.start.GetValueAsUnsigned(0)
                finish_val = self.finish.GetValueAsUnsigned(0)
                end_val = self.end.GetValueAsUnsigned(0)
                # Before a vector has been constructed, it will contain bad values
                # so we really need to be careful about the length we return since
                # uninitialized data can cause us to return a huge number. We need
                # to also check for any of the start, finish or end of storage values
                # being zero (NULL). If any are, then this vector has not been
                # initialized yet and we should return zero

                # Make sure nothing is NULL
                if start_val == 0 or finish_val == 0 or end_val == 0:
                    return 0
                # Make sure start is less than finish
                if start_val >= finish_val:
                    return 0
                # Make sure finish is less than or equal to end of storage
                if finish_val > end_val:
                    return 0

                # if we have a struct (or other data type that the compiler pads to
                # native word size) this check might fail, unless the sizeof() we
                # get is itself incremented to take the padding bytes into account
                # - on current clang it looks like this is the case
                num_children = (finish_val - start_val)
                if (num_children % self.data_size) != 0:
                    return 0
                else:
                    # BUG FIX: use floor division. Under Python 3, '/' yields a
                    # float here, and the child count must be an int.
                    num_children = num_children // self.data_size
                return num_children
            except:
                # Any SB API failure: report no children.
                return 0

        def get_child_at_index(self, index):
            logger = lldb.formatters.Logger.Logger()
            logger >> "Retrieving child " + str(index)
            if index < 0:
                return None
            if index >= self.num_children():
                return None
            try:
                # Element i lives at byte offset i * sizeof(T) from _M_start.
                offset = index * self.data_size
                return self.start.CreateChildAtOffset(
                    '[' + str(index) + ']', offset, self.data_type)
            except:
                return None

        def update(self):
            # preemptively setting this to None - we might end up changing our
            # mind later
            self.count = None
            try:
                impl = self.valobj.GetChildMemberWithName('_M_impl')
                self.start = impl.GetChildMemberWithName('_M_start')
                self.finish = impl.GetChildMemberWithName('_M_finish')
                self.end = impl.GetChildMemberWithName('_M_end_of_storage')
                self.data_type = self.start.GetType().GetPointeeType()
                self.data_size = self.data_type.GetByteSize()
                # if any of these objects is invalid, it means there is no
                # point in trying to fetch anything
                if (self.start.IsValid() and self.finish.IsValid()
                        and self.end.IsValid() and self.data_type.IsValid()):
                    self.count = None
                else:
                    self.count = 0
            except:
                pass
            return True

    class StdVBoolImplementation(object):
        # std::vector<bool>: bits packed into machine words; an element is
        # addressed by (word pointer _M_p, bit offset _M_offset).

        def __init__(self, valobj, bool_type):
            self.valobj = valobj
            self.bool_type = bool_type
            self.valid = False

        def num_children(self):
            if self.valid:
                start = self.start_p.GetValueAsUnsigned(0)
                finish = self.finish_p.GetValueAsUnsigned(0)
                offset = self.offset.GetValueAsUnsigned(0)
                if finish >= start:
                    return (finish - start) * 8 + offset
            # BUG FIX: previously fell off the end and returned None when the
            # provider was invalid (or finish < start), which made the
            # 'index >= num_children()' comparison raise under Python 3.
            return 0

        def get_child_at_index(self, index):
            if index >= self.num_children():
                return None
            element_type = self.start_p.GetType().GetPointeeType()
            element_bits = 8 * element_type.GetByteSize()
            # BUG FIX: floor division. Under Python 3, '/' yields a float,
            # corrupting the byte offset passed to CreateChildAtOffset.
            element_offset = (index // element_bits) * \
                element_type.GetByteSize()
            bit_offset = index % element_bits
            element = self.start_p.CreateChildAtOffset(
                '[' + str(index) + ']', element_offset, element_type)
            bit = element.GetValueAsUnsigned(0) & (1 << bit_offset)
            if bit != 0:
                value_expr = "(bool)true"
            else:
                value_expr = "(bool)false"
            return self.valobj.CreateValueFromExpression(
                "[%d]" % index, value_expr)

        def update(self):
            try:
                m_impl = self.valobj.GetChildMemberWithName('_M_impl')
                self.m_start = m_impl.GetChildMemberWithName('_M_start')
                self.m_finish = m_impl.GetChildMemberWithName('_M_finish')
                self.start_p = self.m_start.GetChildMemberWithName('_M_p')
                self.finish_p = self.m_finish.GetChildMemberWithName('_M_p')
                self.offset = self.m_finish.GetChildMemberWithName('_M_offset')
                self.valid = True
            except:
                self.valid = False
            return True

    def __init__(self, valobj, dict):
        logger = lldb.formatters.Logger.Logger()
        # vector<bool> uses a completely different layout; pick the right impl
        # based on the first template argument.
        first_template_arg_type = valobj.GetType().GetTemplateArgumentType(0)
        if str(first_template_arg_type.GetName()) == "bool":
            self.impl = self.StdVBoolImplementation(
                valobj, first_template_arg_type)
        else:
            self.impl = self.StdVectorImplementation(valobj)
        logger >> "Providing synthetic children for a vector named " + \
            str(valobj.GetName())

    def num_children(self):
        return self.impl.num_children()

    def get_child_index(self, name):
        # Map a synthetic child name like '[3]' back to its index.
        try:
            return int(name.lstrip('[').rstrip(']'))
        except:
            return -1

    def get_child_at_index(self, index):
        return self.impl.get_child_at_index(index)

    def update(self):
        return self.impl.update()

    def has_children(self):
        return True
class StdMapSynthProvider:
    """LLDB synthetic-children provider for libstdc++ std::map.

    Exposes the elements of the underlying red-black tree (_M_t._M_impl) as
    synthetic children '[i]' by in-order traversal from the leftmost node.
    """

    def __init__(self, valobj, dict):
        logger = lldb.formatters.Logger.Logger()
        self.valobj = valobj
        # Cached child count; reset by update().
        self.count = None
        logger >> "Providing synthetic children for a map named " + \
            str(valobj.GetName())

    # we need this function as a temporary workaround for rdar://problem/10801549
    # which prevents us from extracting the std::pair<K,V> SBType out of the template
    # arguments for _Rep_Type _M_t in the map itself - because we have to make up the
    # typename and then find it, we may hit the situation were std::string has multiple
    # names but only one is actually referenced in the debug information. hence, we need
    # to replace the longer versions of std::string with the shorter one in order to be able
    # to find the type name
    def fixup_class_name(self, class_name):
        logger = lldb.formatters.Logger.Logger()
        # The long spellings of std::string's underlying type; the debug info
        # usually only references the short alias. (The original code listed
        # each of these checks twice - the duplicates were dead code.)
        if class_name in (
                'std::basic_string<char, std::char_traits<char>, std::allocator<char> >',
                'basic_string<char, std::char_traits<char>, std::allocator<char> >'):
            return 'std::basic_string<char>', True
        return class_name, False

    def update(self):
        logger = lldb.formatters.Logger.Logger()
        # preemptively setting this to None - we might end up changing our mind
        # later
        self.count = None
        try:
            # we will set this to True if we find out that discovering a node in
            # the map takes more steps than the overall size of the RB tree
            # if this gets set to True, then we will merrily return None for
            # any child from that moment on
            self.garbage = False
            self.Mt = self.valobj.GetChildMemberWithName('_M_t')
            self.Mimpl = self.Mt.GetChildMemberWithName('_M_impl')
            self.Mheader = self.Mimpl.GetChildMemberWithName('_M_header')

            map_type = self.valobj.GetType()
            if map_type.IsReferenceType():
                logger >> "Dereferencing type"
                map_type = map_type.GetDereferencedType()

            # Get the type of std::pair<key, value>. It is the first template
            # argument type of the 4th template argument to std::map.
            allocator_type = map_type.GetTemplateArgumentType(3)
            self.data_type = allocator_type.GetTemplateArgumentType(0)
            if not self.data_type:
                # GCC does not emit DW_TAG_template_type_parameter for
                # std::allocator<...>. For such a case, get the type of
                # std::pair from a member of std::map.
                rep_type = self.valobj.GetChildMemberWithName('_M_t').GetType()
                self.data_type = rep_type.GetTypedefedType().GetTemplateArgumentType(1)

            # from libstdc++ implementation of _M_root for rbtree
            self.Mroot = self.Mheader.GetChildMemberWithName('_M_parent')
            self.data_size = self.data_type.GetByteSize()
            # Payload begins after the node header, so skip its size.
            self.skip_size = self.Mheader.GetType().GetByteSize()
        except:
            # Leave stale/missing state in place; accessors guard with try.
            pass

    def num_children(self):
        logger = lldb.formatters.Logger.Logger()
        if self.count is None:
            self.count = self.num_children_impl()
        return self.count

    def num_children_impl(self):
        logger = lldb.formatters.Logger.Logger()
        try:
            # A NULL root means an empty map.
            root_ptr_val = self.node_ptr_value(self.Mroot)
            if root_ptr_val == 0:
                return 0
            count = self.Mimpl.GetChildMemberWithName(
                '_M_node_count').GetValueAsUnsigned(0)
            logger >> "I have " + str(count) + " children available"
            return count
        except:
            return 0

    def get_child_index(self, name):
        # Map a synthetic child name like '[3]' back to its index.
        logger = lldb.formatters.Logger.Logger()
        try:
            return int(name.lstrip('[').rstrip(']'))
        except:
            return -1

    def get_child_at_index(self, index):
        logger = lldb.formatters.Logger.Logger()
        logger >> "Being asked to fetch child[" + str(index) + "]"
        if index < 0:
            return None
        if index >= self.num_children():
            return None
        if self.garbage:
            logger >> "Returning None since we are a garbage tree"
            return None
        try:
            # In-order traversal: start at the leftmost (smallest) node and
            # step forward 'index' times.
            offset = index
            current = self.left(self.Mheader)
            while offset > 0:
                current = self.increment_node(current)
                offset = offset - 1
            # skip all the base stuff and get at the data
            return current.CreateChildAtOffset(
                '[' + str(index) + ']', self.skip_size, self.data_type)
        except:
            return None

    # utility functions
    def node_ptr_value(self, node):
        logger = lldb.formatters.Logger.Logger()
        return node.GetValueAsUnsigned(0)

    def right(self, node):
        logger = lldb.formatters.Logger.Logger()
        return node.GetChildMemberWithName("_M_right")

    def left(self, node):
        logger = lldb.formatters.Logger.Logger()
        return node.GetChildMemberWithName("_M_left")

    def parent(self, node):
        logger = lldb.formatters.Logger.Logger()
        return node.GetChildMemberWithName("_M_parent")

    # from libstdc++ implementation of iterator for rbtree
    def increment_node(self, node):
        logger = lldb.formatters.Logger.Logger()
        # Cap the walk at the tree's size: a healthy successor search never
        # needs more steps; exceeding it marks the tree as garbage.
        max_steps = self.num_children()
        if self.node_ptr_value(self.right(node)) != 0:
            # Successor is the leftmost node of the right subtree.
            x = self.right(node)
            max_steps -= 1
            while self.node_ptr_value(self.left(x)) != 0:
                x = self.left(x)
                max_steps -= 1
                logger >> str(max_steps) + " more to go before giving up"
                if max_steps <= 0:
                    self.garbage = True
                    return None
            return x
        else:
            # No right subtree: climb until we come up from a left child.
            x = node
            y = self.parent(x)
            max_steps -= 1
            while self.node_ptr_value(x) == self.node_ptr_value(self.right(y)):
                x = y
                y = self.parent(y)
                max_steps -= 1
                logger >> str(max_steps) + " more to go before giving up"
                if max_steps <= 0:
                    self.garbage = True
                    return None
            if self.node_ptr_value(self.right(x)) != self.node_ptr_value(y):
                x = y
            return x

    def has_children(self):
        return True
# Module-level switch: set to False to disable Floyd cycle detection when
# counting std::list children (see StdListSynthProvider.has_loop).
_list_uses_loop_detector = True
| |
"""
Processors are little transformation blocks that transform the fragments list
from a buffer before the BufferControl will render it to the screen.
They can insert fragments before or after, or highlight fragments by replacing the
fragment types.
"""
import re
from abc import ABCMeta, abstractmethod
from typing import (
TYPE_CHECKING,
Callable,
Hashable,
List,
Optional,
Tuple,
Type,
Union,
cast,
)
from prompt_toolkit.application.current import get_app
from prompt_toolkit.cache import SimpleCache
from prompt_toolkit.document import Document
from prompt_toolkit.filters import FilterOrBool, to_filter, vi_insert_multiple_mode
from prompt_toolkit.formatted_text import (
AnyFormattedText,
StyleAndTextTuples,
to_formatted_text,
)
from prompt_toolkit.formatted_text.utils import fragment_list_len, fragment_list_to_text
from prompt_toolkit.search import SearchDirection
from prompt_toolkit.utils import to_int, to_str
from .utils import explode_text_fragments
if TYPE_CHECKING:
from .controls import BufferControl, UIContent
# Public API of this module: the processor base/infrastructure classes,
# all concrete processors, and the merge helper.
__all__ = [
    "Processor",
    "TransformationInput",
    "Transformation",
    "DummyProcessor",
    "HighlightSearchProcessor",
    "HighlightIncrementalSearchProcessor",
    "HighlightSelectionProcessor",
    "PasswordProcessor",
    "HighlightMatchingBracketProcessor",
    "DisplayMultipleCursors",
    "BeforeInput",
    "ShowArg",
    "AfterInput",
    "AppendAutoSuggestion",
    "ConditionalProcessor",
    "ShowLeadingWhiteSpaceProcessor",
    "ShowTrailingWhiteSpaceProcessor",
    "TabsProcessor",
    "ReverseSearchProcessor",
    "DynamicProcessor",
    "merge_processors",
]
class Processor(metaclass=ABCMeta):
    """
    Manipulate the fragments for a given line in a
    :class:`~prompt_toolkit.layout.controls.BufferControl`.
    """

    @abstractmethod
    def apply_transformation(
        self, transformation_input: "TransformationInput"
    ) -> "Transformation":
        """
        Apply transformation. Returns a :class:`.Transformation` instance.

        :param transformation_input: :class:`.TransformationInput` object.
        """
        # Default body: identity transformation (subclasses override; this is
        # only reached through an explicit super() call).
        return Transformation(transformation_input.fragments)
# Cursor-position mapping functions: translate an index in the source text to
# the corresponding index in the displayed fragments, and vice versa.
SourceToDisplay = Callable[[int], int]
DisplayToSource = Callable[[int], int]
class TransformationInput:
    """
    The input arguments handed to :meth:`.Processor.apply_transformation`.

    :param buffer_control: :class:`.BufferControl` instance.
    :param document: The :class:`~prompt_toolkit.document.Document` being
        rendered.
    :param lineno: The number of the line to which we apply the processor.
    :param source_to_display: A function that returns the position in the
        `fragments` for any position in the source string. (This takes
        previous processors into account.)
    :param fragments: List of fragments that we can transform. (Received from the
        previous processor.)
    :param width: Available width (presumably of the rendering area, in
        character cells -- confirm at the call site in `controls`).
    :param height: Available height; see `width`.
    """

    def __init__(
        self,
        buffer_control: "BufferControl",
        document: Document,
        lineno: int,
        source_to_display: SourceToDisplay,
        fragments: StyleAndTextTuples,
        width: int,
        height: int,
    ) -> None:
        self.buffer_control = buffer_control
        self.document = document
        self.lineno = lineno
        self.source_to_display = source_to_display
        self.fragments = fragments
        self.width = width
        self.height = height

    def unpack(
        self,
    ) -> Tuple[
        "BufferControl", Document, int, SourceToDisplay, StyleAndTextTuples, int, int
    ]:
        # Convenience: unpack all attributes in declaration order, for tuple
        # assignment inside apply_transformation implementations.
        return (
            self.buffer_control,
            self.document,
            self.lineno,
            self.source_to_display,
            self.fragments,
            self.width,
            self.height,
        )
class Transformation:
    """
    Transformation result, as returned by :meth:`.Processor.apply_transformation`.

    Important: Always make sure that the length of `document.text` is equal to
    the length of all the text in `fragments`!

    :param fragments: The transformed fragments. To be displayed, or to pass to
        the next processor.
    :param source_to_display: Cursor position transformation from original
        string to transformed string.
    :param display_to_source: Cursor position transformed from source string to
        original string.
    """

    def __init__(
        self,
        fragments: StyleAndTextTuples,
        source_to_display: Optional[SourceToDisplay] = None,
        display_to_source: Optional[DisplayToSource] = None,
    ) -> None:
        # When no mapping is given, positions map onto themselves.
        def identity(i: int) -> int:
            return i

        self.fragments = fragments
        self.source_to_display = (
            identity if source_to_display is None else source_to_display
        )
        self.display_to_source = (
            identity if display_to_source is None else display_to_source
        )
class DummyProcessor(Processor):
    """
    A `Processor` that doesn't do anything.
    """

    def apply_transformation(
        self, transformation_input: TransformationInput
    ) -> Transformation:
        # Identity: pass the fragments through unchanged.
        return Transformation(transformation_input.fragments)
class HighlightSearchProcessor(Processor):
    """
    Processor that highlights search matches in the document.
    Note that this doesn't support multiline search matches yet.

    The style classes 'search' and 'search.current' will be applied to the
    content.
    """

    _classname = "search"
    _classname_current = "search.current"

    def _get_search_text(self, buffer_control: "BufferControl") -> str:
        """
        The text we are searching for.
        """
        return buffer_control.search_state.text

    def apply_transformation(
        self, transformation_input: TransformationInput
    ) -> Transformation:
        (
            buffer_control,
            document,
            lineno,
            source_to_display,
            fragments,
            _,
            _,
        ) = transformation_input.unpack()

        search_text = self._get_search_text(buffer_control)

        # Nothing to highlight without a search string, or once the
        # application is done rendering.
        if not search_text or get_app().is_done:
            return Transformation(fragments)

        # Style suffixes appended to every character inside a match.
        match_style = " class:%s " % (self._classname,)
        current_match_style = " class:%s " % (self._classname_current,)

        line_text = fragment_list_to_text(fragments)
        fragments = explode_text_fragments(fragments)

        flags = (
            re.IGNORECASE
            if buffer_control.search_state.ignore_case()
            else re.RegexFlag(0)
        )

        # Display column of the cursor on this line (None when the cursor
        # is on another line).
        cursor_column: Optional[int] = None
        if document.cursor_position_row == lineno:
            cursor_column = source_to_display(document.cursor_position_col)

        # For each search match, append the style suffix to every character.
        for match in re.finditer(re.escape(search_text), line_text, flags=flags):
            on_cursor = (
                cursor_column is not None
                and match.start() <= cursor_column < match.end()
            )
            suffix = current_match_style if on_cursor else match_style

            for i in range(match.start(), match.end()):
                old_style, old_text, *_ = fragments[i]
                fragments[i] = (old_style + suffix, old_text)

        return Transformation(fragments)
class HighlightIncrementalSearchProcessor(HighlightSearchProcessor):
    """
    Highlight the search terms that are used for highlighting the incremental
    search. The style class 'incsearch' will be applied to the content.

    Important: this requires the `preview_search=True` flag to be set for the
    `BufferControl`. Otherwise, the cursor position won't be set to the search
    match while searching, and nothing happens.
    """

    _classname = "incsearch"
    _classname_current = "incsearch.current"

    def _get_search_text(self, buffer_control: "BufferControl") -> str:
        """
        The text we are searching for.
        """
        # Use the text currently typed into the search buffer, when present.
        search_buffer = buffer_control.search_buffer
        if search_buffer is None or not search_buffer.text:
            return ""
        return search_buffer.text
class HighlightSelectionProcessor(Processor):
    """
    Processor that highlights the selection in the document.
    """

    def apply_transformation(
        self, transformation_input: TransformationInput
    ) -> Transformation:
        (
            buffer_control,
            document,
            lineno,
            source_to_display,
            fragments,
            _,
            _,
        ) = transformation_input.unpack()

        selected_fragment = " class:selected "

        # Highlight only when this line intersects the selection.
        selection_at_line = document.selection_range_at_line(lineno)
        if not selection_at_line:
            return Transformation(fragments)

        start, end = selection_at_line
        start = source_to_display(start)
        end = source_to_display(end)

        fragments = explode_text_fragments(fragments)

        if start == 0 and end == 0 and len(fragments) == 0:
            # Empty line: insert a single highlighted space so that the
            # selection stays visible.
            return Transformation([(selected_fragment, " ")])

        for i in range(start, end):
            if i < len(fragments):
                old_style, old_text, *_ = fragments[i]
                fragments[i] = (old_style + selected_fragment, old_text)
            elif i == len(fragments):
                # Selection reaches past the end of the line: show it as a
                # highlighted trailing space.
                fragments.append((selected_fragment, " "))

        return Transformation(fragments)
class PasswordProcessor(Processor):
    """
    Processor that masks the input. (For passwords.)

    :param char: (string) Character to be used. "*" by default.
    """

    def __init__(self, char: str = "*") -> None:
        self.char = char

    def apply_transformation(self, ti: TransformationInput) -> Transformation:
        # Replace every fragment's text with the mask character, keeping the
        # style string and any extra (handler) tuple entries intact.
        masked = []
        for style, text, *handler in ti.fragments:
            masked.append((style, self.char * len(text), *handler))
        return Transformation(cast(StyleAndTextTuples, masked))
class HighlightMatchingBracketProcessor(Processor):
    """
    When the cursor is on or right after a bracket, it highlights the matching
    bracket.

    :param max_cursor_distance: Only highlight matching brackets when the
        cursor is within this distance. (From inside a `Processor`, we can't
        know which lines will be visible on the screen. But we also don't want
        to scan the whole document for matching brackets on each key press, so
        we limit to this value.)
    """

    _closing_braces = "])}>"

    def __init__(
        self, chars: str = "[](){}<>", max_cursor_distance: int = 1000
    ) -> None:
        self.chars = chars
        self.max_cursor_distance = max_cursor_distance

        # Cache of highlight positions, keyed on (render counter, text,
        # cursor position) -- see apply_transformation.
        self._positions_cache: SimpleCache[
            Hashable, List[Tuple[int, int]]
        ] = SimpleCache(maxsize=8)

    def _get_positions_to_highlight(self, document: Document) -> List[Tuple[int, int]]:
        """
        Return a list of (row, col) tuples that need to be highlighted.
        """
        pos: Optional[int]

        # Try for the character under the cursor.
        if document.current_char and document.current_char in self.chars:
            pos = document.find_matching_bracket_position(
                start_pos=document.cursor_position - self.max_cursor_distance,
                end_pos=document.cursor_position + self.max_cursor_distance,
            )

        # Try for the character before the cursor.
        elif (
            document.char_before_cursor
            and document.char_before_cursor in self._closing_braces
            and document.char_before_cursor in self.chars
        ):
            # Rebind 'document' with the cursor shifted one position left, so
            # that the bracket before the cursor becomes the current char.
            document = Document(document.text, document.cursor_position - 1)

            pos = document.find_matching_bracket_position(
                start_pos=document.cursor_position - self.max_cursor_distance,
                end_pos=document.cursor_position + self.max_cursor_distance,
            )
        else:
            pos = None

        # Return a list of (row, col) tuples that need to be highlighted.
        if pos:
            pos += document.cursor_position  # pos is relative.
            row, col = document.translate_index_to_position(pos)
            return [
                (row, col),
                (document.cursor_position_row, document.cursor_position_col),
            ]
        else:
            return []

    def apply_transformation(
        self, transformation_input: TransformationInput
    ) -> Transformation:
        (
            buffer_control,
            document,
            lineno,
            source_to_display,
            fragments,
            _,
            _,
        ) = transformation_input.unpack()

        # When the application is in the 'done' state, don't highlight.
        if get_app().is_done:
            return Transformation(fragments)

        # Get the highlight positions.
        # Cache on render counter so positions are computed at most once per
        # redraw of a given (text, cursor position) state.
        key = (get_app().render_counter, document.text, document.cursor_position)
        positions = self._positions_cache.get(
            key, lambda: self._get_positions_to_highlight(document)
        )

        # Apply if positions were found at this line.
        if positions:
            for row, col in positions:
                if row == lineno:
                    col = source_to_display(col)
                    fragments = explode_text_fragments(fragments)
                    style, text, *_ = fragments[col]

                    if col == document.cursor_position_col:
                        style += " class:matching-bracket.cursor "
                    else:
                        style += " class:matching-bracket.other "

                    fragments[col] = (style, text)

        return Transformation(fragments)
class DisplayMultipleCursors(Processor):
    """
    When we're in Vi block insert mode, display all the cursors.
    """

    def apply_transformation(
        self, transformation_input: TransformationInput
    ) -> Transformation:
        (
            buffer_control,
            document,
            lineno,
            source_to_display,
            fragments,
            _,
            _,
        ) = transformation_input.unpack()

        if not vi_insert_multiple_mode():
            return Transformation(fragments)

        buff = buffer_control.buffer
        cursor_positions = buff.multiple_cursor_positions
        fragments = explode_text_fragments(fragments)

        # Range of document positions covered by this line.
        start_pos = document.translate_row_col_to_index(lineno, 0)
        end_pos = start_pos + len(document.lines[lineno])

        fragment_suffix = " class:multiple-cursors"

        # Highlight every cursor that falls within this line.
        for p in cursor_positions:
            if not (start_pos <= p <= end_pos):
                continue
            column = source_to_display(p - start_pos)

            try:
                style, text, *_ = fragments[column]
            except IndexError:
                # Cursor sits just after the end of the line: append a
                # highlighted space to make it visible.
                fragments.append((fragment_suffix, " "))
            else:
                fragments[column] = (style + fragment_suffix, text)

        return Transformation(fragments)
class BeforeInput(Processor):
    """
    Insert text before the input.

    :param text: This can be either plain text or formatted text
        (or a callable that returns any of those).
    :param style: style to be applied to this prompt/prefix.
    """

    def __init__(self, text: AnyFormattedText, style: str = "") -> None:
        self.text = text
        self.style = style

    def apply_transformation(self, ti: TransformationInput) -> Transformation:
        source_to_display: Optional[SourceToDisplay] = None
        display_to_source: Optional[DisplayToSource] = None
        fragments = ti.fragments

        if ti.lineno == 0:
            # Prepend the (formatted) prompt text to the first line and shift
            # both cursor-position mappings by its length.
            prefix = to_formatted_text(self.text, self.style)
            shift = fragment_list_len(prefix)
            fragments = prefix + fragments

            def source_to_display(i: int) -> int:
                return i + shift

            def display_to_source(i: int) -> int:
                return i - shift

        return Transformation(
            fragments,
            source_to_display=source_to_display,
            display_to_source=display_to_source,
        )

    def __repr__(self) -> str:
        return "BeforeInput(%r, %r)" % (self.text, self.style)
class ShowArg(BeforeInput):
    """
    Display the 'arg' in front of the input.

    This was used by the `PromptSession`, but now it uses the
    `Window.get_line_prefix` function instead.
    """

    def __init__(self) -> None:
        super().__init__(self._get_text_fragments)

    def _get_text_fragments(self) -> StyleAndTextTuples:
        # No repeat-count argument active: show nothing.
        arg = get_app().key_processor.arg
        if arg is None:
            return []

        return [
            ("class:prompt.arg", "(arg: "),
            ("class:prompt.arg.text", str(arg)),
            ("class:prompt.arg", ") "),
        ]

    def __repr__(self) -> str:
        return "ShowArg()"
class AfterInput(Processor):
    """
    Insert text after the input.

    :param text: This can be either plain text or formatted text
        (or a callable that returns any of those).
    :param style: style to be applied to this prompt/prefix.
    """

    def __init__(self, text: AnyFormattedText, style: str = "") -> None:
        self.text = text
        self.style = style

    def apply_transformation(self, ti: TransformationInput) -> Transformation:
        # Only the very last line gets the suffix appended.
        if ti.lineno != ti.document.line_count - 1:
            return Transformation(fragments=ti.fragments)

        suffix = to_formatted_text(self.text, self.style)
        return Transformation(fragments=ti.fragments + suffix)

    def __repr__(self) -> str:
        return "%s(%r, style=%r)" % (self.__class__.__name__, self.text, self.style)
class AppendAutoSuggestion(Processor):
    """
    Append the auto suggestion to the input.
    (The user can then press the right arrow to insert the suggestion.)
    """

    def __init__(self, style: str = "class:auto-suggestion") -> None:
        self.style = style

    def apply_transformation(self, ti: TransformationInput) -> Transformation:
        # The suggestion is rendered after the last line only, and only when
        # the cursor sits at the very end of the input.
        if ti.lineno != ti.document.line_count - 1:
            return Transformation(fragments=ti.fragments)

        buffer = ti.buffer_control.buffer
        if buffer.suggestion and ti.document.is_cursor_at_the_end:
            suggestion = buffer.suggestion.text
        else:
            suggestion = ""

        return Transformation(fragments=ti.fragments + [(self.style, suggestion)])
class ShowLeadingWhiteSpaceProcessor(Processor):
    """
    Make leading whitespace visible.

    :param get_char: Callable that returns one character.
    """

    def __init__(
        self,
        get_char: Optional[Callable[[], str]] = None,
        style: str = "class:leading-whitespace",
    ) -> None:
        def default_get_char() -> str:
            # Fall back to "." when the output encoding cannot represent the
            # middle-dot character.
            if "\xb7".encode(get_app().output.encoding(), "replace") == b"?":
                return "."
            else:
                return "\xb7"

        self.style = style
        self.get_char = get_char or default_get_char

    def apply_transformation(self, ti: TransformationInput) -> Transformation:
        fragments = ti.fragments

        # Replace the run of leading spaces (if any) by the marker character.
        if fragments and fragment_list_to_text(fragments).startswith(" "):
            marker = (self.style, self.get_char())
            fragments = explode_text_fragments(fragments)

            for i, fragment in enumerate(fragments):
                if fragment[1] != " ":
                    break
                fragments[i] = marker

        return Transformation(fragments)
class ShowTrailingWhiteSpaceProcessor(Processor):
    """
    Make trailing whitespace visible.

    :param get_char: Callable that returns one character.
    :param style: Style string applied to the marker characters.
    """

    def __init__(
        self,
        get_char: Optional[Callable[[], str]] = None,
        # Bug fix: this default used to read "class:training-whitespace"
        # (typo), so a "trailing-whitespace" style rule never matched.
        style: str = "class:trailing-whitespace",
    ) -> None:
        def default_get_char() -> str:
            # Fall back to "." when the output encoding cannot represent the
            # middle-dot character.
            if "\xb7".encode(get_app().output.encoding(), "replace") == b"?":
                return "."
            else:
                return "\xb7"

        self.style = style
        self.get_char = get_char or default_get_char

    def apply_transformation(self, ti: TransformationInput) -> Transformation:
        fragments = ti.fragments

        if fragments and fragments[-1][1].endswith(" "):
            t = (self.style, self.get_char())
            fragments = explode_text_fragments(fragments)

            # Walk backwards through all the fragments and replace whitespace.
            for i in range(len(fragments) - 1, -1, -1):
                char = fragments[i][1]
                if char == " ":
                    fragments[i] = t
                else:
                    break

        return Transformation(fragments)
class TabsProcessor(Processor):
    """
    Render tabs as spaces (instead of ^I) or make them visible (for instance,
    by replacing them with dots.)
    :param tabstop: Horizontal space taken by a tab. (`int` or callable that
        returns an `int`).
    :param char1: Character or callable that returns a character (text of
        length one). This one is used for the first space taken by the tab.
    :param char2: Like `char1`, but for the rest of the space.
    """
    def __init__(
        self,
        tabstop: Union[int, Callable[[], int]] = 4,
        char1: Union[str, Callable[[], str]] = "|",
        char2: Union[str, Callable[[], str]] = "\u2508",
        style: str = "class:tab",
    ) -> None:
        self.char1 = char1
        self.char2 = char2
        self.tabstop = tabstop
        self.style = style
    def apply_transformation(self, ti: TransformationInput) -> Transformation:
        """Expand tabs and build a bidirectional cursor-position mapping."""
        tabstop = to_int(self.tabstop)
        style = self.style
        # Create separator for tabs.
        separator1 = to_str(self.char1)
        separator2 = to_str(self.char2)
        # Transform fragments.
        fragments = explode_text_fragments(ti.fragments)
        # Maps each source fragment index to its display column.
        position_mappings = {}
        result_fragments: StyleAndTextTuples = []
        pos = 0
        for i, fragment_and_text in enumerate(fragments):
            position_mappings[i] = pos
            if fragment_and_text[1] == "\t":
                # Calculate how many characters we have to insert.
                count = tabstop - (pos % tabstop)
                if count == 0:
                    count = tabstop
                # Insert tab.
                result_fragments.append((style, separator1))
                result_fragments.append((style, separator2 * (count - 1)))
                pos += count
            else:
                result_fragments.append(fragment_and_text)
                pos += 1
        position_mappings[len(fragments)] = pos
        # Add `pos+1` to mapping, because the cursor can be right after the
        # line as well.
        position_mappings[len(fragments) + 1] = pos + 1
        def source_to_display(from_position: int) -> int:
            " Maps original cursor position to the new one. "
            return position_mappings[from_position]
        def display_to_source(display_pos: int) -> int:
            " Maps display cursor position to the original one. "
            # Display columns inside an expanded tab have no direct entry;
            # scan left until a known column is found, mapping them back to
            # the tab character itself.
            position_mappings_reversed = {v: k for k, v in position_mappings.items()}
            while display_pos >= 0:
                try:
                    return position_mappings_reversed[display_pos]
                except KeyError:
                    display_pos -= 1
            return 0
        return Transformation(
            result_fragments,
            source_to_display=source_to_display,
            display_to_source=display_to_source,
        )
class ReverseSearchProcessor(Processor):
    """
    Process to display the "(reverse-i-search)`...`:..." stuff around
    the search buffer.
    Note: This processor is meant to be applied to the BufferControl that
    contains the search buffer, it's not meant for the original input.
    """
    # Processors of the searched-through control that must not run while
    # emulating it for the search preview (they would duplicate highlighting
    # or re-insert surrounding text).
    _excluded_input_processors: List[Type[Processor]] = [
        HighlightSearchProcessor,
        HighlightSelectionProcessor,
        BeforeInput,
        AfterInput,
    ]
    def _get_main_buffer(
        self, buffer_control: "BufferControl"
    ) -> Optional["BufferControl"]:
        """Return the control being searched through, or None when
        `buffer_control` is not the search control of the current target."""
        from prompt_toolkit.layout.controls import BufferControl
        prev_control = get_app().layout.search_target_buffer_control
        if (
            isinstance(prev_control, BufferControl)
            and prev_control.search_buffer_control == buffer_control
        ):
            return prev_control
        return None
    def _content(
        self, main_control: "BufferControl", ti: TransformationInput
    ) -> "UIContent":
        """Render the searched-through control's content with the current
        incremental-search match highlighted."""
        from prompt_toolkit.layout.controls import BufferControl
        # Emulate the BufferControl through which we are searching.
        # For this we filter out some of the input processors.
        excluded_processors = tuple(self._excluded_input_processors)
        def filter_processor(item: Processor) -> Optional[Processor]:
            """Filter processors from the main control that we want to disable
            here. This returns either an accepted processor or None."""
            # For a `_MergedProcessor`, check each individual processor, recursively.
            if isinstance(item, _MergedProcessor):
                accepted_processors = [filter_processor(p) for p in item.processors]
                return merge_processors(
                    [p for p in accepted_processors if p is not None]
                )
            # For a `ConditionalProcessor`, check the body.
            elif isinstance(item, ConditionalProcessor):
                p = filter_processor(item.processor)
                if p:
                    return ConditionalProcessor(p, item.filter)
            # Otherwise, check the processor itself.
            else:
                if not isinstance(item, excluded_processors):
                    return item
            return None
        filtered_processor = filter_processor(
            merge_processors(main_control.input_processors or [])
        )
        highlight_processor = HighlightIncrementalSearchProcessor()
        if filtered_processor:
            new_processors = [filtered_processor, highlight_processor]
        else:
            new_processors = [highlight_processor]
        from .controls import SearchBufferControl
        assert isinstance(ti.buffer_control, SearchBufferControl)
        buffer_control = BufferControl(
            buffer=main_control.buffer,
            input_processors=new_processors,
            include_default_input_processors=False,
            lexer=main_control.lexer,
            preview_search=True,
            search_buffer_control=cast(SearchBufferControl, ti.buffer_control),
        )
        return buffer_control.create_content(ti.width, ti.height, preview_search=True)
    def apply_transformation(self, ti: TransformationInput) -> Transformation:
        """Wrap line 0 of the search buffer in the "(reverse-i-search)`..`: .."
        decoration; all other lines pass through unchanged."""
        from .controls import SearchBufferControl
        assert isinstance(
            ti.buffer_control, SearchBufferControl
        ), "`ReverseSearchProcessor` should be applied to a `SearchBufferControl` only."
        source_to_display: Optional[SourceToDisplay]
        display_to_source: Optional[DisplayToSource]
        main_control = self._get_main_buffer(ti.buffer_control)
        if ti.lineno == 0 and main_control:
            content = self._content(main_control, ti)
            # Get the line from the original document for this search.
            line_fragments = content.get_line(content.cursor_position.y)
            if main_control.search_state.direction == SearchDirection.FORWARD:
                direction_text = "i-search"
            else:
                direction_text = "reverse-i-search"
            fragments_before: StyleAndTextTuples = [
                ("class:prompt.search", "("),
                ("class:prompt.search", direction_text),
                ("class:prompt.search", ")`"),
            ]
            fragments = (
                fragments_before
                + [
                    ("class:prompt.search.text", fragment_list_to_text(ti.fragments)),
                    ("", "': "),
                ]
                + line_fragments
            )
            # Cursor positions shift by the length of the inserted prefix.
            shift_position = fragment_list_len(fragments_before)
            source_to_display = lambda i: i + shift_position
            display_to_source = lambda i: i - shift_position
        else:
            source_to_display = None
            display_to_source = None
            fragments = ti.fragments
        return Transformation(
            fragments,
            source_to_display=source_to_display,
            display_to_source=display_to_source,
        )
class ConditionalProcessor(Processor):
    """
    Processor that applies another processor, according to a certain condition.
    Example::
        # Create a function that returns whether or not the processor should
        # currently be applied.
        def highlight_enabled():
            return true_or_false
        # Wrapped it in a `ConditionalProcessor` for usage in a `BufferControl`.
        BufferControl(input_processors=[
            ConditionalProcessor(HighlightSearchProcessor(),
                                 Condition(highlight_enabled))])
    :param processor: :class:`.Processor` instance.
    :param filter: :class:`~prompt_toolkit.filters.Filter` instance.
    """

    def __init__(self, processor: Processor, filter: FilterOrBool) -> None:
        self.processor = processor
        self.filter = to_filter(filter)

    def apply_transformation(
        self, transformation_input: TransformationInput
    ) -> Transformation:
        # When the filter is off, pass the fragments through untouched;
        # otherwise delegate to the wrapped processor.
        if not self.filter():
            return Transformation(transformation_input.fragments)
        return self.processor.apply_transformation(transformation_input)

    def __repr__(self) -> str:
        return "{}(processor={!r}, filter={!r})".format(
            self.__class__.__name__,
            self.processor,
            self.filter,
        )
class DynamicProcessor(Processor):
    """
    Processor class that dynamically returns any Processor.
    :param get_processor: Callable that returns a :class:`.Processor` instance.
    """

    def __init__(self, get_processor: Callable[[], Optional[Processor]]) -> None:
        self.get_processor = get_processor

    def apply_transformation(self, ti: TransformationInput) -> Transformation:
        # Fall back to a no-op processor when the callable yields None.
        delegate = self.get_processor() or DummyProcessor()
        return delegate.apply_transformation(ti)
def merge_processors(processors: List[Processor]) -> Processor:
    """
    Merge multiple `Processor` objects into one.
    """
    if not processors:
        # Nothing given: return a processor that does nothing.
        return DummyProcessor()
    if len(processors) == 1:
        # A single processor needs no wrapping.
        return processors[0]
    return _MergedProcessor(processors)
class _MergedProcessor(Processor):
    """
    Processor that groups multiple other `Processor` objects, but exposes an
    API as if it is one `Processor`.
    """
    def __init__(self, processors: List[Processor]):
        self.processors = processors
    def apply_transformation(self, ti: TransformationInput) -> Transformation:
        """Apply every processor in order, threading the fragments through
        and chaining the position-mapping functions in both directions."""
        source_to_display_functions = [ti.source_to_display]
        display_to_source_functions = []
        fragments = ti.fragments
        def source_to_display(i: int) -> int:
            """Translate x position from the buffer to the x position in the
            processor fragments list."""
            # Note: reads the growing list, so each processor sees the
            # composition of everything applied before it.
            for f in source_to_display_functions:
                i = f(i)
            return i
        for p in self.processors:
            transformation = p.apply_transformation(
                TransformationInput(
                    ti.buffer_control,
                    ti.document,
                    ti.lineno,
                    source_to_display,
                    fragments,
                    ti.width,
                    ti.height,
                )
            )
            fragments = transformation.fragments
            display_to_source_functions.append(transformation.display_to_source)
            source_to_display_functions.append(transformation.source_to_display)
        def display_to_source(i: int) -> int:
            # Undo the mappings in reverse order of application.
            for f in reversed(display_to_source_functions):
                i = f(i)
            return i
        # In the case of a nested _MergedProcessor, each processor wants to
        # receive a 'source_to_display' function (as part of the
        # TransformationInput) that has everything in the chain before
        # included, because it can be called as part of the
        # `apply_transformation` function. However, this first
        # `source_to_display` should not be part of the output that we are
        # returning. (This is the most consistent with `display_to_source`.)
        del source_to_display_functions[:1]
        return Transformation(fragments, source_to_display, display_to_source)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import UserList
import io
import pathlib
import pytest
import socket
import threading
import weakref
import numpy as np
import pyarrow as pa
from pyarrow.tests.util import changed_environ
try:
from pandas.testing import assert_frame_equal, assert_series_equal
import pandas as pd
except ImportError:
pass
class IpcFixture:
    """Base helper that writes Arrow IPC record batches into a sink.

    Subclasses implement ``_get_writer`` to pick the file or stream format.
    """
    # WriteStats of the most recent write_batches() call (None before any).
    write_stats = None
    def __init__(self, sink_factory=lambda: io.BytesIO()):
        self._sink_factory = sink_factory
        self.sink = self.get_sink()
    def get_sink(self):
        """Create a fresh sink from the factory."""
        return self._sink_factory()
    def get_source(self):
        """Return everything written to the sink so far."""
        return self.sink.getvalue()
    def write_batches(self, num_batches=5, as_table=False):
        """Write ``num_batches`` random 5-row batches and return them.

        When ``as_table`` is true, write them in one ``write_table`` call
        instead of batch-by-batch.
        """
        nrows = 5
        schema = pa.schema([('one', pa.float64()), ('two', pa.utf8())])
        writer = self._get_writer(self.sink, schema)
        batches = []
        for i in range(num_batches):
            batch = pa.record_batch(
                [np.random.randn(nrows),
                 ['foo', None, 'bar', 'bazbaz', 'qux']],
                schema=schema)
            batches.append(batch)
        if as_table:
            table = pa.Table.from_batches(batches)
            writer.write_table(table)
        else:
            for batch in batches:
                writer.write_batch(batch)
        self.write_stats = writer.stats
        writer.close()
        return batches
class FileFormatFixture(IpcFixture):
    """IpcFixture writing the random-access (file) IPC format."""
    def _get_writer(self, sink, schema):
        return pa.ipc.new_file(sink, schema)
    def _check_roundtrip(self, as_table=False):
        """Write batches, read them back, and compare data and stats."""
        batches = self.write_batches(as_table=as_table)
        file_contents = pa.BufferReader(self.get_source())
        reader = pa.ipc.open_file(file_contents)
        assert reader.num_record_batches == len(batches)
        for i, batch in enumerate(batches):
            # Random access: fetch batch i and compare to what was written.
            batch = reader.get_batch(i)
            assert batches[i].equals(batch)
        assert reader.schema.equals(batches[0].schema)
        assert isinstance(reader.stats, pa.ipc.ReadStats)
        assert isinstance(self.write_stats, pa.ipc.WriteStats)
        assert tuple(reader.stats) == tuple(self.write_stats)
class StreamFormatFixture(IpcFixture):
    """IpcFixture writing the streaming IPC format."""
    # ARROW-6474, for testing writing old IPC protocol with 4-byte prefix
    use_legacy_ipc_format = False
    # ARROW-9395, for testing writing old metadata version
    options = None
    def _get_writer(self, sink, schema):
        return pa.ipc.new_stream(
            sink,
            schema,
            use_legacy_format=self.use_legacy_ipc_format,
            options=self.options,
        )
class MessageFixture(IpcFixture):
    """IpcFixture using the low-level RecordBatchStreamWriter directly."""
    def _get_writer(self, sink, schema):
        return pa.RecordBatchStreamWriter(sink, schema)
@pytest.fixture
def ipc_fixture():
    """Fresh in-memory IPC fixture."""
    return IpcFixture()
@pytest.fixture
def file_fixture():
    """Fresh in-memory file-format fixture."""
    return FileFormatFixture()
@pytest.fixture
def stream_fixture():
    """Fresh in-memory stream-format fixture."""
    return StreamFormatFixture()
def test_empty_file():
    """Opening an empty buffer as an IPC file must raise ArrowInvalid."""
    buf = b''
    with pytest.raises(pa.ArrowInvalid):
        pa.ipc.open_file(pa.BufferReader(buf))
def test_file_simple_roundtrip(file_fixture):
    """Batch-by-batch write followed by read reproduces the batches."""
    file_fixture._check_roundtrip(as_table=False)
def test_file_write_table(file_fixture):
    """Writing via write_table round-trips like batch-by-batch writes."""
    file_fixture._check_roundtrip(as_table=True)
@pytest.mark.parametrize("sink_factory", [
    lambda: io.BytesIO(),
    lambda: pa.BufferOutputStream()
])
def test_file_read_all(sink_factory):
    """read_all() returns one table equal to the concatenated batches."""
    fixture = FileFormatFixture(sink_factory)
    batches = fixture.write_batches()
    file_contents = pa.BufferReader(fixture.get_source())
    reader = pa.ipc.open_file(file_contents)
    result = reader.read_all()
    expected = pa.Table.from_batches(batches)
    assert result.equals(expected)
def test_open_file_from_buffer(file_fixture):
    """All three file-reader entry points accept buffer-protocol sources."""
    # ARROW-2859; APIs accept the buffer protocol
    file_fixture.write_batches()
    source = file_fixture.get_source()
    reader1 = pa.ipc.open_file(source)
    reader2 = pa.ipc.open_file(pa.BufferReader(source))
    reader3 = pa.RecordBatchFileReader(source)
    result1 = reader1.read_all()
    result2 = reader2.read_all()
    result3 = reader3.read_all()
    assert result1.equals(result2)
    assert result1.equals(result3)
    st1 = reader1.stats
    # 1 schema message + 5 record batches.
    assert st1.num_messages == 6
    assert st1.num_record_batches == 5
    assert reader2.stats == st1
    assert reader3.stats == st1
@pytest.mark.pandas
def test_file_read_pandas(file_fixture):
    """read_pandas() equals the concatenation of the batches' DataFrames."""
    frames = [batch.to_pandas() for batch in file_fixture.write_batches()]
    file_contents = pa.BufferReader(file_fixture.get_source())
    reader = pa.ipc.open_file(file_contents)
    result = reader.read_pandas()
    expected = pd.concat(frames).reset_index(drop=True)
    assert_frame_equal(result, expected)
def test_file_pathlib(file_fixture, tmpdir):
    """Opening via pathlib.Path gives the same table as via pa.OSFile."""
    file_fixture.write_batches()
    data = file_fixture.get_source()
    path = tmpdir.join('file.arrow').strpath
    pathlib.Path(path).write_bytes(data)
    table_from_path = pa.ipc.open_file(pathlib.Path(path)).read_all()
    table_from_osfile = pa.ipc.open_file(pa.OSFile(path)).read_all()
    assert table_from_path.equals(table_from_osfile)
def test_empty_stream():
    """Opening a zero-byte stream must raise ArrowInvalid."""
    empty = io.BytesIO(b'')
    with pytest.raises(pa.ArrowInvalid):
        pa.ipc.open_stream(empty)
@pytest.mark.pandas
def test_stream_categorical_roundtrip(stream_fixture):
    """Categorical (dictionary) columns survive a stream round trip."""
    df = pd.DataFrame({
        'one': np.random.randn(5),
        'two': pd.Categorical(['foo', np.nan, 'bar', 'foo', 'foo'],
                              categories=['foo', 'bar'],
                              ordered=True)
    })
    batch = pa.RecordBatch.from_pandas(df)
    with stream_fixture._get_writer(stream_fixture.sink, batch.schema) as wr:
        wr.write_batch(batch)
    table = (pa.ipc.open_stream(pa.BufferReader(stream_fixture.get_source()))
             .read_all())
    assert_frame_equal(table.to_pandas(), df)
def test_open_stream_from_buffer(stream_fixture):
    """All three stream-reader entry points accept buffer-protocol sources."""
    # ARROW-2859
    stream_fixture.write_batches()
    source = stream_fixture.get_source()
    reader1 = pa.ipc.open_stream(source)
    reader2 = pa.ipc.open_stream(pa.BufferReader(source))
    reader3 = pa.RecordBatchStreamReader(source)
    result1 = reader1.read_all()
    result2 = reader2.read_all()
    result3 = reader3.read_all()
    assert result1.equals(result2)
    assert result1.equals(result3)
    st1 = reader1.stats
    # 1 schema message + 5 record batches.
    assert st1.num_messages == 6
    assert st1.num_record_batches == 5
    assert reader2.stats == st1
    assert reader3.stats == st1
    assert tuple(st1) == tuple(stream_fixture.write_stats)
@pytest.mark.pandas
def test_stream_write_dispatch(stream_fixture):
    """write() dispatches correctly for both Table and RecordBatch."""
    # ARROW-1616
    df = pd.DataFrame({
        'one': np.random.randn(5),
        'two': pd.Categorical(['foo', np.nan, 'bar', 'foo', 'foo'],
                              categories=['foo', 'bar'],
                              ordered=True)
    })
    table = pa.Table.from_pandas(df, preserve_index=False)
    batch = pa.RecordBatch.from_pandas(df, preserve_index=False)
    with stream_fixture._get_writer(stream_fixture.sink, table.schema) as wr:
        wr.write(table)
        wr.write(batch)
    table = (pa.ipc.open_stream(pa.BufferReader(stream_fixture.get_source()))
             .read_all())
    assert_frame_equal(table.to_pandas(),
                       pd.concat([df, df], ignore_index=True))
@pytest.mark.pandas
def test_stream_write_table_batches(stream_fixture):
    """write_table(max_chunksize=...) splits batches at the requested size."""
    # ARROW-504
    df = pd.DataFrame({
        'one': np.random.randn(20),
    })
    b1 = pa.RecordBatch.from_pandas(df[:10], preserve_index=False)
    b2 = pa.RecordBatch.from_pandas(df, preserve_index=False)
    table = pa.Table.from_batches([b1, b2, b1])
    with stream_fixture._get_writer(stream_fixture.sink, table.schema) as wr:
        wr.write_table(table, max_chunksize=15)
    batches = list(pa.ipc.open_stream(stream_fixture.get_source()))
    # The 20-row middle batch is split into 15 + 5.
    assert list(map(len, batches)) == [10, 15, 5, 10]
    result_table = pa.Table.from_batches(batches)
    assert_frame_equal(result_table.to_pandas(),
                       pd.concat([df[:10], df, df[:10]],
                                 ignore_index=True))
@pytest.mark.parametrize('use_legacy_ipc_format', [False, True])
def test_stream_simple_roundtrip(stream_fixture, use_legacy_ipc_format):
    """Streams round-trip with both the current and legacy IPC formats."""
    stream_fixture.use_legacy_ipc_format = use_legacy_ipc_format
    batches = stream_fixture.write_batches()
    file_contents = pa.BufferReader(stream_fixture.get_source())
    reader = pa.ipc.open_stream(file_contents)
    assert reader.schema.equals(batches[0].schema)
    total = 0
    for i, next_batch in enumerate(reader):
        assert next_batch.equals(batches[i])
        total += 1
    assert total == len(batches)
    # After exhaustion the reader signals end-of-stream.
    with pytest.raises(StopIteration):
        reader.read_next_batch()
@pytest.mark.zstd
def test_compression_roundtrip():
    """Compression settings (name and Codec object) round-trip data intact."""
    sink = io.BytesIO()
    values = np.random.randint(0, 10, 10000)
    table = pa.Table.from_arrays([values], names=["values"])
    options = pa.ipc.IpcWriteOptions(compression='zstd')
    with pa.ipc.RecordBatchFileWriter(
            sink, table.schema, options=options) as writer:
        writer.write_table(table)
    len1 = len(sink.getvalue())
    sink2 = io.BytesIO()
    codec = pa.Codec('zstd', compression_level=5)
    options = pa.ipc.IpcWriteOptions(compression=codec)
    with pa.ipc.RecordBatchFileWriter(
            sink2, table.schema, options=options) as writer:
        writer.write_table(table)
    len2 = len(sink2.getvalue())
    # In theory len2 should be less than len1 but for this test we just want
    # to ensure compression_level is being correctly passed down to the C++
    # layer so we don't really care if it makes it worse or better
    assert len2 != len1
    t1 = pa.ipc.open_file(sink).read_all()
    t2 = pa.ipc.open_file(sink2).read_all()
    assert t1 == t2
def test_write_options():
    """IpcWriteOptions attributes: defaults, setters, and validation."""
    options = pa.ipc.IpcWriteOptions()
    assert options.allow_64bit is False
    assert options.use_legacy_format is False
    assert options.metadata_version == pa.ipc.MetadataVersion.V5
    options.allow_64bit = True
    assert options.allow_64bit is True
    options.use_legacy_format = True
    assert options.use_legacy_format is True
    options.metadata_version = pa.ipc.MetadataVersion.V4
    assert options.metadata_version == pa.ipc.MetadataVersion.V4
    # Invalid metadata_version values must be rejected.
    for value in ('V5', 42):
        with pytest.raises((TypeError, ValueError)):
            options.metadata_version = value
    assert options.compression is None
    for value in ['lz4', 'zstd']:
        if pa.Codec.is_available(value):
            options.compression = value
            assert options.compression == value
            # Compression names are accepted case-insensitively.
            options.compression = value.upper()
            assert options.compression == value
    options.compression = None
    assert options.compression is None
    with pytest.raises(TypeError):
        options.compression = 0
    assert options.use_threads is True
    options.use_threads = False
    assert options.use_threads is False
    if pa.Codec.is_available('lz4'):
        options = pa.ipc.IpcWriteOptions(
            metadata_version=pa.ipc.MetadataVersion.V4,
            allow_64bit=True,
            use_legacy_format=True,
            compression='lz4',
            use_threads=False)
        assert options.metadata_version == pa.ipc.MetadataVersion.V4
        assert options.allow_64bit is True
        assert options.use_legacy_format is True
        assert options.compression == 'lz4'
        assert options.use_threads is False
def test_write_options_legacy_exclusive(stream_fixture):
    """Passing both options and use_legacy_format must raise ValueError."""
    with pytest.raises(
            ValueError,
            match="provide at most one of options and use_legacy_format"):
        stream_fixture.use_legacy_ipc_format = True
        stream_fixture.options = pa.ipc.IpcWriteOptions()
        stream_fixture.write_batches()
@pytest.mark.parametrize('options', [
    pa.ipc.IpcWriteOptions(),
    pa.ipc.IpcWriteOptions(allow_64bit=True),
    pa.ipc.IpcWriteOptions(use_legacy_format=True),
    pa.ipc.IpcWriteOptions(metadata_version=pa.ipc.MetadataVersion.V4),
    pa.ipc.IpcWriteOptions(use_legacy_format=True,
                           metadata_version=pa.ipc.MetadataVersion.V4),
])
def test_stream_options_roundtrip(stream_fixture, options):
    """Streams round-trip under each IpcWriteOptions combination."""
    stream_fixture.use_legacy_ipc_format = None
    stream_fixture.options = options
    batches = stream_fixture.write_batches()
    file_contents = pa.BufferReader(stream_fixture.get_source())
    # The first message carries the metadata version that was configured.
    message = pa.ipc.read_message(stream_fixture.get_source())
    assert message.metadata_version == options.metadata_version
    reader = pa.ipc.open_stream(file_contents)
    assert reader.schema.equals(batches[0].schema)
    total = 0
    for i, next_batch in enumerate(reader):
        assert next_batch.equals(batches[i])
        total += 1
    assert total == len(batches)
    with pytest.raises(StopIteration):
        reader.read_next_batch()
def test_dictionary_delta(stream_fixture):
    """Dictionary replacements vs. deltas, per emit_dictionary_deltas."""
    ty = pa.dictionary(pa.int8(), pa.utf8())
    data = [["foo", "foo", None],
            ["foo", "bar", "foo"],  # potential delta
            ["foo", "bar"],
            ["foo", None, "bar", "quux"],  # potential delta
            ["bar", "quux"],  # replacement
            ]
    batches = [
        pa.RecordBatch.from_arrays([pa.array(v, type=ty)], names=['dicts'])
        for v in data]
    schema = batches[0].schema
    def write_batches():
        # Write to a throwaway sink; only the writer stats matter here.
        with stream_fixture._get_writer(pa.MockOutputStream(),
                                        schema) as writer:
            for batch in batches:
                writer.write_batch(batch)
            return writer.stats
    st = write_batches()
    # Default: growing dictionaries are rewritten wholesale (replacements).
    assert st.num_record_batches == 5
    assert st.num_dictionary_batches == 4
    assert st.num_replaced_dictionaries == 3
    assert st.num_dictionary_deltas == 0
    stream_fixture.use_legacy_ipc_format = None
    stream_fixture.options = pa.ipc.IpcWriteOptions(
        emit_dictionary_deltas=True)
    st = write_batches()
    # With deltas enabled, the two "potential delta" cases become deltas.
    assert st.num_record_batches == 5
    assert st.num_dictionary_batches == 4
    assert st.num_replaced_dictionaries == 1
    assert st.num_dictionary_deltas == 2
def test_envvar_set_legacy_ipc_format():
    """Legacy-format environment variables drive writer defaults."""
    schema = pa.schema([pa.field('foo', pa.int32())])
    writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema)
    assert not writer._use_legacy_format
    assert writer._metadata_version == pa.ipc.MetadataVersion.V5
    writer = pa.ipc.new_file(pa.BufferOutputStream(), schema)
    assert not writer._use_legacy_format
    assert writer._metadata_version == pa.ipc.MetadataVersion.V5
    with changed_environ('ARROW_PRE_0_15_IPC_FORMAT', '1'):
        writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema)
        assert writer._use_legacy_format
        assert writer._metadata_version == pa.ipc.MetadataVersion.V5
        writer = pa.ipc.new_file(pa.BufferOutputStream(), schema)
        assert writer._use_legacy_format
        assert writer._metadata_version == pa.ipc.MetadataVersion.V5
    with changed_environ('ARROW_PRE_1_0_METADATA_VERSION', '1'):
        writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema)
        assert not writer._use_legacy_format
        assert writer._metadata_version == pa.ipc.MetadataVersion.V4
        writer = pa.ipc.new_file(pa.BufferOutputStream(), schema)
        assert not writer._use_legacy_format
        assert writer._metadata_version == pa.ipc.MetadataVersion.V4
    # Both variables together: legacy format AND V4 metadata.
    with changed_environ('ARROW_PRE_1_0_METADATA_VERSION', '1'):
        with changed_environ('ARROW_PRE_0_15_IPC_FORMAT', '1'):
            writer = pa.ipc.new_stream(pa.BufferOutputStream(), schema)
            assert writer._use_legacy_format
            assert writer._metadata_version == pa.ipc.MetadataVersion.V4
            writer = pa.ipc.new_file(pa.BufferOutputStream(), schema)
            assert writer._use_legacy_format
            assert writer._metadata_version == pa.ipc.MetadataVersion.V4
def test_stream_read_all(stream_fixture):
    """read_all() on a stream equals the table built from the batches."""
    written = stream_fixture.write_batches()
    reader = pa.ipc.open_stream(pa.BufferReader(stream_fixture.get_source()))
    result = reader.read_all()
    assert result.equals(pa.Table.from_batches(written))
@pytest.mark.pandas
def test_stream_read_pandas(stream_fixture):
    """read_pandas() equals the concatenation of the batches' DataFrames."""
    frames = [batch.to_pandas() for batch in stream_fixture.write_batches()]
    file_contents = stream_fixture.get_source()
    reader = pa.ipc.open_stream(file_contents)
    result = reader.read_pandas()
    expected = pd.concat(frames).reset_index(drop=True)
    assert_frame_equal(result, expected)
@pytest.fixture
def example_messages(stream_fixture):
    """Return (batches, low-level IPC messages) for a written stream."""
    batches = stream_fixture.write_batches()
    file_contents = stream_fixture.get_source()
    buf_reader = pa.BufferReader(file_contents)
    reader = pa.MessageReader.open_stream(buf_reader)
    return batches, list(reader)
def test_message_ctors_no_segfault():
    """Directly constructing Message/MessageReader raises, never crashes."""
    with pytest.raises(TypeError):
        repr(pa.Message())
    with pytest.raises(TypeError):
        repr(pa.MessageReader())
def test_message_reader(example_messages):
    """MessageReader yields 1 schema message then 5 record-batch messages."""
    _, messages = example_messages
    assert len(messages) == 6
    assert messages[0].type == 'schema'
    assert isinstance(messages[0].metadata, pa.Buffer)
    assert isinstance(messages[0].body, pa.Buffer)
    assert messages[0].metadata_version == pa.MetadataVersion.V5
    for msg in messages[1:]:
        assert msg.type == 'record batch'
        assert isinstance(msg.metadata, pa.Buffer)
        assert isinstance(msg.body, pa.Buffer)
        assert msg.metadata_version == pa.MetadataVersion.V5
def test_message_serialize_read_message(example_messages):
    """read_message() accepts buffers, readers, and bytes; detects errors."""
    _, messages = example_messages
    msg = messages[0]
    buf = msg.serialize()
    # Two serialized copies back-to-back: the reader is consumed twice.
    reader = pa.BufferReader(buf.to_pybytes() * 2)
    restored = pa.ipc.read_message(buf)
    restored2 = pa.ipc.read_message(reader)
    restored3 = pa.ipc.read_message(buf.to_pybytes())
    restored4 = pa.ipc.read_message(reader)
    assert msg.equals(restored)
    assert msg.equals(restored2)
    assert msg.equals(restored3)
    assert msg.equals(restored4)
    with pytest.raises(pa.ArrowInvalid, match="Corrupted message"):
        pa.ipc.read_message(pa.BufferReader(b'ab'))
    # The reader is now exhausted.
    with pytest.raises(EOFError):
        pa.ipc.read_message(reader)
@pytest.mark.gzip
def test_message_read_from_compressed(example_messages):
    """Messages can be read back through a compressed input stream."""
    # Part of ARROW-5910
    _, messages = example_messages
    for message in messages:
        raw_out = pa.BufferOutputStream()
        with pa.output_stream(raw_out, compression='gzip') as compressed_out:
            message.serialize_to(compressed_out)
        compressed_buf = raw_out.getvalue()
        result = pa.ipc.read_message(pa.input_stream(compressed_buf,
                                                     compression='gzip'))
        assert result.equals(message)
def test_message_read_record_batch(example_messages):
    """read_record_batch reconstructs each batch from its message + schema."""
    batches, messages = example_messages
    # messages[0] is the schema message; batches start at messages[1].
    for batch, message in zip(batches, messages[1:]):
        read_batch = pa.ipc.read_record_batch(message, batch.schema)
        assert read_batch.equals(batch)
def test_read_record_batch_on_stream_error_message():
    """Passing a whole stream (starting with a schema message) gives a
    helpful error from read_record_batch."""
    # ARROW-5374
    batch = pa.record_batch([pa.array([b"foo"], type=pa.utf8())],
                            names=['strs'])
    stream = pa.BufferOutputStream()
    with pa.ipc.new_stream(stream, batch.schema) as writer:
        writer.write_batch(batch)
    buf = stream.getvalue()
    with pytest.raises(IOError,
                       match="type record batch but got schema"):
        pa.ipc.read_record_batch(buf, batch.schema)
# ----------------------------------------------------------------------
# Socket streaming tests
class StreamReaderServer(threading.Thread):
    """Background thread that accepts one connection and reads an IPC stream.

    Usage: call ``init()`` first to bind a listening socket (it returns the
    chosen port), then ``start()``.  After joining the thread, fetch what was
    read with ``get_result()``.
    """

    def init(self, do_read_all):
        """Bind and listen on an ephemeral local port; return the port."""
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._sock.bind(('127.0.0.1', 0))
        self._sock.listen(1)
        _, port = self._sock.getsockname()
        self._do_read_all = do_read_all
        self._schema = None
        self._batches = []
        self._table = None
        return port

    def run(self):
        connection, client_address = self._sock.accept()
        try:
            source = connection.makefile(mode='rb')
            reader = pa.ipc.open_stream(source)
            self._schema = reader.schema
            if self._do_read_all:
                self._table = reader.read_all()
            else:
                # Consume the stream batch by batch until end-of-stream.
                for batch in reader:
                    self._batches.append(batch)
        finally:
            connection.close()
            # Close the listening socket too so the test doesn't leak it.
            self._sock.close()

    def get_result(self):
        """Return (schema, table) or (schema, batches), per do_read_all."""
        return (self._schema, self._table if self._do_read_all
                else self._batches)
class SocketStreamFixture(IpcFixture):
    """IpcFixture whose sink is a client socket to a StreamReaderServer."""
    def __init__(self):
        # XXX(wesm): test will decide when to start socket server. This should
        # probably be refactored
        pass
    def start_server(self, do_read_all):
        """Start the reader server and connect a client socket to it."""
        self._server = StreamReaderServer()
        port = self._server.init(do_read_all)
        self._server.start()
        self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._sock.connect(('127.0.0.1', port))
        self.sink = self.get_sink()
    def stop_and_get_result(self):
        """Terminate the stream and return what the server read."""
        import struct
        # Writes an 8-byte zero — presumably the end-of-stream marker the
        # reader expects; confirm against the IPC stream framing spec.
        self.sink.write(struct.pack('Q', 0))
        self.sink.flush()
        self._sock.close()
        self._server.join()
        return self._server.get_result()
    def get_sink(self):
        return self._sock.makefile(mode='wb')
    def _get_writer(self, sink, schema):
        return pa.RecordBatchStreamWriter(sink, schema)
@pytest.fixture
def socket_fixture():
    """Fresh socket-backed stream fixture (server started by the test)."""
    return SocketStreamFixture()
def test_socket_simple_roundtrip(socket_fixture):
    """Batches written over a socket arrive intact, batch by batch."""
    socket_fixture.start_server(do_read_all=False)
    writer_batches = socket_fixture.write_batches()
    reader_schema, reader_batches = socket_fixture.stop_and_get_result()
    assert reader_schema.equals(writer_batches[0].schema)
    assert len(reader_batches) == len(writer_batches)
    for i, batch in enumerate(writer_batches):
        assert reader_batches[i].equals(batch)
def test_socket_read_all(socket_fixture):
    """read_all() over a socket returns one table with all batches."""
    socket_fixture.start_server(do_read_all=True)
    writer_batches = socket_fixture.write_batches()
    _, result = socket_fixture.stop_and_get_result()
    expected = pa.Table.from_batches(writer_batches)
    assert result.equals(expected)
# ----------------------------------------------------------------------
# Miscellaneous IPC tests
@pytest.mark.pandas
def test_ipc_file_stream_has_eos():
    """A file-format payload embeds a valid stream (ending with EOS)."""
    # ARROW-5395
    df = pd.DataFrame({'foo': [1.5]})
    batch = pa.RecordBatch.from_pandas(df)
    sink = pa.BufferOutputStream()
    # write_file is defined elsewhere in this module — presumably writes
    # the batch in the file format; confirm at its definition.
    write_file(batch, sink)
    buffer = sink.getvalue()
    # skip the file magic
    reader = pa.ipc.open_stream(buffer[8:])
    # will fail if encounters footer data instead of eos
    rdf = reader.read_pandas()
    assert_frame_equal(df, rdf)
@pytest.mark.pandas
def test_ipc_zero_copy_numpy():
    """Data read back from an in-memory IPC file matches the original."""
    df = pd.DataFrame({'foo': [1.5]})
    batch = pa.RecordBatch.from_pandas(df)
    sink = pa.BufferOutputStream()
    # write_file / read_file are helpers defined elsewhere in this module.
    write_file(batch, sink)
    buffer = sink.getvalue()
    reader = pa.BufferReader(buffer)
    batches = read_file(reader)
    data = batches[0].to_pandas()
    rdf = pd.DataFrame(data)
    assert_frame_equal(df, rdf)
def test_ipc_stream_no_batches():
    """A stream with only a schema yields an empty table with that schema."""
    # ARROW-2307
    table = pa.Table.from_arrays([pa.array([1, 2, 3, 4]),
                                  pa.array(['foo', 'bar', 'baz', 'qux'])],
                                 names=['a', 'b'])
    sink = pa.BufferOutputStream()
    with pa.ipc.new_stream(sink, table.schema):
        pass
    source = sink.getvalue()
    with pa.ipc.open_stream(source) as reader:
        result = reader.read_all()
    assert result.schema.equals(table.schema)
    assert len(result) == 0
@pytest.mark.pandas
def test_get_record_batch_size():
    """Serialized batch size must exceed the raw payload size."""
    N = 10
    itemsize = 8
    df = pd.DataFrame({'foo': np.random.randn(N)})
    batch = pa.RecordBatch.from_pandas(df)
    assert pa.ipc.get_record_batch_size(batch) > (N * itemsize)
@pytest.mark.pandas
def _check_serialize_pandas_round_trip(df, use_threads=False):
    """Assert serialize_pandas/deserialize_pandas round-trips ``df``."""
    buf = pa.serialize_pandas(df, nthreads=2 if use_threads else 1)
    result = pa.deserialize_pandas(buf, use_threads=use_threads)
    assert_frame_equal(result, df)
@pytest.mark.pandas
def test_pandas_serialize_round_trip():
    """Round-trip a DataFrame with a named index."""
    index = pd.Index([1, 2, 3], name='my_index')
    columns = ['foo', 'bar']
    df = pd.DataFrame(
        {'foo': [1.5, 1.6, 1.7], 'bar': list('abc')},
        index=index, columns=columns
    )
    _check_serialize_pandas_round_trip(df)
@pytest.mark.pandas
def test_pandas_serialize_round_trip_nthreads():
    """Same round trip as above, but through the multi-threaded path."""
    frame = pd.DataFrame(
        {'foo': [1.5, 1.6, 1.7], 'bar': list('abc')},
        index=pd.Index([1, 2, 3], name='my_index'),
        columns=['foo', 'bar'])
    _check_serialize_pandas_round_trip(frame, use_threads=True)
@pytest.mark.pandas
def test_pandas_serialize_round_trip_multi_index():
    """Round-trip a frame indexed by a two-level MultiIndex."""
    levels = [pd.Index([1, 2, 3], name='level_1'),
              pd.Index(list('def'), name=None)]
    frame = pd.DataFrame(
        {'foo': [1.5, 1.6, 1.7], 'bar': list('abc')},
        index=pd.MultiIndex.from_arrays(levels),
        columns=['foo', 'bar'])
    _check_serialize_pandas_round_trip(frame)
@pytest.mark.pandas
def test_serialize_pandas_empty_dataframe():
    """An entirely empty DataFrame must survive the round trip."""
    _check_serialize_pandas_round_trip(pd.DataFrame())
@pytest.mark.pandas
def test_pandas_serialize_round_trip_not_string_columns():
    """Round-trip a frame whose column labels are ints, not strings."""
    frame = pd.DataFrame(list(zip([1.5, 1.6, 1.7], 'abc')))
    result = pa.deserialize_pandas(pa.serialize_pandas(frame))
    assert_frame_equal(result, frame)
@pytest.mark.pandas
def test_serialize_pandas_no_preserve_index():
    """preserve_index=False drops the index; preserve_index=True keeps it."""
    frame = pd.DataFrame({'a': [1, 2, 3]}, index=[1, 2, 3])
    without_index = pd.DataFrame({'a': [1, 2, 3]})

    dropped = pa.deserialize_pandas(
        pa.serialize_pandas(frame, preserve_index=False))
    assert_frame_equal(dropped, without_index)

    kept = pa.deserialize_pandas(
        pa.serialize_pandas(frame, preserve_index=True))
    assert_frame_equal(kept, frame)
@pytest.mark.pandas
@pytest.mark.filterwarnings("ignore:'pyarrow:FutureWarning")
def test_serialize_with_pandas_objects():
    """pa.serialize must handle frames plus named/unnamed series in a dict."""
    frame = pd.DataFrame({'a': [1, 2, 3]}, index=[1, 2, 3])
    unnamed = pd.Series([1, 2, 3, 4])
    payload = {
        'a_series': frame['a'],
        'a_frame': frame,
        's_series': unnamed,
    }

    restored = pa.deserialize(pa.serialize(payload).to_buffer())

    assert_frame_equal(restored['a_frame'], frame)
    assert_series_equal(restored['a_series'], frame['a'])
    assert restored['a_series'].name == 'a'  # series name is preserved
    assert_series_equal(restored['s_series'], unnamed)
    assert restored['s_series'].name is None  # anonymous stays anonymous
@pytest.mark.pandas
def test_schema_batch_serialize_methods():
    """Schema/batch .serialize() output must be readable by pa.ipc readers."""
    frame = pd.DataFrame({
        'one': np.random.randn(5),
        'two': ['foo', np.nan, 'bar', 'bazbaz', 'qux']})
    batch = pa.RecordBatch.from_pandas(frame)

    schema_buf = batch.schema.serialize()
    batch_buf = batch.serialize()

    restored_schema = pa.ipc.read_schema(schema_buf)
    restored_batch = pa.ipc.read_record_batch(batch_buf, restored_schema)
    assert restored_batch.equals(batch)
def test_schema_serialization_with_metadata():
    """Field- and schema-level metadata must survive schema serialization."""
    field_md = {b'foo': b'bar', b'kind': b'field'}
    schema_md = {b'foo': b'bar', b'kind': b'schema'}
    schema = pa.schema(
        [pa.field('a', pa.int8()),
         pa.field('b', pa.string(), metadata=field_md)],
        metadata=schema_md)

    restored = pa.ipc.read_schema(schema.serialize())

    assert restored.equals(schema)
    assert restored.metadata == schema_md
    assert restored[0].metadata is None  # field 'a' carried no metadata
    assert restored[1].metadata == field_md
def test_deprecated_pyarrow_ns_apis():
    """Top-level pa.open_stream / pa.open_file aliases emit FutureWarning."""
    table = pa.table([pa.array([1, 2, 3, 4])], names=['a'])

    stream_sink = pa.BufferOutputStream()
    with pa.ipc.new_stream(stream_sink, table.schema) as writer:
        writer.write(table)
    with pytest.warns(FutureWarning,
                      match="please use pyarrow.ipc.open_stream"):
        pa.open_stream(stream_sink.getvalue())

    file_sink = pa.BufferOutputStream()
    with pa.ipc.new_file(file_sink, table.schema) as writer:
        writer.write(table)
    with pytest.warns(FutureWarning, match="please use pyarrow.ipc.open_file"):
        pa.open_file(file_sink.getvalue())
def write_file(batch, sink):
    """Write *batch* to *sink* in the Arrow file (random-access) format."""
    with pa.ipc.new_file(sink, batch.schema) as file_writer:
        file_writer.write_batch(batch)
def read_file(source):
    """Return every record batch stored in the Arrow file at *source*."""
    with pa.ipc.open_file(source) as file_reader:
        return [file_reader.get_batch(index)
                for index in range(file_reader.num_record_batches)]
def test_write_empty_ipc_file():
    # ARROW-3894: the IPC file writer must emit a valid (empty) file even
    # when it is closed before any record batch is written.
    schema = pa.schema([('field', pa.int64())])
    sink = pa.BufferOutputStream()
    with pa.ipc.new_file(sink, schema):
        pass  # close immediately, writing no batches

    payload = sink.getvalue()
    with pa.RecordBatchFileReader(pa.BufferReader(payload)) as reader:
        table = reader.read_all()
    assert len(table) == 0
    assert table.schema.equals(schema)
def test_py_record_batch_reader():
    """RecordBatchReader.from_batches must accept Python iterables/iterators
    and release its reference to the source once exhausted (verified with a
    weakref to the source container).
    """
    def make_schema():
        return pa.schema([('field', pa.int64())])

    def make_batches():
        schema = make_schema()
        batch1 = pa.record_batch([[1, 2, 3]], schema=schema)
        batch2 = pa.record_batch([[4, 5]], schema=schema)
        return [batch1, batch2]

    # With iterable
    batches = UserList(make_batches())  # UserList because plain list is not weakrefable
    wr = weakref.ref(batches)
    with pa.ipc.RecordBatchReader.from_batches(make_schema(),
                                               batches) as reader:
        batches = None  # the reader now holds the only strong reference
        assert wr() is not None
        assert list(reader) == make_batches()
        # Once exhausted, the reader must have dropped its reference.
        assert wr() is None

    # With iterator
    batches = iter(UserList(make_batches()))  # weakrefable
    wr = weakref.ref(batches)
    with pa.ipc.RecordBatchReader.from_batches(make_schema(),
                                               batches) as reader:
        batches = None
        assert wr() is not None
        assert list(reader) == make_batches()
        assert wr() is None
| |
# -*- coding: utf-8 -*-
"""Utility classes and values used for marshalling and unmarshalling objects to
and from primitive types.
.. warning::
This module is treated as private API.
Users should not need to use this module directly.
"""
from __future__ import unicode_literals
from marshmallow import utils
from marshmallow.utils import missing
from marshmallow.compat import text_type, iteritems
from marshmallow.exceptions import (
ValidationError,
)
__all__ = [
'Marshaller',
'Unmarshaller',
]
class ErrorStore(object):
    """Mixin that accumulates validation errors during (de)serialization."""

    def __init__(self):
        #: Dictionary of errors stored during serialization
        self.errors = {}
        #: List of `Field` objects which have validation errors
        self.error_fields = []
        #: List of field_names which have validation errors
        self.error_field_names = []
        #: True while (de)serializing a collection
        self._pending = False

    def reset_errors(self):
        """Discard all previously stored errors."""
        self.errors = {}
        self.error_field_names = []
        self.error_fields = []

    def get_errors(self, index=None):
        """Return the error container; a per-item sub-dict when *index* is given."""
        if index is None:
            return self.errors
        # Create (and remember) the per-index bucket on first access.
        return self.errors.setdefault(index, {})

    def call_and_store(self, getter_func, data, field_name, field_obj, index=None):
        """Call ``getter_func(data)``; record any `ValidationError` and return
        `missing` in its place.

        :param callable getter_func: Function for getting the serialized/deserialized
            value from ``data``.
        :param data: The data passed to ``getter_func``.
        :param str field_name: Field name.
        :param FieldABC field_obj: Field object that performs the
            serialization/deserialization behavior.
        :param int index: Index of the item being validated, if validating a collection,
            otherwise `None`.
        """
        try:
            return getter_func(data)
        except ValidationError as err:  # Store validation errors
            self.error_fields.append(field_obj)
            self.error_field_names.append(field_name)
            bucket = self.get_errors(index=index)
            # Warning: mutates the shared error container in place.
            if isinstance(err.messages, dict):
                bucket[field_name] = err.messages
            else:
                bucket.setdefault(field_name, []).extend(err.messages)
            return missing
class Marshaller(ErrorStore):
    """Callable class responsible for serializing data and storing errors.

    :param str prefix: Optional prefix that will be prepended to all the
        serialized field names.
    """
    def __init__(self, prefix=''):
        self.prefix = prefix
        ErrorStore.__init__(self)

    def serialize(self, obj, fields_dict, many=False, strict=False,
                  accessor=None, dict_class=dict, index_errors=True, index=None):
        """Takes raw data (a dict, list, or other object) and a dict of
        fields to output and serializes the data based on those fields.

        :param obj: The actual object(s) from which the fields are taken from
        :param dict fields_dict: Mapping of field names to :class:`Field` objects.
        :param bool many: Set to `True` if ``data`` should be serialized as
            a collection.
        :param bool strict: If `True`, raise errors if invalid data are passed in
            instead of failing silently and storing the errors.
        :param callable accessor: Function to use for getting values from ``obj``.
        :param type dict_class: Dictionary class used to construct the output.
        :param bool index_errors: Whether to store the index of invalid items in
            ``self.errors`` when ``many=True``.
        :param int index: Index of the item being serialized (for storing errors) if
            serializing a collection, otherwise `None`.
        :return: A dictionary of the marshalled data

        .. versionchanged:: 1.0.0
            Renamed from ``marshal``.
        """
        # Reset errors dict if not serializing a collection
        if not self._pending:
            self.reset_errors()
        if many and obj is not None:
            # Recurse once per item; _pending keeps the nested calls from
            # wiping the shared error store between items.
            self._pending = True
            ret = [self.serialize(d, fields_dict, many=False, strict=strict,
                                  dict_class=dict_class, accessor=accessor,
                                  index=idx, index_errors=index_errors)
                   for idx, d in enumerate(obj)]
            self._pending = False
            return ret
        items = []
        for attr_name, field_obj in iteritems(fields_dict):
            # load_only fields never appear in serialized output.
            if getattr(field_obj, 'load_only', False):
                continue
            if not self.prefix:
                key = attr_name
            else:
                key = ''.join([self.prefix, attr_name])
            # Late binding of the loop variables is safe: the lambda is
            # consumed immediately by call_and_store below.
            getter = lambda d: field_obj.serialize(attr_name, d, accessor=accessor)
            value = self.call_and_store(
                getter_func=getter,
                data=obj,
                field_name=key,
                field_obj=field_obj,
                index=(index if index_errors else None)
            )
            # `missing` means "omit this field from the output entirely".
            if value is missing:
                continue
            items.append((key, value))
        if self.errors and strict:
            raise ValidationError(
                self.errors,
                field_names=self.error_field_names,
                fields=self.error_fields
            )
        return dict_class(items)

    # Make an instance callable
    __call__ = serialize
# Key under which schema-level (i.e. non-field-specific) validation errors
# are stored in the errors dict.
SCHEMA = '_schema'
class Unmarshaller(ErrorStore):
    """Callable class responsible for deserializing data and storing errors.

    .. versionadded:: 1.0.0
    """

    def _run_validator(self, validator_func, output,
                       original_data, fields_dict, index=None,
                       strict=False, many=False, pass_original=False):
        # Run one schema-level validator and store (or, when strict,
        # re-raise) any ValidationError it produces.
        # NOTE(review): the ``many`` parameter is accepted but never read here.
        try:
            if pass_original:  # Pass original, raw data (before unmarshalling)
                res = validator_func(output, original_data)
            else:
                res = validator_func(output)
            # A validator returning False (not None) is treated as a failure.
            if res is False:
                func_name = utils.get_callable_name(validator_func)
                raise ValidationError('Schema validator {0}({1}) is False'.format(
                    func_name, dict(output)
                ))
        except ValidationError as err:
            errors = self.get_errors(index=index)
            # Store or reraise errors
            if err.field_names:
                # The validator attributed the error to specific fields.
                field_names = err.field_names
                field_objs = [fields_dict[each] for each in field_names]
            else:
                # No field attribution: file it under the schema-level key.
                field_names = [SCHEMA]
                field_objs = []
            for field_name in field_names:
                if isinstance(err.messages, (list, tuple)):
                    # self.errors[field_name] may be a dict if schemas are nested
                    if isinstance(errors.get(field_name), dict):
                        errors[field_name].setdefault(
                            SCHEMA, []
                        ).extend(err.messages)
                    else:
                        errors.setdefault(field_name, []).extend(err.messages)
                elif isinstance(err.messages, dict):
                    errors.setdefault(field_name, []).append(err.messages)
                else:
                    errors.setdefault(field_name, []).append(text_type(err))
            if strict:
                raise ValidationError(
                    self.errors,
                    fields=field_objs,
                    field_names=field_names
                )

    def _validate(self, validators, output, original_data, fields_dict, index=None, strict=False):
        """Perform schema-level validation. Stores errors if ``strict`` is `False`.
        """
        for validator_func in validators:
            self._run_validator(validator_func, output, original_data, fields_dict,
                                index=index, strict=strict)
        return output

    def deserialize(self, data, fields_dict, many=False, validators=None,
                    preprocess=None, postprocess=None, strict=False, dict_class=dict,
                    index_errors=True, index=None):
        """Deserialize ``data`` based on the schema defined by ``fields_dict``.

        :param dict data: The data to deserialize.
        :param dict fields_dict: Mapping of field names to :class:`Field` objects.
        :param bool many: Set to `True` if ``data`` should be deserialized as
            a collection.
        :param list validators: List of validation functions to apply to the
            deserialized dictionary.
        :param list preprocess: List of pre-processing functions.
        :param list postprocess: Accepted for API compatibility.
            NOTE(review): this parameter is never used in this method body.
        :param bool strict: If `True`, raise errors if invalid data are passed in
            instead of failing silently and storing the errors.
        :param type dict_class: Dictionary class used to construct the output.
        :param bool index_errors: Whether to store the index of invalid items in
            ``self.errors`` when ``many=True``.
        :param int index: Index of the item being serialized (for storing errors) if
            serializing a collection, otherwise `None`.
        :return: A dictionary of the deserialized data.
        """
        # Reset errors if not deserializing a collection
        if not self._pending:
            self.reset_errors()
        if many and data is not None:
            # Recurse per item; _pending prevents the nested calls from
            # resetting the shared error store between items.
            self._pending = True
            ret = [self.deserialize(d, fields_dict, many=False,
                                    validators=validators, preprocess=preprocess,
                                    strict=strict, dict_class=dict_class,
                                    index=idx, index_errors=index_errors)
                   for idx, d in enumerate(data)]
            self._pending = False
            return ret
        original_data = data
        if data is not None:
            items = []
            for attr_name, field_obj in iteritems(fields_dict):
                # dump_only fields are never loaded from input data.
                if field_obj.dump_only:
                    continue
                try:
                    raw_value = data.get(attr_name, missing)
                except AttributeError:  # Input data is not a dict
                    msg = 'Data must be a dict, got a {0}'.format(data.__class__.__name__)
                    errors = self.get_errors(index=index)
                    if strict:
                        raise ValidationError(
                            msg,
                            field_names=[SCHEMA],
                            fields=[]
                        )
                    else:
                        # NOTE(review): this re-fetch drops the index, so the
                        # message lands in the top-level dict even inside a
                        # collection — looks unintentional; verify.
                        errors = self.get_errors()
                        errors.setdefault(SCHEMA, []).append(msg)
                    # Input data type is incorrect, so we can bail out early
                    break
                field_name = attr_name
                # Fall back to the field's alternate input key, if declared.
                if raw_value is missing and field_obj.load_from:
                    field_name = field_obj.load_from
                    raw_value = data.get(field_obj.load_from, missing)
                if raw_value is missing:
                    # Apply the field's default; it may be a callable factory.
                    _miss = field_obj.missing
                    raw_value = _miss() if callable(_miss) else _miss
                if raw_value is missing and not field_obj.required:
                    continue
                # Safe despite being defined in a loop: the lambda is
                # consumed immediately by call_and_store below.
                getter = lambda val: field_obj.deserialize(
                    val,
                    field_obj.load_from or attr_name,
                    data
                )
                value = self.call_and_store(
                    getter_func=getter,
                    data=raw_value,
                    field_name=field_name,
                    field_obj=field_obj,
                    index=(index if index_errors else None)
                )
                if value is not missing:
                    # Store under the field's attribute name, if overridden.
                    key = fields_dict[attr_name].attribute or attr_name
                    items.append((key, value))
            ret = dict_class(items)
        else:
            ret = None
        if preprocess:
            # NOTE(review): 'preprocess or []' is redundant inside this
            # truthy branch; kept as-is.
            preprocess = preprocess or []
            for func in preprocess:
                ret = func(ret)
        if validators:
            # NOTE(review): same redundancy as above.
            validators = validators or []
            ret = self._validate(validators, ret, original_data, fields_dict=fields_dict,
                                 strict=strict, index=(index if index_errors else None))
        if self.errors and strict:
            raise ValidationError(
                self.errors,
                field_names=self.error_field_names,
                fields=self.error_fields
            )
        return ret

    # Make an instance callable
    __call__ = deserialize
| |
#!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example downloads an adgroup performance report for all child accounts.
To get report fields, run get_report_fields.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import multiprocessing
import os
from Queue import Empty
import time
import googleads.adwords
import googleads.errors
# Base backoff between retries, in seconds (multiplied by the retry count).
BACKOFF_FACTOR = 5
# Maximum number of processes to spawn (one per CPU core).
MAX_PROCESSES = multiprocessing.cpu_count()
# Maximum number of retries for 500 errors.
MAX_RETRIES = 5
# Maximum number of results to request per API page.
PAGE_SIZE = 100
# Directory to download the reports to. Placeholder: replace before running.
REPORT_DOWNLOAD_DIRECTORY = 'INSERT_REPORT_DOWNLOAD_DIRECTORY'
def _DownloadReport(process_id, report_download_directory, customer_id,
                    report_definition):
  """Helper function used by ReportWorker to download customer report.

  Note that multiprocessing differs between Windows / Unix environments. A
  Process or its subclasses in Windows must be serializable with pickle, but
  that is not possible for AdWordsClient or ReportDownloader. This top-level
  function is used as a work-around for Windows support.

  Args:
    process_id: The PID of the process downloading the report.
    report_download_directory: A string indicating the directory where you
      would like to download the reports.
    customer_id: The AdWords customer ID for which the report is being
      downloaded (formatted with %d below, so it is expected to be numeric).
    report_definition: A dict containing the report definition to be used.

  Returns:
    A tuple indicating a boolean success/failure status, and dict request
    context.
  """
  report_downloader = (googleads.adwords.AdWordsClient.LoadFromStorage()
                       .GetReportDownloader())
  filepath = os.path.join(report_download_directory,
                          'adgroup_%d.csv' % customer_id)
  retry_count = 0
  while True:
    print ('[%d/%d] Loading report for customer ID "%s" into "%s"...'
           % (process_id, retry_count, customer_id, filepath))
    try:
      with open(filepath, 'wb') as handler:
        report_downloader.DownloadReport(
            report_definition, output=handler,
            client_customer_id=customer_id)
      return (True, {'customerId': customer_id})
    except googleads.errors.AdWordsReportError as e:
      if e.code == 500 and retry_count < MAX_RETRIES:
        # BUG FIX: retry_count was never incremented, which made this loop
        # retry 500 errors forever with a zero-second backoff. Counting the
        # attempt makes the backoff grow and MAX_RETRIES actually bound the
        # number of retries.
        retry_count += 1
        time.sleep(retry_count * BACKOFF_FACTOR)
      else:
        print ('Report failed for customer ID "%s" with code "%d" after "%d" '
               'retries.' % (customer_id, e.code, retry_count+1))
        return (False, {'customerId': customer_id, 'code': e.code,
                        'message': e.message})
class ReportWorker(multiprocessing.Process):
  """A worker Process used to download reports for a set of customer IDs."""

  def __init__(self, report_download_directory, report_definition,
               input_queue, success_queue, failure_queue):
    """Initializes a ReportWorker.

    Args:
      report_download_directory: A string indicating the directory where you
        would like to download the reports.
      report_definition: A dict containing the report definition that you would
        like to run against all customer IDs in the input_queue.
      input_queue: A Queue instance containing all of the customer IDs that
        the report_definition will be run against.
      success_queue: A Queue instance that the details of successful report
        downloads will be saved to.
      failure_queue: A Queue instance that the details of failed report
        downloads will be saved to.
    """
    super(ReportWorker, self).__init__()
    self.report_download_directory = report_download_directory
    self.report_definition = report_definition
    self.input_queue = input_queue
    self.success_queue = success_queue
    self.failure_queue = failure_queue

  def run(self):
    """Consume customer IDs from the input queue until it is drained."""
    while True:
      try:
        customer_id = self.input_queue.get(timeout=0.01)
      except Empty:
        # The queue is fully populated before workers start, so an empty
        # queue means there is no more work for this process.
        return
      succeeded, context = _DownloadReport(
          self.ident, self.report_download_directory, customer_id,
          self.report_definition)
      if succeeded:
        self.success_queue.put(context)
      else:
        self.failure_queue.put(context)
def GetCustomerIDs(client):
  """Retrieves all CustomerIds in the account hierarchy.

  Note that your configuration file must specify a client_customer_id belonging
  to an AdWords manager account.

  Args:
    client: an AdWordsClient instance.

  Raises:
    Exception: if no CustomerIds could be found.

  Returns:
    A Queue instance containing all CustomerIds in the account hierarchy.
  """
  # For this example, we will use ManagedCustomerService to get all IDs in
  # hierarchy that do not belong to MCC accounts.
  managed_customer_service = client.GetService('ManagedCustomerService',
                                               version='v201809')
  offset = 0
  # Get the account hierarchy for this account.
  selector = {
      'fields': ['CustomerId'],
      # Exclude manager (MCC) accounts: they cannot run reports themselves.
      'predicates': [{
          'field': 'CanManageClients',
          'operator': 'EQUALS',
          'values': [False]
      }],
      'paging': {
          'startIndex': str(offset),
          'numberResults': str(PAGE_SIZE)
      }
  }
  # Using Queue to balance load between processes.
  queue = multiprocessing.Queue()
  more_pages = True
  while more_pages:
    page = managed_customer_service.get(selector)
    if page and 'entries' in page and page['entries']:
      for entry in page['entries']:
        queue.put(entry['customerId'])
    else:
      # NOTE(review): this raises if ANY page comes back empty, not just the
      # first one — with an exact multiple of PAGE_SIZE entries the final
      # request could trip this; verify against the paging contract.
      raise Exception('Can\'t retrieve any customer ID.')
    # Advance the paging window for the next request.
    offset += PAGE_SIZE
    selector['paging']['startIndex'] = str(offset)
    more_pages = offset < int(page['totalNumEntries'])
  return queue
def main(client, report_download_directory):
  """Download the ad group performance report for every child account.

  Args:
    client: an AdWordsClient instance pointed at a manager account.
    report_download_directory: directory the reports will be written into.
  """
  # Determine list of customer IDs to retrieve report for.
  input_queue = GetCustomerIDs(client)
  reports_succeeded = multiprocessing.Queue()
  reports_failed = multiprocessing.Queue()
  # Create report definition.
  report_definition = {
      'reportName': 'Custom ADGROUP_PERFORMANCE_REPORT',
      'dateRangeType': 'LAST_7_DAYS',
      'reportType': 'ADGROUP_PERFORMANCE_REPORT',
      'downloadFormat': 'CSV',
      'selector': {
          'fields': ['CampaignId', 'AdGroupId', 'Impressions', 'Clicks',
                     'Cost'],
          # Predicates are optional.
          'predicates': {
              'field': 'AdGroupStatus',
              'operator': 'IN',
              'values': ['ENABLED', 'PAUSED']
          }
      },
  }
  # Never spawn more workers than there are customer IDs to process.
  queue_size = input_queue.qsize()
  num_processes = min(queue_size, MAX_PROCESSES)
  print 'Retrieving %d reports with %d processes:' % (queue_size, num_processes)
  # Start all the processes.
  processes = [ReportWorker(report_download_directory,
                            report_definition, input_queue, reports_succeeded,
                            reports_failed)
               for _ in range(num_processes)]
  for process in processes:
    process.start()
  for process in processes:
    process.join()
  print 'Finished downloading reports with the following results:'
  # Drain the success queue; Empty signals there are no more results.
  while True:
    try:
      success = reports_succeeded.get(timeout=0.01)
    except Empty:
      break
    print '\tReport for CustomerId "%d" succeeded.' % success['customerId']
  # Drain the failure queue the same way.
  while True:
    try:
      failure = reports_failed.get(timeout=0.01)
    except Empty:
      break
    print ('\tReport for CustomerId "%d" failed with error code "%s" and '
           'message: %s.' % (failure['customerId'], failure['code'],
                             failure['message']))
if __name__ == '__main__':
  # Initialize the client from the googleads.yaml file in the home directory.
  adwords_client = googleads.adwords.AdWordsClient.LoadFromStorage()
  main(adwords_client, REPORT_DOWNLOAD_DIRECTORY)
| |
#!/usr/bin/env python
#
# Copyright 2014 Docalytics Inc, Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for pytracts.messages."""
from tests import test_util
__author__ = 'rafek@google.com (Rafe Kaplan)'
import new
import re
import sys
import unittest
from pytracts import message_types
from pytracts import messages
class ModuleInterfaceTest(test_util.ModuleInterfaceTest, test_util.TestCase):
  """Check the public interface conventions of the messages module."""

  MODULE = messages
class ValidationErrorTest(test_util.TestCase):
  """Tests for the string form of messages.ValidationError."""

  def test_str_no_field_name(self):
    """Test string version of ValidationError when no name provided."""
    self.assertEquals('Validation error',str(messages.ValidationError('Validation error')))

  def test_str_field_name(self):
    """Test string version of ValidationError when a field name is set.

    Setting field_name must not change the rendered message.
    """
    validation_error = messages.ValidationError('Validation error')
    validation_error.field_name = 'a_field'
    self.assertEquals('Validation error', str(validation_error))
class EnumTest(test_util.TestCase):
  """Tests for messages.Enum definition, lookup, and comparison behavior."""

  def setUp(self):
    """Set up tests."""
    # Redefine Color class in case so that changes to it (an error) in one test
    # does not affect other tests.
    global Color

    class Color(messages.Enum):
      RED = 20
      ORANGE = 2
      YELLOW = 40
      GREEN = 4
      BLUE = 50
      INDIGO = 5
      VIOLET = 80

  def testNames(self):
    """Test that names iterates over enum names."""
    self.assertEquals(
        {'BLUE', 'GREEN', 'INDIGO', 'ORANGE', 'RED', 'VIOLET', 'YELLOW'},
        set(Color.names()))

  def testNumbers(self):
    """Tests that numbers iterates of enum numbers."""
    self.assertEquals({2, 4, 5, 20, 40, 50, 80}, set(Color.numbers()))

  def testIterate(self):
    """Test that __iter__ iterates over all enum values."""
    self.assertEquals(set(Color),
                      {Color.RED,
                       Color.ORANGE,
                       Color.YELLOW,
                       Color.GREEN,
                       Color.BLUE,
                       Color.INDIGO,
                       Color.VIOLET})

  def testNaturalOrder(self):
    """Test that natural order enumeration is in numeric order."""
    self.assertEquals([Color.ORANGE,
                       Color.GREEN,
                       Color.INDIGO,
                       Color.RED,
                       Color.YELLOW,
                       Color.BLUE,
                       Color.VIOLET],
                      sorted(Color))

  def testByName(self):
    """Test look-up by name."""
    self.assertEquals(Color.RED, Color.lookup_by_name('RED'))
    # Numbers and enum instances are rejected by name lookup.
    self.assertRaises(KeyError, Color.lookup_by_name, 20)
    self.assertRaises(KeyError, Color.lookup_by_name, Color.RED)

  def testByNumber(self):
    """Test look-up by number."""
    # Names and enum instances are rejected by number lookup.
    self.assertRaises(KeyError, Color.lookup_by_number, 'RED')
    self.assertEquals(Color.RED, Color.lookup_by_number(20))
    self.assertRaises(KeyError, Color.lookup_by_number, Color.RED)

  def testConstructor(self):
    """Test that constructor look-up by name or number."""
    self.assertEquals(Color.RED, Color('RED'))
    self.assertEquals(Color.RED, Color(u'RED'))
    self.assertEquals(Color.RED, Color(20))
    # Python 2 long literal: int and long must be treated alike.
    self.assertEquals(Color.RED, Color(20L))
    self.assertEquals(Color.RED, Color(Color.RED))
    self.assertRaises(TypeError, Color, 'Not exists')
    self.assertRaises(TypeError, Color, 'Red')
    self.assertRaises(TypeError, Color, 100)
    self.assertRaises(TypeError, Color, 10.0)

  def testLen(self):
    """Test that len function works to count enums."""
    self.assertEquals(7, len(Color))

  def testNoSubclasses(self):
    """Test that it is not possible to sub-class enum classes."""
    def declare_subclass():
      class MoreColor(Color):
        pass
    self.assertRaises(messages.EnumDefinitionError,
                      declare_subclass)

  def testClassNotMutable(self):
    """Test that enum classes themselves are not mutable."""
    self.assertRaises(AttributeError,
                      setattr,
                      Color,
                      'something_new',
                      10)

  def testInstancesMutable(self):
    """Test that enum instances are not mutable."""
    self.assertRaises(TypeError,
                      setattr,
                      Color.RED,
                      'something_new',
                      10)

  def testDefEnum(self):
    """Test def_enum works by building enum class from dict."""
    WeekDay = messages.Enum.def_enum({'Monday': 1,
                                      'Tuesday': 2,
                                      'Wednesday': 3,
                                      'Thursday': 4,
                                      'Friday': 6,
                                      'Saturday': 7,
                                      'Sunday': 8},
                                     'WeekDay')
    self.assertEquals('Wednesday', WeekDay(3).name)
    self.assertEquals(6, WeekDay('Friday').number)
    self.assertEquals(WeekDay.Sunday, WeekDay('Sunday'))

  def testNonInt(self):
    """Test that non-integer values rejection by enum def."""
    self.assertRaises(messages.EnumDefinitionError,
                      messages.Enum.def_enum,
                      {'Bad': '1'},
                      'BadEnum')

  def testNegativeInt(self):
    """Test that negative numbers rejection by enum def."""
    self.assertRaises(messages.EnumDefinitionError,
                      messages.Enum.def_enum,
                      {'Bad': -1},
                      'BadEnum')

  def testLowerBound(self):
    """Test that zero is accepted by enum def."""
    class NotImportant(messages.Enum):
      """Testing for value zero"""
      VALUE = 0
    self.assertEquals(0, int(NotImportant.VALUE))

  def testTooLargeInt(self):
    """Test that numbers too large are rejected."""
    self.assertRaises(messages.EnumDefinitionError,
                      messages.Enum.def_enum,
                      {'Bad': (2 ** 29)},
                      'BadEnum')

  def testRepeatedInt(self):
    """Test duplicated numbers are forbidden."""
    self.assertRaises(messages.EnumDefinitionError,
                      messages.Enum.def_enum,
                      {'Ok': 1, 'Repeated': 1},
                      'BadEnum')

  def testStr(self):
    """Test converting to string."""
    self.assertEquals('RED', str(Color.RED))
    self.assertEquals('ORANGE', str(Color.ORANGE))

  def testInt(self):
    """Test converting to int."""
    self.assertEquals(20, int(Color.RED))
    self.assertEquals(2, int(Color.ORANGE))

  def testRepr(self):
    """Test enum representation."""
    self.assertEquals('Color(RED, 20)', repr(Color.RED))
    self.assertEquals('Color(YELLOW, 40)', repr(Color.YELLOW))

  def testDocstring(self):
    """Test that docstring is supported ok."""
    class NotImportant(messages.Enum):
      """I have a docstring."""
      VALUE1 = 1
    self.assertEquals('I have a docstring.', NotImportant.__doc__)

  def testDeleteEnumValue(self):
    """Test that enum values cannot be deleted."""
    self.assertRaises(TypeError, delattr, Color, 'RED')

  def testEnumName(self):
    """Test enum name."""
    module_name = test_util.get_module_name(EnumTest)
    self.assertEquals('%s.Color' % module_name, Color.definition_name())
    self.assertEquals(module_name, Color.outer_definition_name())
    self.assertEquals(module_name, Color.definition_package())

  def testDefinitionName_OverrideModule(self):
    """Test enum module is overriden by module package name."""
    # The module-level `package` global overrides the module name while set.
    global package
    try:
      package = 'my.package'
      self.assertEquals('my.package.Color', Color.definition_name())
      self.assertEquals('my.package', Color.outer_definition_name())
      self.assertEquals('my.package', Color.definition_package())
    finally:
      del package

  def testDefinitionName_NoModule(self):
    """Test what happens when there is no module for enum."""
    class Enum1(messages.Enum):
      pass
    # Swap in a copy of sys.modules so this module can be "removed" safely.
    original_modules = sys.modules
    sys.modules = dict(sys.modules)
    try:
      del sys.modules[__name__]
      self.assertEquals('Enum1', Enum1.definition_name())
      self.assertEquals(None, Enum1.outer_definition_name())
      self.assertEquals(None, Enum1.definition_package())
      self.assertEquals(unicode, type(Enum1.definition_name()))
    finally:
      sys.modules = original_modules

  def testDefinitionName_Nested(self):
    """Test nested Enum names."""
    class MyMessage(messages.Message):
      class NestedEnum(messages.Enum):
        pass

      class NestedMessage(messages.Message):
        class NestedEnum(messages.Enum):
          pass

    module_name = test_util.get_module_name(EnumTest)
    self.assertEquals('%s.MyMessage.NestedEnum' % module_name,
                      MyMessage.NestedEnum.definition_name())
    self.assertEquals('%s.MyMessage' % module_name,
                      MyMessage.NestedEnum.outer_definition_name())
    self.assertEquals(module_name,
                      MyMessage.NestedEnum.definition_package())
    self.assertEquals('%s.MyMessage.NestedMessage.NestedEnum' % module_name,
                      MyMessage.NestedMessage.NestedEnum.definition_name())
    self.assertEquals(
        '%s.MyMessage.NestedMessage' % module_name,
        MyMessage.NestedMessage.NestedEnum.outer_definition_name())
    self.assertEquals(module_name,
                      MyMessage.NestedMessage.NestedEnum.definition_package())

  def testMessageDefinition(self):
    """Test that enumeration knows its enclosing message definition."""
    class OuterEnum(messages.Enum):
      pass
    self.assertEquals(None, OuterEnum.message_definition())

    class OuterMessage(messages.Message):
      class InnerEnum(messages.Enum):
        pass
    self.assertEquals(OuterMessage, OuterMessage.InnerEnum.message_definition())

  def testComparison(self):
    """Test comparing various enums to different types."""
    class Enum1(messages.Enum):
      VAL1 = 1
      VAL2 = 2

    class Enum2(messages.Enum):
      VAL1 = 1

    self.assertEquals(Enum1.VAL1, Enum1.VAL1)
    self.assertNotEquals(Enum1.VAL1, Enum1.VAL2)
    # Same number in a different enum class is NOT equal.
    self.assertNotEquals(Enum1.VAL1, Enum2.VAL1)
    self.assertNotEquals(Enum1.VAL1, 'VAL1')
    self.assertNotEquals(Enum1.VAL1, 1)
    self.assertNotEquals(Enum1.VAL1, 2)
    self.assertNotEquals(Enum1.VAL1, None)
    self.assertNotEquals(Enum1.VAL1, Enum2.VAL1)
    self.assertTrue(Enum1.VAL1 < Enum1.VAL2)
    self.assertTrue(Enum1.VAL2 > Enum1.VAL1)
    self.assertNotEquals(1, Enum2.VAL1)
class FieldListTest(test_util.TestCase):
  # Tests for the validating list type used for repeated fields.  The
  # constructor shape used throughout is FieldList(message, field, sequence).
  def setUp(self):
    # Shared repeated field used by most tests below.
    self.integer_field = messages.IntegerField(repeated=True)
  def testConstructor(self):
    # Accepts lists and tuples, and compares equal to a plain list.
    self.assertEquals([1, 2, 3],
                      messages.FieldList(None, self.integer_field, [1, 2, 3]))
    self.assertEquals([1, 2, 3],
                      messages.FieldList(None, self.integer_field, (1, 2, 3)))
    self.assertEquals([], messages.FieldList(None, self.integer_field, []))
  def testNone(self):
    # NOTE(review): other calls in this class pass three arguments
    # (message, field, sequence); only two are passed here, so the TypeError
    # may come from the missing argument rather than the None sequence --
    # confirm the intended signature.
    self.assertRaises(TypeError, messages.FieldList, self.integer_field, None)
  def testDoNotAutoConvertString(self):
    # A string is iterable but must not be treated as a sequence of values.
    string_field = messages.StringField(repeated=True)
    self.assertRaises(messages.ValidationError,
                      messages.FieldList, None, string_field, 'abc')
  def testConstructorCopies(self):
    # The constructor copies its input instead of wrapping it.
    a_list = [1, 3, 6]
    field_list = messages.FieldList(None, self.integer_field, a_list)
    self.assertFalse(a_list is field_list)
    self.assertFalse(field_list is
                     messages.FieldList(None, self.integer_field, field_list))
  def testNonRepeatedField(self):
    self.assertRaisesWithRegexpMatch(
        messages.FieldDefinitionError,
        'FieldList may only accept repeated fields',
        messages.FieldList,
        None,
        messages.IntegerField(),
        [])
  def testConstructor_InvalidValues(self):
    # Error messages use Python 2 type reprs (<type 'int'>, <type 'long'>).
    self.assertRaisesWithRegexpMatch(
        messages.ValidationError,
        re.escape("Expected type (<type 'int'>, <type 'long'>) "
                  "for IntegerField, found 1 (type <type 'str'>)"),
        messages.FieldList, None, self.integer_field, ["1", "2", "3"])
  def testConstructor_Scalars(self):
    self.assertRaisesWithRegexpMatch(
        messages.ValidationError,
        "IntegerField is repeated. Found: 3",
        messages.FieldList, None, self.integer_field, 3)
    # Iterators are rejected as well; '<listiterator object' is Python 2's
    # repr for iter([...]).
    self.assertRaisesWithRegexpMatch(
        messages.ValidationError,
        "IntegerField is repeated. Found: <listiterator object",
        messages.FieldList, None, self.integer_field, iter([1, 2, 3]))
  def testSetSlice(self):
    field_list = messages.FieldList(None, self.integer_field, [1, 2, 3, 4, 5])
    field_list[1:3] = [10, 20]
    self.assertEquals([1, 10, 20, 4, 5], field_list)
  def testSetSlice_InvalidValues(self):
    field_list = messages.FieldList(None, self.integer_field, [1, 2, 3, 4, 5])
    def setslice():
      field_list[1:3] = ['10', '20']
    self.assertRaisesWithRegexpMatch(
        messages.ValidationError,
        re.escape("Expected type (<type 'int'>, <type 'long'>) "
                  "for IntegerField, found 10 (type <type 'str'>)"),
        setslice)
  def testSetItem(self):
    field_list = messages.FieldList(None, self.integer_field, [2])
    field_list[0] = 10
    self.assertEquals([10], field_list)
  def testSetItem_InvalidValues(self):
    field_list = messages.FieldList(None, self.integer_field, [2])
    def setitem():
      field_list[0] = '10'
    self.assertRaisesWithRegexpMatch(
        messages.ValidationError,
        re.escape("Expected type (<type 'int'>, <type 'long'>) "
                  "for IntegerField, found 10 (type <type 'str'>)"),
        setitem)
  def testAppend(self):
    field_list = messages.FieldList(None, self.integer_field, [2])
    field_list.append(10)
    self.assertEquals([2, 10], field_list)
  def testAppend_InvalidValues(self):
    field_list = messages.FieldList(None, self.integer_field, [2])
    # NOTE(review): sets .name on the FieldList itself, presumably to give
    # the error message a field name -- confirm this attribute is used.
    field_list.name = 'a_field'
    def append():
      field_list.append('10')
    self.assertRaisesWithRegexpMatch(
        messages.ValidationError,
        re.escape("Expected type (<type 'int'>, <type 'long'>) "
                  "for IntegerField, found 10 (type <type 'str'>)"),
        append)
  def testExtend(self):
    field_list = messages.FieldList(None, self.integer_field, [2])
    field_list.extend([10])
    self.assertEquals([2, 10], field_list)
  def testExtend_InvalidValues(self):
    field_list = messages.FieldList(None, self.integer_field, [2])
    def extend():
      field_list.extend(['10'])
    self.assertRaisesWithRegexpMatch(
        messages.ValidationError,
        re.escape("Expected type (<type 'int'>, <type 'long'>) "
                  "for IntegerField, found 10 (type <type 'str'>)"),
        extend)
  def testInsert(self):
    field_list = messages.FieldList(None, self.integer_field, [2, 3])
    field_list.insert(1, 10)
    self.assertEquals([2, 10, 3], field_list)
  def testInsert_InvalidValues(self):
    field_list = messages.FieldList(None, self.integer_field, [2, 3])
    def insert():
      field_list.insert(1, '10')
    self.assertRaisesWithRegexpMatch(
        messages.ValidationError,
        re.escape("Expected type (<type 'int'>, <type 'long'>) "
                  "for IntegerField, found 10 (type <type 'str'>)"),
        insert)
class FieldTest(test_util.TestCase):
  # Tests for the scalar field classes (Integer/Float/Boolean/Bytes/String)
  # plus MessageField and EnumField construction, validation and coercion.
  def ActionOnAllFieldClasses(self, action):
    """Test all field classes except Message and Enum.
    Message and Enum require separate tests.
    Args:
      action: Callable that takes the field class as a parameter.
    """
    for field_class in (messages.IntegerField,
                        messages.FloatField,
                        messages.BooleanField,
                        messages.BytesField,
                        messages.StringField,
                        ):
      action(field_class)
  def testRequiredAndRepeated(self):
    """Test setting the required and repeated fields."""
    def action(field_class):
      field_class(required=True)
      field_class(repeated=True)
      # A field may not be both required and repeated.
      self.assertRaises(messages.FieldDefinitionError,
                        field_class,
                        required=True,
                        repeated=True)
    self.ActionOnAllFieldClasses(action)
  def testInvalidVariant(self):
    """Test field with invalid variants."""
    def action(field_class):
      # NOTE: the DateTime exclusions are vacuous here --
      # ActionOnAllFieldClasses never passes those classes.
      if field_class is not messages.DateTimeISO8601Field and field_class is not messages.DateTimeMsIntegerField:
        self.assertRaises(messages.InvalidVariantError,
                          field_class,
                          variant=messages.Variant.ENUM)
    self.ActionOnAllFieldClasses(action)
  def testDefaultVariant(self):
    """Test that default variant is used when not set."""
    def action(field_class):
      field = field_class()
      self.assertEquals(field_class.DEFAULT_VARIANT, field.variant)
    self.ActionOnAllFieldClasses(action)
  def testAlternateVariant(self):
    """Test that an explicitly supplied variant is used."""
    field = messages.IntegerField(variant=messages.Variant.UINT32)
    self.assertEquals(messages.Variant.UINT32, field.variant)
  def testDefaultFields_Single(self):
    """Test default field is correct type."""
    defaults = {messages.IntegerField: 10,
                messages.FloatField: 1.5,
                messages.BooleanField: False,
                messages.BytesField: 'abc',
                messages.StringField: u'abc',
                }
    def action(field_class):
      field_class(default=defaults[field_class])
    self.ActionOnAllFieldClasses(action)
    # Run defaults test again checking for str/unicode compatiblity.
    defaults[messages.StringField] = 'abc'
    self.ActionOnAllFieldClasses(action)
  def testStringField_BadUnicodeInDefault(self):
    """Test binary values in string field."""
    self.assertRaisesWithRegexpMatch(
        messages.InvalidDefaultError,
        'Invalid default value for StringField: \211: '
        'Field encountered non-ASCII string \211:',
        messages.StringField, default='\x89')
  def testDefaultFields_InvalidSingle(self):
    """Test that an invalid default value is rejected."""
    def action(field_class):
      self.assertRaises(messages.InvalidDefaultError,
                        field_class,
                        default=object())
    self.ActionOnAllFieldClasses(action)
  def testDefaultFields_InvalidRepeated(self):
    """Test default field does not accept defaults."""
    self.assertRaisesWithRegexpMatch(
        messages.FieldDefinitionError,
        'Repeated fields may not have defaults',
        messages.StringField, repeated=True, default=[1, 2, 3])
  def testDefaultFields_None(self):
    """Test none is always acceptable."""
    def action(field_class):
      field_class(default=None)
      field_class(required=True, default=None)
      field_class(repeated=True, default=None)
    self.ActionOnAllFieldClasses(action)
  def testDefaultFields_Enum(self):
    """Test the default for enum fields."""
    class Symbol(messages.Enum):
      ALPHA = 1
      BETA = 2
      GAMMA = 3
    field = messages.EnumField(Symbol, default=Symbol.ALPHA)
    self.assertEquals(Symbol.ALPHA, field.default)
  def testValidate_Valid(self):
    """Test validation of valid values."""
    values = {messages.IntegerField: 10,
              messages.FloatField: 1.5,
              messages.BooleanField: False,
              messages.BytesField: 'abc',
              messages.StringField: u'abc',
              }
    def action(field_class):
      # Optional.
      field = field_class()
      field.validate(values[field_class])
      # Required.
      field = field_class(required=True)
      field.validate(values[field_class])
      # Repeated.
      field = field_class(repeated=True)
      field.validate([])
      field.validate(())
      field.validate([values[field_class]])
      field.validate((values[field_class],))
      # Right value, but not wrapped in a sequence for a repeated field.
      # (An identical duplicate of this assertion was removed.)
      self.assertRaises(messages.ValidationError,
                        field.validate,
                        values[field_class])
    self.ActionOnAllFieldClasses(action)
  def testValidate_Invalid(self):
    """Test validation of invalid values."""
    values = {messages.IntegerField: "10",
              messages.FloatField: 1,
              messages.BooleanField: 0,
              messages.BytesField: 10.20,
              messages.StringField: 42,
              }
    def action(field_class):
      # Optional.
      field = field_class()
      self.assertRaises(messages.ValidationError,
                        field.validate,
                        values[field_class])
      # Required.
      field = field_class(required=True)
      self.assertRaises(messages.ValidationError,
                        field.validate,
                        values[field_class])
      # Repeated.
      field = field_class(repeated=True)
      self.assertRaises(messages.ValidationError,
                        field.validate,
                        [values[field_class]])
      self.assertRaises(messages.ValidationError,
                        field.validate,
                        (values[field_class],))
    self.ActionOnAllFieldClasses(action)
  def testValidate_None(self):
    """Test that None is valid for non-required fields."""
    def action(field_class):
      # Optional.
      field = field_class()
      field.validate(None)
      # Required.
      field = field_class(required=True)
      self.assertRaisesWithRegexpMatch(messages.ValidationError,
                                       'Required field is missing',
                                       field.validate,
                                       None)
      # Repeated.  None is fine for the whole value but not as an element.
      field = field_class(repeated=True)
      field.validate(None)
      self.assertRaisesWithRegexpMatch(messages.ValidationError,
                                       'Repeated values for %s may '
                                       'not be None' % field_class.__name__,
                                       field.validate,
                                       [None])
      self.assertRaises(messages.ValidationError,
                        field.validate,
                        (None,))
    self.ActionOnAllFieldClasses(action)
  def testValidateElement(self):
    """Test element validation of values."""
    values = {messages.IntegerField: 10,
              messages.FloatField: 1.5,
              messages.BooleanField: False,
              messages.BytesField: 'abc',
              messages.StringField: u'abc',
              }
    def action(field_class):
      # Optional.
      field = field_class(1)
      field.validate_element(values[field_class])
      # Required.
      field = field_class(1, required=True)
      field.validate_element(values[field_class])
      # Repeated.  validate_element always takes a single element; sequences
      # are rejected even for repeated fields.
      field = field_class(1, repeated=True)
      self.assertRaises(messages.ValidationError,
                        field.validate_element,
                        [])
      self.assertRaises(messages.ValidationError,
                        field.validate_element,
                        ())
      # (An identical duplicate of this call was removed.)
      field.validate_element(values[field_class])
      # Right value, but wrapped in a sequence.
      self.assertRaises(messages.ValidationError,
                        field.validate_element,
                        [values[field_class]])
      self.assertRaises(messages.ValidationError,
                        field.validate_element,
                        (values[field_class],))
    # BUG FIX: this call was missing, so the assertions above never ran.
    self.ActionOnAllFieldClasses(action)
  def testReadOnly(self):
    """Test that objects are all read-only."""
    def action(field_class):
      field = field_class()
      self.assertRaises(AttributeError,
                        setattr,
                        field,
                        'number',
                        20)
      self.assertRaises(AttributeError,
                        setattr,
                        field,
                        'anything_else',
                        'whatever')
    self.ActionOnAllFieldClasses(action)
  def testMessageField(self):
    """Test the construction of message fields."""
    # Only strict subclasses of Message are acceptable field types.
    self.assertRaises(messages.FieldDefinitionError,
                      messages.MessageField,
                      str)
    self.assertRaises(messages.FieldDefinitionError,
                      messages.MessageField,
                      messages.Message)
    class MyMessage(messages.Message):
      pass
    field = messages.MessageField(MyMessage)
    self.assertEquals(MyMessage, field.type)
  def testMessageField_ForwardReference(self):
    """Test the construction of forward reference message fields."""
    # Forward references resolve by name at module scope, so the classes
    # must temporarily be module globals.
    global MyMessage
    global ForwardMessage
    try:
      class MyMessage(messages.Message):
        self_reference = messages.MessageField('MyMessage')
        forward = messages.MessageField('ForwardMessage')
        nested = messages.MessageField('ForwardMessage.NestedMessage')
        inner = messages.MessageField('Inner')
        class Inner(messages.Message):
          sibling = messages.MessageField('Sibling')
        class Sibling(messages.Message):
          pass
      class ForwardMessage(messages.Message):
        class NestedMessage(messages.Message):
          pass
      self.assertEquals(MyMessage,
                        MyMessage.field_by_name('self_reference').type)
      self.assertEquals(ForwardMessage,
                        MyMessage.field_by_name('forward').type)
      self.assertEquals(ForwardMessage.NestedMessage,
                        MyMessage.field_by_name('nested').type)
      self.assertEquals(MyMessage.Inner,
                        MyMessage.field_by_name('inner').type)
      self.assertEquals(MyMessage.Sibling,
                        MyMessage.Inner.field_by_name('sibling').type)
    finally:
      # Clean up the module globals; only NameError (class never defined)
      # is expected here -- the previous bare except hid real errors.
      try:
        del MyMessage
        del ForwardMessage
      except NameError:
        pass
  def testMessageField_WrongType(self):
    """Test that forward referencing the wrong type raises an error."""
    global AnEnum
    try:
      class AnEnum(messages.Enum):
        pass
      class AnotherMessage(messages.Message):
        a_field = messages.MessageField('AnEnum')
      # Resolution happens lazily on first access of the 'type' property.
      self.assertRaises(messages.FieldDefinitionError,
                        getattr,
                        AnotherMessage.field_by_name('a_field'),
                        'type')
    finally:
      del AnEnum
  def testMessageFieldValidate(self):
    """Test validation on message field."""
    class MyMessage(messages.Message):
      pass
    class AnotherMessage(messages.Message):
      pass
    field = messages.MessageField(MyMessage)
    field.validate(MyMessage())
    self.assertRaises(messages.ValidationError,
                      field.validate,
                      AnotherMessage())
  def testMessageFieldMessageType(self):
    """Test message_type property."""
    class MyMessage(messages.Message):
      pass
    class HasMessage(messages.Message):
      field = messages.MessageField(MyMessage)
    self.assertEqual(HasMessage.field.type, HasMessage.field.message_type)
  def testMessageFieldValueFromMessage(self):
    """value_from_message passes a correctly-typed instance through."""
    class MyMessage(messages.Message):
      pass
    class HasMessage(messages.Message):
      field = messages.MessageField(MyMessage)
    instance = MyMessage()
    self.assertIs(instance, HasMessage.field.value_from_message(instance))
  def testMessageFieldValueFromMessageWrongType(self):
    """value_from_message rejects values of the wrong type."""
    class MyMessage(messages.Message):
      pass
    class HasMessage(messages.Message):
      field = messages.MessageField(MyMessage)
    self.assertRaisesWithRegexpMatch(
        messages.DecodeError,
        'Expected type MyMessage, got int: 10',
        HasMessage.field.value_from_message, 10)
  def testMessageFieldValueToMessage(self):
    """value_to_message passes a correctly-typed instance through."""
    class MyMessage(messages.Message):
      pass
    class HasMessage(messages.Message):
      field = messages.MessageField(MyMessage)
    instance = MyMessage()
    self.assertIs(instance, HasMessage.field.value_to_message(instance))
  def testMessageFieldValueToMessageWrongType(self):
    """value_to_message rejects instances of a different message type."""
    class MyMessage(messages.Message):
      pass
    class MyOtherMessage(messages.Message):
      pass
    class HasMessage(messages.Message):
      field = messages.MessageField(MyMessage)
    instance = MyOtherMessage()
    self.assertRaisesWithRegexpMatch(
        messages.EncodeError,
        'Expected type MyMessage, got MyOtherMessage: <MyOtherMessage>',
        HasMessage.field.value_to_message, instance)
  def testIntegerField_AllowLong(self):
    """Test that the integer field allows for longs."""
    # Python 2 only: the long builtin does not exist on Python 3.
    messages.IntegerField(default=long(10))
  def testMessageFieldValidate_Initialized(self):
    """Test validation on message field."""
    class MyMessage(messages.Message):
      field1 = messages.IntegerField(required=True)
    field = messages.MessageField(MyMessage)
    # Will validate messages where is_initialized() is False.
    message = MyMessage()
    field.validate(message)
    message.field1 = 20
    field.validate(message)
  def testEnumField(self):
    """Test the construction of enum fields."""
    self.assertRaises(messages.FieldDefinitionError,
                      messages.EnumField,
                      str)
    self.assertRaises(messages.FieldDefinitionError,
                      messages.EnumField,
                      messages.Enum)
    class Color(messages.Enum):
      RED = 1
      GREEN = 2
      BLUE = 3
    field = messages.EnumField(Color)
    self.assertEquals(Color, field.type)
    class Another(messages.Enum):
      VALUE = 1
    # The default must belong to the field's own enum type.
    self.assertRaises(messages.InvalidDefaultError,
                      messages.EnumField,
                      Color,
                      default=Another.VALUE)
  def testEnumField_ForwardReference(self):
    """Test the construction of forward reference enum fields."""
    global MyMessage
    global ForwardEnum
    global ForwardMessage
    try:
      class MyMessage(messages.Message):
        forward = messages.EnumField('ForwardEnum')
        nested = messages.EnumField('ForwardMessage.NestedEnum')
        inner = messages.EnumField('Inner')
        class Inner(messages.Enum):
          pass
      class ForwardEnum(messages.Enum):
        pass
      class ForwardMessage(messages.Message):
        class NestedEnum(messages.Enum):
          pass
      self.assertEquals(ForwardEnum,
                        MyMessage.field_by_name('forward').type)
      self.assertEquals(ForwardMessage.NestedEnum,
                        MyMessage.field_by_name('nested').type)
      self.assertEquals(MyMessage.Inner,
                        MyMessage.field_by_name('inner').type)
    finally:
      # Only NameError is expected during cleanup (see above).
      try:
        del MyMessage
        del ForwardEnum
        del ForwardMessage
      except NameError:
        pass
  def testEnumField_WrongType(self):
    """Test that forward referencing the wrong type raises an error."""
    global AMessage
    try:
      class AMessage(messages.Message):
        pass
      class AnotherMessage(messages.Message):
        a_field = messages.EnumField('AMessage')
      self.assertRaises(messages.FieldDefinitionError,
                        getattr,
                        AnotherMessage.field_by_name('a_field'),
                        'type')
    finally:
      del AMessage
  def testMessageDefinition(self):
    """Test that message definition is set on fields."""
    class MyMessage(messages.Message):
      my_field = messages.StringField()
    self.assertEquals(MyMessage,
                      MyMessage.field_by_name('my_field').message_definition())
  def testNoneAssignment(self):
    """Test that explicitly assigning None changes comparison."""
    class MyMessage(messages.Message):
      my_field = messages.StringField()
    m1 = MyMessage()
    m2 = MyMessage()
    # Explicitly-assigned None differs from never-assigned.
    m2.my_field = None
    self.assertNotEquals(m1, m2)
  def testNonAsciiStr(self):
    """Test validation fails for non-ascii StringField values."""
    class Thing(messages.Message):
      string_field = messages.StringField()
    thing = Thing()
    self.assertRaisesWithRegexpMatch(
        messages.ValidationError,
        'Field string_field encountered non-ASCII string',
        setattr, thing, 'string_field', test_util.BINARY)
  def testCoerceValue(self):
    """Test coercing assorted values to an IntegerField's type."""
    class Person(messages.Message):
      weight = messages.IntegerField()
    p = Person()
    p.weight = Person.weight.coerce(123)
    self.assertEquals(123, p.weight)
    # Floats are truncated to int.
    p.weight = Person.weight.coerce(123.45)
    self.assertEquals(123, p.weight)
    # Numeric strings are parsed.
    p.weight = Person.weight.coerce('123')
    self.assertEquals(123, p.weight)
    # None passes through untouched.
    p.weight = Person.weight.coerce(None)
    self.assertEquals(None, p.weight)
  def testCoerceValueInvalid(self):
    """Test that uncoercible values raise ValueError."""
    class Person(messages.Message):
      weight = messages.IntegerField()
    self.assertRaises(ValueError, lambda: Person.weight.coerce({}))
class MessageTest(test_util.TestCase):
"""Tests for message class."""
  def CreateMessageClass(self):
    """Creates a simple message class with 3 fields.
    Fields are defined in alphabetical order but with conflicting numeric
    order.
    Returns:
      New message class with fields a3, b1 and c2.
    """
    class ComplexMessage(messages.Message):
      a3 = messages.IntegerField()
      b1 = messages.StringField()
      c2 = messages.StringField()
    return ComplexMessage
def testStrictAssignment(self):
"""Tests that cannot assign to unknown or non-reserved attributes."""
class SimpleMessage(messages.Message):
field = messages.IntegerField()
simple_message = SimpleMessage()
self.assertRaises(AttributeError,
setattr,
simple_message,
'does_not_exist',
10)
def testListAssignmentDoesNotCopy(self):
class SimpleMessage(messages.Message):
repeated = messages.IntegerField(repeated=True)
message = SimpleMessage()
original = message.repeated
message.repeated = []
self.assertFalse(original is message.repeated)
def testValidate_Optional(self):
"""Tests validation of optional fields."""
class SimpleMessage(messages.Message):
non_required = messages.IntegerField()
simple_message = SimpleMessage()
simple_message.check_initialized()
simple_message.non_required = 10
simple_message.check_initialized()
def testValidate_Required(self):
"""Tests validation of required fields."""
class SimpleMessage(messages.Message):
required = messages.IntegerField(required=True)
simple_message = SimpleMessage()
self.assertRaises(messages.ValidationError,
simple_message.check_initialized)
simple_message.required = 10
simple_message.check_initialized()
def testValidate_Repeated(self):
"""Tests validation of repeated fields."""
class SimpleMessage(messages.Message):
repeated = messages.IntegerField(repeated=True)
simple_message = SimpleMessage()
# Check valid values.
for valid_value in [], [10], [10, 20], (), (10,), (10, 20):
simple_message.repeated = valid_value
simple_message.check_initialized()
# Check cleared.
simple_message.repeated = []
simple_message.check_initialized()
# Check invalid values.
for invalid_value in 10, ['10', '20'], [None], (None,):
self.assertRaises(messages.ValidationError,
setattr, simple_message, 'repeated', invalid_value)
def testIsInitialized(self):
"""Tests is_initialized."""
class SimpleMessage(messages.Message):
required = messages.IntegerField(required=True)
simple_message = SimpleMessage()
self.assertFalse(simple_message.is_initialized())
simple_message.required = 10
self.assertTrue(simple_message.is_initialized())
def testIsInitializedNestedField(self):
"""Tests is_initialized for nested fields."""
class SimpleMessage(messages.Message):
required = messages.IntegerField(required=True)
class NestedMessage(messages.Message):
simple = messages.MessageField(SimpleMessage)
simple_message = SimpleMessage()
self.assertFalse(simple_message.is_initialized())
nested_message = NestedMessage(simple=simple_message)
self.assertFalse(nested_message.is_initialized())
simple_message.required = 10
self.assertTrue(simple_message.is_initialized())
self.assertTrue(nested_message.is_initialized())
  def testInitializeNestedFieldFromDict(self):
    """Tests initializing nested fields from dict."""
    class SimpleMessage(messages.Message):
      required = messages.IntegerField(required=True)
    class NestedMessage(messages.Message):
      simple = messages.MessageField(SimpleMessage)
    class RepeatedMessage(messages.Message):
      simple = messages.MessageField(SimpleMessage, repeated=True)
    # A dict converts to a SimpleMessage via the constructor ...
    nested_message1 = NestedMessage(simple={'required': 10})
    self.assertTrue(nested_message1.is_initialized())
    self.assertTrue(nested_message1.simple.is_initialized())
    # ... and via attribute assignment.
    nested_message2 = NestedMessage()
    nested_message2.simple = {'required': 10}
    self.assertTrue(nested_message2.is_initialized())
    self.assertTrue(nested_message2.simple.is_initialized())
    # Repeated fields accept a mix of dicts and message instances.
    repeated_values = [{}, {'required': 10}, SimpleMessage(required=20)]
    repeated_message1 = RepeatedMessage(simple=repeated_values)
    self.assertEquals(3, len(repeated_message1.simple))
    # The first element ({}) is missing its required field.
    self.assertFalse(repeated_message1.is_initialized())
    repeated_message1.simple[0].required = 0
    self.assertTrue(repeated_message1.is_initialized())
    repeated_message2 = RepeatedMessage()
    repeated_message2.simple = repeated_values
    self.assertEquals(3, len(repeated_message2.simple))
    self.assertFalse(repeated_message2.is_initialized())
    repeated_message2.simple[0].required = 0
    self.assertTrue(repeated_message2.is_initialized())
def testNestedMethodsNotAllowed(self):
"""Test that method definitions on Message classes are not allowed."""
def action():
class WithMethods(messages.Message):
def not_allowed(self):
pass
self.assertRaises(messages.MessageDefinitionError,
action)
def testNestedAttributesNotAllowed(self):
"""Test that attribute assignment on Message classes are not allowed."""
class Color(messages.Enum):
RED = 1
GREEN = 2
BLUE = 3
def int_attribute():
class WithMethods(messages.Message):
not_allowed = 1
def string_attribute():
class WithMethods(messages.Message):
not_allowed = 'not allowed'
def enum_attribute():
class WithMethods(messages.Message):
not_allowed = Color.RED
for action in (int_attribute, string_attribute, enum_attribute):
self.assertRaises(messages.MessageDefinitionError, action)
def testNameIsSetOnFields(self):
"""Make sure name is set on fields after Message class init."""
class HasNamedFields(messages.Message):
field = messages.StringField()
self.assertEquals('field', HasNamedFields.field_by_name('field').name)
def testSubclassingMessageDisallowed(self):
"""Not permitted to create sub-classes of message classes."""
class SuperClass(messages.Message):
pass
def action():
class SubClass(SuperClass):
pass
self.assertRaises(messages.MessageDefinitionError,
action)
def testAllFields(self):
"""Test all_fields method."""
ComplexMessage = self.CreateMessageClass()
fields = list(ComplexMessage.all_fields())
# Order does not matter, so sort now.
fields = sorted(fields, lambda f1, f2: cmp(f1.name, f2.name))
self.assertEquals(3, len(fields))
self.assertEquals('a3', fields[0].name)
self.assertEquals('b1', fields[1].name)
self.assertEquals('c2', fields[2].name)
def testFieldByName(self):
"""Test getting field by name."""
ComplexMessage = self.CreateMessageClass()
self.assertEquals('a3', ComplexMessage.field_by_name('a3').name)
self.assertEquals('b1', ComplexMessage.field_by_name('b1').name)
self.assertEquals('c2', ComplexMessage.field_by_name('c2').name)
self.assertRaises(KeyError,
ComplexMessage.field_by_name,
'unknown')
def test_has_value_set(self):
class SimpleMessage(messages.Message):
s = messages.StringField(name='string')
i = messages.IntegerField()
r = messages.StringField(repeated=True)
m = SimpleMessage()
self.assertFalse(m.has_value_assigned('string'))
self.assertFalse(m.has_value_assigned('i'))
self.assertFalse(m.has_value_assigned('r'))
self.assertFalse(SimpleMessage.s.is_set(m))
self.assertFalse(SimpleMessage.i.is_set(m))
self.assertFalse(SimpleMessage.r.is_set(m))
self.assertEquals(None, m.s)
self.assertEquals(None, m.i)
self.assertEquals([], m.r)
m = SimpleMessage()
m.s = None
self.assertTrue(m.has_value_assigned('string'))
self.assertFalse(m.has_value_assigned('i'))
self.assertFalse(m.has_value_assigned('r'))
self.assertTrue(SimpleMessage.s.is_set(m))
self.assertFalse(SimpleMessage.i.is_set(m))
self.assertFalse(SimpleMessage.r.is_set(m))
self.assertEquals(None, m.s)
self.assertEquals(None, m.i)
self.assertEquals([], m.r)
m = SimpleMessage()
m.i = None
self.assertFalse(m.has_value_assigned('string'))
self.assertTrue(m.has_value_assigned('i'))
self.assertFalse(m.has_value_assigned('r'))
self.assertFalse(SimpleMessage.s.is_set(m))
self.assertTrue(SimpleMessage.i.is_set(m))
self.assertFalse(SimpleMessage.r.is_set(m))
self.assertEquals(None, m.s)
self.assertEquals(None, m.i)
self.assertEquals([], m.r)
m = SimpleMessage()
m.s = "foo"
self.assertTrue(m.has_value_assigned('string'))
self.assertFalse(m.has_value_assigned('i'))
self.assertFalse(m.has_value_assigned('r'))
self.assertTrue(SimpleMessage.s.is_set(m))
self.assertFalse(SimpleMessage.i.is_set(m))
self.assertFalse(SimpleMessage.r.is_set(m))
self.assertEquals("foo", m.s)
self.assertEquals(None, m.i)
self.assertEquals([], m.r)
m = SimpleMessage()
m.i = 123
self.assertFalse(m.has_value_assigned('string'))
self.assertTrue(m.has_value_assigned('i'))
self.assertFalse(m.has_value_assigned('r'))
self.assertFalse(SimpleMessage.s.is_set(m))
self.assertTrue(SimpleMessage.i.is_set(m))
self.assertFalse(SimpleMessage.r.is_set(m))
self.assertEquals(None, m.s)
self.assertEquals(123, m.i)
self.assertEquals([], m.r)
m = SimpleMessage()
m.r = []
self.assertFalse(m.has_value_assigned('string'))
self.assertFalse(m.has_value_assigned('i'))
self.assertTrue(m.has_value_assigned('r'))
self.assertFalse(SimpleMessage.s.is_set(m))
self.assertFalse(SimpleMessage.i.is_set(m))
self.assertTrue(SimpleMessage.r.is_set(m))
self.assertEquals(None, m.s)
self.assertEquals(None, m.i)
self.assertEquals([], m.r)
m = SimpleMessage()
m.r = ['foo', 'bar']
self.assertFalse(m.has_value_assigned('string'))
self.assertFalse(m.has_value_assigned('i'))
self.assertTrue(m.has_value_assigned('r'))
self.assertFalse(SimpleMessage.s.is_set(m))
self.assertFalse(SimpleMessage.i.is_set(m))
self.assertTrue(SimpleMessage.r.is_set(m))
self.assertEquals(None, m.s)
self.assertEquals(None, m.i)
self.assertEquals(['foo', 'bar'], m.r)
  def test_has_value_set_repeated_with_alias(self):
    # A field aliased via name='string' is tracked under its wire name;
    # querying has_value_assigned() with the Python attribute name 's'
    # reports False even after assignment.
    class SimpleMessage(messages.Message):
      s = messages.StringField(name='string', repeated=True)
      i = messages.IntegerField()
      r = messages.StringField(repeated=True)
    m = SimpleMessage(s=['a', 'b'])
    self.assertFalse(m.has_value_assigned('s'))
def test_has_value_set_repeated_values(self):
class WithRepeated(messages.Message):
foo = messages.IntegerField(repeated=True)
msg = WithRepeated()
self.assertEquals([], msg.foo)
self.assertFalse(WithRepeated.foo.is_set(msg))
msg = WithRepeated()
msg.foo = []
self.assertTrue(WithRepeated.foo.is_set(msg))
msg = WithRepeated()
msg.foo.append(1)
self.assertTrue(WithRepeated.foo.is_set(msg))
def test_repeated_values_dont_cross_messages(self):
class WithRepeated(messages.Message):
foo = messages.IntegerField(repeated=True)
msg1 = WithRepeated()
msg2 = WithRepeated()
self.assertEquals([], msg1.foo)
self.assertEquals([], msg2.foo)
self.assertFalse(WithRepeated.foo.is_set(msg1))
self.assertFalse(WithRepeated.foo.is_set(msg2))
msg1.foo = msg2.foo
self.assertEquals([], msg1.foo)
self.assertEquals([], msg2.foo)
self.assertTrue(WithRepeated.foo.is_set(msg1))
self.assertFalse(WithRepeated.foo.is_set(msg2))
msg1.foo.append(1)
self.assertEquals([1], msg1.foo)
self.assertEquals([], msg2.foo)
self.assertTrue(WithRepeated.foo.is_set(msg1))
self.assertFalse(WithRepeated.foo.is_set(msg2))
def testGetAssignedValue(self):
"""Test getting the assigned value of a field."""
class SomeMessage(messages.Message):
a_value = messages.StringField(default=u'a default')
message = SomeMessage()
self.assertRaises(KeyError, lambda: message.get_assigned_value('a_value'))
message.a_value = u'a string'
self.assertEquals(u'a string', message.get_assigned_value('a_value'))
message.a_value = u'a default'
self.assertEquals(u'a default', message.get_assigned_value('a_value'))
self.assertRaises(KeyError, lambda: message.get_assigned_value('no_such_field'))
def test_unset(self):
"""Test resetting a field value."""
class SomeMessage(messages.Message):
a_value = messages.StringField(default=u'a default')
message = SomeMessage()
self.assertEquals(u'a default', message.a_value)
message.unset('a_value')
self.assertEquals(u'a default', message.a_value)
message.a_value = u'a new value'
self.assertEquals(u'a new value', message.a_value)
message.unset('a_value')
self.assertEquals(u'a default', message.a_value)
  def testAllowNestedEnums(self):
    """Test allowing nested enums in a message definition."""
    class Trade(messages.Message):
      class Duration(messages.Enum):
        GTC = 1
        DAY = 2
      class Currency(messages.Enum):
        USD = 1
        GBP = 2
        INR = 3
    # __enums__ lists the nested enum names alphabetically.
    # Sorted by name order seems to be the only feasible option.
    self.assertEquals(['Currency', 'Duration'], Trade.__enums__)
    # Message definition will now be set on Enumerated objects.
    self.assertEquals(Trade, Trade.Duration.message_definition())
  def testAllowNestedMessages(self):
    """Test allowing nested messages in a message definition."""
    class Trade(messages.Message):
      class Lot(messages.Message):
        pass
      class Agent(messages.Message):
        pass
    # __messages__ lists the nested message names alphabetically.
    # Sorted by name order seems to be the only feasible option.
    self.assertEquals(['Agent', 'Lot'], Trade.__messages__)
    # Nested classes report the enclosing class as their definition.
    self.assertEquals(Trade, Trade.Agent.message_definition())
    self.assertEquals(Trade, Trade.Lot.message_definition())
    # But not Message itself.
    def action():
      class Trade(messages.Message):
        NiceTry = messages.Message
    self.assertRaises(messages.MessageDefinitionError, action)
def testDisallowClassAssignments(self):
"""Test setting class attributes may not happen."""
class MyMessage(messages.Message):
pass
self.assertRaises(AttributeError,
setattr,
MyMessage,
'x',
'do not assign')
  def testEquality(self):
    """Test message class equality."""
    # Comparison against enums must work.
    class MyEnum(messages.Enum):
      val1 = 1
      val2 = 2
    # Comparisons against nested messages must work.
    class AnotherMessage(messages.Message):
      string = messages.StringField()
    class MyMessage(messages.Message):
      field1 = messages.IntegerField()
      field2 = messages.EnumField(MyEnum)
      field3 = messages.MessageField(AnotherMessage)
    message1 = MyMessage()
    # Not equal to other types or other message classes.
    self.assertNotEquals('hi', message1)
    self.assertNotEquals(AnotherMessage(), message1)
    self.assertEquals(message1, message1)
    message2 = MyMessage()
    self.assertEquals(message1, message2)
    # Integer field: differs, then matches.
    message1.field1 = 10
    self.assertNotEquals(message1, message2)
    message2.field1 = 20
    self.assertNotEquals(message1, message2)
    message2.field1 = 10
    self.assertEquals(message1, message2)
    # Enum field: differs, then matches.
    message1.field2 = MyEnum.val1
    self.assertNotEquals(message1, message2)
    message2.field2 = MyEnum.val2
    self.assertNotEquals(message1, message2)
    message2.field2 = MyEnum.val1
    self.assertEquals(message1, message2)
    # Nested message field: compared by value, not identity.
    message1.field3 = AnotherMessage()
    message1.field3.string = 'value1'
    self.assertNotEquals(message1, message2)
    message2.field3 = AnotherMessage()
    message2.field3.string = 'value2'
    self.assertNotEquals(message1, message2)
    message2.field3.string = 'value1'
    self.assertEquals(message1, message2)
  def testEqualityWithUnknowns(self):
    """Test message class equality with unknown fields."""
    class MyMessage(messages.Message):
      field1 = messages.IntegerField()
    message1 = MyMessage()
    message2 = MyMessage()
    self.assertEquals(message1, message2)
    # Unrecognized fields are ignored by equality comparison.
    message1.set_unrecognized_field('unknown1', 'value1',
                                    messages.Variant.STRING)
    self.assertEquals(message1, message2)
    message1.set_unrecognized_field('unknown2', ['asdf', 3],
                                    messages.Variant.STRING)
    message1.set_unrecognized_field('unknown3', 4.7,
                                    messages.Variant.DOUBLE)
    self.assertEquals(message1, message2)
def testUnrecognizedFieldInvalidVariant(self):
class MyMessage(messages.Message):
field1 = messages.IntegerField()
message1 = MyMessage()
self.assertRaises(TypeError, message1.set_unrecognized_field, 'unknown4',
{'unhandled': 'type'}, None)
self.assertRaises(TypeError, message1.set_unrecognized_field, 'unknown4',
{'unhandled': 'type'}, 123)
  def testRepr(self):
    """Test representation of Message object."""
    class MyMessage(messages.Message):
      integer_value = messages.IntegerField()
      string_value = messages.StringField()
      unassigned = messages.StringField()
      unassigned_with_default = messages.StringField(default=u'a default')
    my_message = MyMessage()
    my_message.integer_value = 42
    my_message.string_value = u'A string'
    # Only explicitly assigned fields appear in repr; the u'...' form is
    # the Python 2 unicode repr.
    self.assertEquals("<MyMessage\n integer_value: 42\n"
                      " string_value: u'A string'>", repr(my_message))
def testValidation(self):
"""Test validation of message values."""
# Test optional.
class SubMessage(messages.Message):
pass
class Message(messages.Message):
val = messages.MessageField(SubMessage)
message = Message()
message_field = messages.MessageField(Message)
message_field.validate(message)
message.val = SubMessage()
message_field.validate(message)
self.assertRaises(messages.ValidationError,
setattr, message, 'val', [SubMessage()])
# Test required.
class Message(messages.Message):
val = messages.MessageField(SubMessage, required=True)
message = Message()
message_field = messages.MessageField(Message)
message_field.validate(message)
message.val = SubMessage()
message_field.validate(message)
self.assertRaises(messages.ValidationError,
setattr, message, 'val', [SubMessage()])
# Test repeated.
class Message(messages.Message):
val = messages.MessageField(SubMessage, repeated=True)
message = Message()
message_field = messages.MessageField(Message)
message_field.validate(message)
self.assertRaisesWithRegexpMatch(
messages.ValidationError,
"Field val is repeated. Found: <SubMessage>",
setattr, message, 'val', SubMessage())
message.val = [SubMessage()]
message_field.validate(message)
def testDefinitionName(self):
"""Test message name."""
class MyMessage(messages.Message):
pass
module_name = test_util.get_module_name(FieldTest)
self.assertEquals('%s.MyMessage' % module_name,
MyMessage.definition_name())
self.assertEquals(module_name, MyMessage.outer_definition_name())
self.assertEquals(module_name, MyMessage.definition_package())
self.assertEquals(unicode, type(MyMessage.definition_name()))
self.assertEquals(unicode, type(MyMessage.outer_definition_name()))
self.assertEquals(unicode, type(MyMessage.definition_package()))
def testDefinitionName_OverrideModule(self):
"""Test message module is overriden by module package name."""
class MyMessage(messages.Message):
pass
global package
package = 'my.package'
try:
self.assertEquals('my.package.MyMessage', MyMessage.definition_name())
self.assertEquals('my.package', MyMessage.outer_definition_name())
self.assertEquals('my.package', MyMessage.definition_package())
self.assertEquals(unicode, type(MyMessage.definition_name()))
self.assertEquals(unicode, type(MyMessage.outer_definition_name()))
self.assertEquals(unicode, type(MyMessage.definition_package()))
finally:
del package
def testDefinitionName_NoModule(self):
"""Test what happens when there is no module for message."""
class MyMessage(messages.Message):
pass
original_modules = sys.modules
sys.modules = dict(sys.modules)
try:
del sys.modules[__name__]
self.assertEquals('MyMessage', MyMessage.definition_name())
self.assertEquals(None, MyMessage.outer_definition_name())
self.assertEquals(None, MyMessage.definition_package())
self.assertEquals(unicode, type(MyMessage.definition_name()))
finally:
sys.modules = original_modules
def testDefinitionName_Nested(self):
"""Test nested message names."""
class MyMessage(messages.Message):
class NestedMessage(messages.Message):
class NestedMessage(messages.Message):
pass
module_name = test_util.get_module_name(MessageTest)
self.assertEquals('%s.MyMessage.NestedMessage' % module_name,
MyMessage.NestedMessage.definition_name())
self.assertEquals('%s.MyMessage' % module_name,
MyMessage.NestedMessage.outer_definition_name())
self.assertEquals(module_name,
MyMessage.NestedMessage.definition_package())
self.assertEquals('%s.MyMessage.NestedMessage.NestedMessage' % module_name,
MyMessage.NestedMessage.NestedMessage.definition_name())
self.assertEquals(
'%s.MyMessage.NestedMessage' % module_name,
MyMessage.NestedMessage.NestedMessage.outer_definition_name())
self.assertEquals(
module_name,
MyMessage.NestedMessage.NestedMessage.definition_package())
def testMessageDefinition(self):
"""Test that enumeration knows its enclosing message definition."""
class OuterMessage(messages.Message):
class InnerMessage(messages.Message):
pass
self.assertEquals(None, OuterMessage.message_definition())
self.assertEquals(OuterMessage,
OuterMessage.InnerMessage.message_definition())
def testConstructorKwargs(self):
"""Test kwargs via constructor."""
class SomeMessage(messages.Message):
name = messages.StringField()
number = messages.IntegerField()
expected = SomeMessage()
expected.name = 'my name'
expected.number = 200
self.assertEquals(expected, SomeMessage(name='my name', number=200))
def testConstructorNotAField(self):
"""Test kwargs via constructor with wrong names."""
class SomeMessage(messages.Message):
pass
self.assertRaisesWithRegexpMatch(
AttributeError,
"May not assign arbitrary value 'does_not_exist' to message 'SomeMessage'",
SomeMessage,
does_not_exist=10)
def testGetUnsetRepeatedValue(self):
class SomeMessage(messages.Message):
repeated = messages.IntegerField(repeated=True)
instance = SomeMessage()
self.assertEquals([], instance.repeated)
self.assertTrue(isinstance(instance.repeated, messages.FieldList))
def testCompareAutoInitializedRepeatedFields(self):
class SomeMessage(messages.Message):
repeated = messages.IntegerField(repeated=True)
message1 = SomeMessage(repeated=[])
message2 = SomeMessage()
self.assertNotEquals(message1, message2)
def testUnknownValues(self):
"""Test message class equality with unknown fields."""
class MyMessage(messages.Message):
field1 = messages.IntegerField()
message = MyMessage()
self.assertEquals([], message.all_unrecognized_fields())
self.assertEquals((None, None),
message.get_unrecognized_field_info('doesntexist'))
self.assertEquals((None, None),
message.get_unrecognized_field_info(
'doesntexist', None, None))
self.assertEquals(('defaultvalue', 'defaultwire'),
message.get_unrecognized_field_info(
'doesntexist', 'defaultvalue', 'defaultwire'))
self.assertEquals((3, None),
message.get_unrecognized_field_info(
'doesntexist', value_default=3))
message.set_unrecognized_field('exists', 9.5, messages.Variant.DOUBLE)
self.assertEquals(1, len(message.all_unrecognized_fields()))
self.assertIn('exists', message.all_unrecognized_fields())
self.assertEquals((9.5, messages.Variant.DOUBLE),
message.get_unrecognized_field_info('exists'))
self.assertEquals((9.5, messages.Variant.DOUBLE),
message.get_unrecognized_field_info('exists', 'type',
1234))
self.assertEquals((1234, None),
message.get_unrecognized_field_info('doesntexist', 1234))
message.set_unrecognized_field('another', 'value', messages.Variant.STRING)
self.assertEquals(2, len(message.all_unrecognized_fields()))
self.assertIn('exists', message.all_unrecognized_fields())
self.assertIn('another', message.all_unrecognized_fields())
self.assertEquals((9.5, messages.Variant.DOUBLE),
message.get_unrecognized_field_info('exists'))
self.assertEquals(('value', messages.Variant.STRING),
message.get_unrecognized_field_info('another'))
message.set_unrecognized_field('typetest1', ['list', 0, ('test',)],
messages.Variant.STRING)
self.assertEquals((['list', 0, ('test',)], messages.Variant.STRING),
message.get_unrecognized_field_info('typetest1'))
message.set_unrecognized_field('typetest2', '', messages.Variant.STRING)
self.assertEquals(('', messages.Variant.STRING),
message.get_unrecognized_field_info('typetest2'))
def testStripUnknownValues(self):
"""Test message class equality with unknown fields."""
class MyMessage(messages.Message):
field1 = messages.IntegerField()
message = MyMessage()
message.set_unrecognized_field('exists', 9.5, messages.Variant.DOUBLE)
self.assertEquals(1, len(message.all_unrecognized_fields()))
message.strip_unrecognized_fields()
self.assertEquals(0, len(message.all_unrecognized_fields()))
class FindDefinitionTest(test_util.TestCase):
  """Test finding definitions relative to various definitions and modules."""

  def setUp(self):
    """Set up module-space. Starts off empty."""
    self.modules = {}

  def DefineModule(self, name):
    """Define a module and its parents in module space.

    Modules that are already defined in self.modules are not re-created.

    Args:
      name: Fully qualified name of modules to create.

    Returns:
      Deepest nested module. For example:

        DefineModule('a.b.c')  # Returns c.
    """
    name_path = name.split('.')
    full_path = []
    for node in name_path:
      full_path.append(node)
      full_name = '.'.join(full_path)
      self.modules.setdefault(full_name, new.module(full_name))
    return self.modules[name]

  def DefineMessage(self, module, name, children=None, add_to_module=True):
    """Define a new Message class in the context of a module.

    Used for easily describing complex Message hierarchy. Message is defined
    including all child definitions.

    Args:
      module: Fully qualified name of module to place Message class in.
      name: Name of Message to define within module.
      children: Define any level of nesting of children definitions. To define
        a message, map the name to another dictionary. The dictionary can
        itself contain additional definitions, and so on. To map to an Enum,
        define the Enum class separately and map it by name.
      add_to_module: If True, new Message class is added to module. If False,
        new Message is not added.

    Returns:
      The newly created Message class.
    """
    # Copy the children mapping instead of using a shared mutable default:
    # the previous signature used children={} and the body mutated the dict
    # (adding '__module__', replacing nested dicts with classes), so calls
    # made without an explicit argument shared and polluted one dictionary,
    # and callers saw their own argument rewritten in place.
    children = {} if children is None else dict(children)
    # Make sure module exists.
    module_instance = self.DefineModule(module)
    # Recursively define all child messages.
    for attribute, value in children.items():
      if isinstance(value, dict):
        children[attribute] = self.DefineMessage(
            module, attribute, value, False)
    # Override default __module__ variable.
    children['__module__'] = module
    # Instantiate and possibly add to module.
    message_class = new.classobj(name, (messages.Message,), dict(children))
    if add_to_module:
      setattr(module_instance, name, message_class)
    return message_class

  def Importer(self, module, globals='', locals='', fromlist=None):
    """Importer function.

    Acts like __import__. Only loads modules from self.modules. Does not
    try to load real modules defined elsewhere. Does not try to handle
    relative imports.

    Args:
      module: Fully qualified name of module to load from self.modules.

    Raises:
      ImportError: When the requested module is not in self.modules.
    """
    if fromlist is None:
      # Plain 'import a.b.c' binds the top-level package, so emulate
      # __import__ by returning the root module.
      module = module.split('.')[0]
    try:
      return self.modules[module]
    except KeyError:
      raise ImportError()

  def testNoSuchModule(self):
    """Test searching for definitions that do not exist."""
    self.assertRaises(messages.DefinitionNotFoundError,
                      messages.find_definition,
                      'does.not.exist',
                      importer=self.Importer)

  def testRefersToModule(self):
    """Test that referring to a module does not return that module."""
    self.DefineModule('i.am.a.module')
    self.assertRaises(messages.DefinitionNotFoundError,
                      messages.find_definition,
                      'i.am.a.module',
                      importer=self.Importer)

  def testNoDefinition(self):
    """Test not finding a definition in an existing module."""
    self.DefineModule('i.am.a.module')
    self.assertRaises(messages.DefinitionNotFoundError,
                      messages.find_definition,
                      'i.am.a.module.MyMessage',
                      importer=self.Importer)

  def testNotADefinition(self):
    """Test trying to fetch something that is not a definition."""
    module = self.DefineModule('i.am.a.module')
    setattr(module, 'A', 'a string')
    self.assertRaises(messages.DefinitionNotFoundError,
                      messages.find_definition,
                      'i.am.a.module.A',
                      importer=self.Importer)

  def testGlobalFind(self):
    """Test finding definitions from fully qualified module names."""
    A = self.DefineMessage('a.b.c', 'A', {})
    self.assertEquals(A, messages.find_definition('a.b.c.A',
                                                  importer=self.Importer))
    B = self.DefineMessage('a.b.c', 'B', {'C': {}})
    self.assertEquals(B.C, messages.find_definition('a.b.c.B.C',
                                                    importer=self.Importer))

  def testRelativeToModule(self):
    """Test finding definitions relative to modules."""
    # Define modules.
    a = self.DefineModule('a')
    b = self.DefineModule('a.b')
    c = self.DefineModule('a.b.c')
    # Define messages.
    A = self.DefineMessage('a', 'A')
    B = self.DefineMessage('a.b', 'B')
    C = self.DefineMessage('a.b.c', 'C')
    D = self.DefineMessage('a.b.d', 'D')
    # Find A, B, C and D relative to a.
    self.assertEquals(A, messages.find_definition(
        'A', a, importer=self.Importer))
    self.assertEquals(B, messages.find_definition(
        'b.B', a, importer=self.Importer))
    self.assertEquals(C, messages.find_definition(
        'b.c.C', a, importer=self.Importer))
    self.assertEquals(D, messages.find_definition(
        'b.d.D', a, importer=self.Importer))
    # Find A, B, C and D relative to b.
    self.assertEquals(A, messages.find_definition(
        'A', b, importer=self.Importer))
    self.assertEquals(B, messages.find_definition(
        'B', b, importer=self.Importer))
    self.assertEquals(C, messages.find_definition(
        'c.C', b, importer=self.Importer))
    self.assertEquals(D, messages.find_definition(
        'd.D', b, importer=self.Importer))
    # Find A, B, C and D relative to c. Module d is the same case as c.
    self.assertEquals(A, messages.find_definition(
        'A', c, importer=self.Importer))
    self.assertEquals(B, messages.find_definition(
        'B', c, importer=self.Importer))
    self.assertEquals(C, messages.find_definition(
        'C', c, importer=self.Importer))
    self.assertEquals(D, messages.find_definition(
        'd.D', c, importer=self.Importer))

  def testRelativeToMessages(self):
    """Test finding definitions relative to Message definitions."""
    A = self.DefineMessage('a.b', 'A', {'B': {'C': {}, 'D': {}}})
    B = A.B
    C = A.B.C
    D = A.B.D
    # Find relative to A.
    self.assertEquals(A, messages.find_definition(
        'A', A, importer=self.Importer))
    self.assertEquals(B, messages.find_definition(
        'B', A, importer=self.Importer))
    self.assertEquals(C, messages.find_definition(
        'B.C', A, importer=self.Importer))
    self.assertEquals(D, messages.find_definition(
        'B.D', A, importer=self.Importer))
    # Find relative to B.
    self.assertEquals(A, messages.find_definition(
        'A', B, importer=self.Importer))
    self.assertEquals(B, messages.find_definition(
        'B', B, importer=self.Importer))
    self.assertEquals(C, messages.find_definition(
        'C', B, importer=self.Importer))
    self.assertEquals(D, messages.find_definition(
        'D', B, importer=self.Importer))
    # Find relative to C.
    self.assertEquals(A, messages.find_definition(
        'A', C, importer=self.Importer))
    self.assertEquals(B, messages.find_definition(
        'B', C, importer=self.Importer))
    self.assertEquals(C, messages.find_definition(
        'C', C, importer=self.Importer))
    self.assertEquals(D, messages.find_definition(
        'D', C, importer=self.Importer))
    # Find relative to C searching from c.
    self.assertEquals(A, messages.find_definition(
        'b.A', C, importer=self.Importer))
    self.assertEquals(B, messages.find_definition(
        'b.A.B', C, importer=self.Importer))
    self.assertEquals(C, messages.find_definition(
        'b.A.B.C', C, importer=self.Importer))
    self.assertEquals(D, messages.find_definition(
        'b.A.B.D', C, importer=self.Importer))

  def testAbsoluteReference(self):
    """Test finding absolute definition names."""
    # Define modules.
    a = self.DefineModule('a')
    b = self.DefineModule('a.a')
    # Define messages.
    aA = self.DefineMessage('a', 'A')
    aaA = self.DefineMessage('a.a', 'A')
    # A leading '.' pins the search to the root, so a.A is always found.
    self.assertEquals(aA, messages.find_definition('.a.A', None,
                                                   importer=self.Importer))
    self.assertEquals(aA, messages.find_definition('.a.A', a,
                                                   importer=self.Importer))
    self.assertEquals(aA, messages.find_definition('.a.A', aA,
                                                   importer=self.Importer))
    self.assertEquals(aA, messages.find_definition('.a.A', aaA,
                                                   importer=self.Importer))

  def testFindEnum(self):
    """Test that Enums are found."""
    class Color(messages.Enum):
      pass
    A = self.DefineMessage('a', 'A', {'Color': Color})
    self.assertEquals(
        Color,
        messages.find_definition('Color', A, importer=self.Importer))

  def testFalseScope(self):
    """Test that Message definitions nested in strange objects are hidden."""
    global X
    class X(object):
      class A(messages.Message):
        pass
    self.assertRaises(TypeError, messages.find_definition, 'A', X)
    self.assertRaises(messages.DefinitionNotFoundError,
                      messages.find_definition,
                      'X.A', sys.modules[__name__])

  def testSearchAttributeFirst(self):
    """Make sure not faked out by module, but continues searching."""
    A = self.DefineMessage('a', 'A')
    module_A = self.DefineModule('a.A')
    self.assertEquals(A, messages.find_definition(
        'a.A', None, importer=self.Importer))

  def testUnicodeString(self):
    """Make sure unicode definition names resolve like byte strings."""
    A = self.DefineMessage('a', 'A')
    module_A = self.DefineModule('a.A')
    self.assertEquals(A, messages.find_definition(
        u'a.A', None, importer=self.Importer))
def main():
  """Module entry point: hand control to the unittest CLI runner."""
  unittest.main()


if __name__ == '__main__':
  main()
| |
"""The tests for the time_pattern automation."""
from asynctest.mock import patch
import pytest
import homeassistant.components.automation as automation
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed, async_mock_service, mock_component
from tests.components.automation import common
@pytest.fixture
def calls(hass):
    """Track calls to a mock service."""
    # Register a stub "test.automation" service; the returned list records
    # every service call, so tests can assert on len(calls).
    recorded = async_mock_service(hass, "test", "automation")
    return recorded
@pytest.fixture(autouse=True)
def setup_comp(hass):
    """Initialize components needed by every test in this module."""
    # The automation integration expects the group component to exist.
    mock_component(hass, "group")
async def test_if_fires_when_hour_matches(hass, calls):
    """Test for firing if hour is matching."""
    now = dt_util.utcnow()
    # Freeze "now" at hour 3 next year so the hours==0 trigger cannot fire
    # while the automation is being set up.
    frozen_time = dt_util.utcnow().replace(year=now.year + 1, hour=3)
    config = {
        automation.DOMAIN: {
            "trigger": {
                "platform": "time_pattern",
                "hours": 0,
                "minutes": "*",
                "seconds": "*",
            },
            "action": {"service": "test.automation"},
        }
    }
    with patch(
        "homeassistant.util.dt.utcnow", return_value=frozen_time
    ):
        assert await async_setup_component(hass, automation.DOMAIN, config)
    # A matching hour fires the automation exactly once.
    async_fire_time_changed(hass, now.replace(year=now.year + 2, hour=0))
    await hass.async_block_till_done()
    assert len(calls) == 1
    # Once turned off, a matching time must not fire again.
    await common.async_turn_off(hass)
    await hass.async_block_till_done()
    async_fire_time_changed(hass, now.replace(year=now.year + 1, hour=0))
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_when_minute_matches(hass, calls):
    """Test for firing if minutes are matching."""
    now = dt_util.utcnow()
    # Freeze "now" at minute 30 so the minutes==0 trigger cannot fire
    # during setup.
    frozen_time = dt_util.utcnow().replace(year=now.year + 1, minute=30)
    config = {
        automation.DOMAIN: {
            "trigger": {
                "platform": "time_pattern",
                "hours": "*",
                "minutes": 0,
                "seconds": "*",
            },
            "action": {"service": "test.automation"},
        }
    }
    with patch(
        "homeassistant.util.dt.utcnow", return_value=frozen_time
    ):
        assert await async_setup_component(hass, automation.DOMAIN, config)
    # A matching minute fires the automation exactly once.
    async_fire_time_changed(hass, now.replace(year=now.year + 2, minute=0))
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_when_second_matches(hass, calls):
    """Test for firing if seconds are matching."""
    now = dt_util.utcnow()
    # Freeze "now" at second 30 so the seconds==0 trigger cannot fire
    # during setup.
    frozen_time = dt_util.utcnow().replace(year=now.year + 1, second=30)
    config = {
        automation.DOMAIN: {
            "trigger": {
                "platform": "time_pattern",
                "hours": "*",
                "minutes": "*",
                "seconds": 0,
            },
            "action": {"service": "test.automation"},
        }
    }
    with patch(
        "homeassistant.util.dt.utcnow", return_value=frozen_time
    ):
        assert await async_setup_component(hass, automation.DOMAIN, config)
    # A matching second fires the automation exactly once.
    async_fire_time_changed(hass, now.replace(year=now.year + 2, second=0))
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_when_all_matches(hass, calls):
    """Test for firing if everything matches."""
    now = dt_util.utcnow()
    # Freeze "now" at hour 4 so the 01:02:03 trigger cannot fire during setup.
    frozen_time = dt_util.utcnow().replace(year=now.year + 1, hour=4)
    config = {
        automation.DOMAIN: {
            "trigger": {
                "platform": "time_pattern",
                "hours": 1,
                "minutes": 2,
                "seconds": 3,
            },
            "action": {"service": "test.automation"},
        }
    }
    with patch(
        "homeassistant.util.dt.utcnow", return_value=frozen_time
    ):
        assert await async_setup_component(hass, automation.DOMAIN, config)
    # Fires only when hour, minute and second all match.
    async_fire_time_changed(
        hass, now.replace(year=now.year + 2, hour=1, minute=2, second=3)
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_periodic_seconds(hass, calls):
    """Test for firing periodically every second."""
    now = dt_util.utcnow()
    # Freeze "now" at second 1 so the "/10" trigger cannot fire during setup.
    frozen_time = dt_util.utcnow().replace(year=now.year + 1, second=1)
    config = {
        automation.DOMAIN: {
            "trigger": {
                "platform": "time_pattern",
                "hours": "*",
                "minutes": "*",
                "seconds": "/10",
            },
            "action": {"service": "test.automation"},
        }
    }
    with patch(
        "homeassistant.util.dt.utcnow", return_value=frozen_time
    ):
        assert await async_setup_component(hass, automation.DOMAIN, config)
    # Second 10 is divisible by 10, so the automation fires.
    async_fire_time_changed(
        hass, now.replace(year=now.year + 2, hour=0, minute=0, second=10)
    )
    await hass.async_block_till_done()
    assert len(calls) >= 1
async def test_if_fires_periodic_minutes(hass, calls):
    """Test for firing periodically every minute."""
    now = dt_util.utcnow()
    # Freeze "now" at minute 1 so the "/2" trigger cannot fire during setup.
    frozen_time = dt_util.utcnow().replace(year=now.year + 1, minute=1)
    config = {
        automation.DOMAIN: {
            "trigger": {
                "platform": "time_pattern",
                "hours": "*",
                "minutes": "/2",
                "seconds": "*",
            },
            "action": {"service": "test.automation"},
        }
    }
    with patch(
        "homeassistant.util.dt.utcnow", return_value=frozen_time
    ):
        assert await async_setup_component(hass, automation.DOMAIN, config)
    # Minute 2 is divisible by 2, so the automation fires once.
    async_fire_time_changed(
        hass, now.replace(year=now.year + 2, hour=0, minute=2, second=0)
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_if_fires_periodic_hours(hass, calls):
    """Test for firing periodically every hour."""
    now = dt_util.utcnow()
    # Freeze "now" at hour 1 so the "/2" trigger cannot fire during setup.
    frozen_time = dt_util.utcnow().replace(year=now.year + 1, hour=1)
    config = {
        automation.DOMAIN: {
            "trigger": {
                "platform": "time_pattern",
                "hours": "/2",
                "minutes": "*",
                "seconds": "*",
            },
            "action": {"service": "test.automation"},
        }
    }
    with patch(
        "homeassistant.util.dt.utcnow", return_value=frozen_time
    ):
        assert await async_setup_component(hass, automation.DOMAIN, config)
    # Hour 2 is divisible by 2, so the automation fires once.
    async_fire_time_changed(
        hass, now.replace(year=now.year + 2, hour=2, minute=0, second=0)
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
async def test_default_values(hass, calls):
    """Test for firing at 2 minutes every hour."""
    now = dt_util.utcnow()
    # Freeze "now" at minute 1 so the minutes=="2" trigger cannot fire
    # during setup.
    frozen_time = dt_util.utcnow().replace(year=now.year + 1, minute=1)
    config = {
        automation.DOMAIN: {
            "trigger": {"platform": "time_pattern", "minutes": "2"},
            "action": {"service": "test.automation"},
        }
    }
    with patch(
        "homeassistant.util.dt.utcnow", return_value=frozen_time
    ):
        assert await async_setup_component(hass, automation.DOMAIN, config)
    # Omitted hours/seconds default to any hour at second 0:
    # minute 2, second 0 fires ...
    async_fire_time_changed(
        hass, now.replace(year=now.year + 2, hour=1, minute=2, second=0)
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
    # ... the same minute at second 1 does not fire again ...
    async_fire_time_changed(
        hass, now.replace(year=now.year + 2, hour=1, minute=2, second=1)
    )
    await hass.async_block_till_done()
    assert len(calls) == 1
    # ... and minute 2 of the following hour fires a second time.
    async_fire_time_changed(
        hass, now.replace(year=now.year + 2, hour=2, minute=2, second=0)
    )
    await hass.async_block_till_done()
    assert len(calls) == 2
| |
# coding: utf-8
from __future__ import unicode_literals
import re
from .adobepass import AdobePassIE
from ..compat import compat_str
from ..utils import (
xpath_text,
int_or_none,
determine_ext,
float_or_none,
parse_duration,
xpath_attr,
update_url_query,
ExtractorError,
strip_or_none,
)
class TurnerBaseIE(AdobePassIE):
    # Cache of Akamai SPE tokens keyed by "secure path"; class-level, so it
    # is shared by all Turner-derived extractor instances in this process.
    _AKAMAI_SPE_TOKEN_CACHE = {}

    def _extract_timestamp(self, video_data):
        """Return the creation timestamp from <dateCreated uts=...>, or None."""
        return int_or_none(xpath_attr(video_data, 'dateCreated', 'uts'))

    def _add_akamai_spe_token(self, tokenizer_src, video_url, content_id, ap_data, custom_tokenizer_query=None):
        """Append an Akamai SPE auth token ('?hdnea=...') to video_url.

        Tokens are fetched from tokenizer_src and cached per secure path.
        If the tokenizer issues no token, video_url is returned unchanged.

        Args:
            tokenizer_src: URL of the token service.
            video_url: Media URL to authorize.
            content_id: Video id used for the default tokenizer query and
                for download logging.
            ap_data: Adobe Pass data dict; when 'auth_required' is truthy an
                MVPD access token is requested via self._extract_mvpd_auth.
            custom_tokenizer_query: Optional dict that replaces the default
                {'videoId': content_id} query parameters.

        Raises:
            ExtractorError: If the tokenizer response contains <error><msg>.
        """
        # The "secure path" is everything after the host, suffixed with '*'
        # so a single token covers all renditions under that path.
        secure_path = self._search_regex(r'https?://[^/]+(.+/)', video_url, 'secure path') + '*'
        token = self._AKAMAI_SPE_TOKEN_CACHE.get(secure_path)
        if not token:
            query = {
                'path': secure_path,
            }
            if custom_tokenizer_query:
                query.update(custom_tokenizer_query)
            else:
                query['videoId'] = content_id
            if ap_data.get('auth_required'):
                query['accessToken'] = self._extract_mvpd_auth(ap_data['url'], content_id, ap_data['site_name'], ap_data['site_name'])
            auth = self._download_xml(
                tokenizer_src, content_id, query=query)
            error_msg = xpath_text(auth, 'error/msg')
            if error_msg:
                raise ExtractorError(error_msg, expected=True)
            token = xpath_text(auth, 'token')
            if not token:
                # Best effort: no token issued -- hand back the bare URL.
                return video_url
            self._AKAMAI_SPE_TOKEN_CACHE[secure_path] = token
        return video_url + '?hdnea=' + token

    def _extract_cvp_info(self, data_src, video_id, path_data={}, ap_data={}):
        """Extract an info dict from a Turner CVP XML document at data_src.

        NOTE(review): path_data/ap_data are mutable default arguments; they
        are only read here (never mutated), so the shared-default pitfall
        does not bite, but new code should prefer None defaults.
        """
        video_data = self._download_xml(data_src, video_id)
        # Prefer the id declared inside the XML over the caller-supplied one.
        video_id = video_data.attrib['id']
        title = xpath_text(video_data, 'headline', fatal=True)
        content_id = xpath_text(video_data, 'contentId') or video_id
        # rtmp_src = xpath_text(video_data, 'akamai/src')
        # if rtmp_src:
        #     splited_rtmp_src = rtmp_src.split(',')
        #     if len(splited_rtmp_src) == 2:
        #         rtmp_src = splited_rtmp_src[1]
        # aifp = xpath_text(video_data, 'akamai/aifp', default='')
        urls = []
        formats = []
        # Matches "WIDTHxHEIGHT" or "WIDTHxHEIGHT_BITRATE" in ids/urls.
        rex = re.compile(
            r'(?P<width>[0-9]+)x(?P<height>[0-9]+)(?:_(?P<bitrate>[0-9]+))?')
        # Possible formats locations: files/file, files/groupFiles/files
        # and maybe others
        for video_file in video_data.findall('.//file'):
            video_url = video_file.text.strip()
            if not video_url:
                continue
            ext = determine_ext(video_url)
            if video_url.startswith('/mp4:protected/'):
                continue
                # TODO Correct extraction for these files
                # protected_path_data = path_data.get('protected')
                # if not protected_path_data or not rtmp_src:
                #     continue
                # protected_path = self._search_regex(
                #     r'/mp4:(.+)\.[a-z0-9]', video_url, 'secure path')
                # auth = self._download_webpage(
                #     protected_path_data['tokenizer_src'], query={
                #         'path': protected_path,
                #         'videoId': content_id,
                #         'aifp': aifp,
                #     })
                # token = xpath_text(auth, 'token')
                # if not token:
                #     continue
                # video_url = rtmp_src + video_url + '?' + token
            elif video_url.startswith('/secure/'):
                # Akamai-secured media: needs an SPE token appended.
                secure_path_data = path_data.get('secure')
                if not secure_path_data:
                    continue
                video_url = self._add_akamai_spe_token(
                    secure_path_data['tokenizer_src'],
                    secure_path_data['media_src'] + video_url,
                    content_id, ap_data)
            elif not re.match('https?://', video_url):
                # Relative path: resolve against the configured media base.
                base_path_data = path_data.get(ext, path_data.get('default', {}))
                media_src = base_path_data.get('media_src')
                if not media_src:
                    continue
                video_url = media_src + video_url
            # Skip duplicate URLs across the possible file locations.
            if video_url in urls:
                continue
            urls.append(video_url)
            format_id = video_file.get('bitrate')
            if ext == 'smil':
                formats.extend(self._extract_smil_formats(
                    video_url, video_id, fatal=False))
            elif ext == 'm3u8':
                m3u8_formats = self._extract_m3u8_formats(
                    video_url, video_id, 'mp4',
                    m3u8_id=format_id or 'hls', fatal=False)
                if '/secure/' in video_url and '?hdnea=' in video_url:
                    # Tokenized secure HLS streams do not support seeking.
                    for f in m3u8_formats:
                        f['_seekable'] = False
                formats.extend(m3u8_formats)
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    update_url_query(video_url, {'hdcore': '3.7.0'}),
                    video_id, f4m_id=format_id or 'hds', fatal=False))
            else:
                f = {
                    'format_id': format_id,
                    'url': video_url,
                    'ext': ext,
                }
                # NOTE(review): format_id comes from the optional 'bitrate'
                # attribute and may be None, which would make this
                # concatenation raise TypeError -- presumably progressive
                # files always carry a bitrate; confirm before relying on it.
                mobj = rex.search(format_id + video_url)
                if mobj:
                    f.update({
                        'width': int(mobj.group('width')),
                        'height': int(mobj.group('height')),
                        'tbr': int_or_none(mobj.group('bitrate')),
                    })
                elif isinstance(format_id, compat_str):
                    if format_id.isdigit():
                        f['tbr'] = int(format_id)
                    else:
                        # ids like 'ios_audio' / 'ios_400' encode codec/tbr.
                        mobj = re.match(r'ios_(audio|[0-9]+)$', format_id)
                        if mobj:
                            if mobj.group(1) == 'audio':
                                f.update({
                                    'vcodec': 'none',
                                    'ext': 'm4a',
                                })
                            else:
                                f['tbr'] = int(mobj.group(1))
                formats.append(f)
        self._sort_formats(formats)
        subtitles = {}
        for source in video_data.findall('closedCaptions/source'):
            for track in source.findall('track'):
                track_url = track.get('url')
                # Skip missing URLs and the '/big' placeholder entries.
                if not isinstance(track_url, compat_str) or track_url.endswith('/big'):
                    continue
                lang = track.get('lang') or track.get('label') or 'en'
                subtitles.setdefault(lang, []).append({
                    'url': track_url,
                    'ext': {
                        'scc': 'scc',
                        'webvtt': 'vtt',
                        'smptett': 'tt',
                    }.get(source.get('format'))
                })
        thumbnails = [{
            'id': image.get('cut'),
            'url': image.text,
            'width': int_or_none(image.get('width')),
            'height': int_or_none(image.get('height')),
        } for image in video_data.findall('images/image')]
        is_live = xpath_text(video_data, 'isLive') == 'true'
        return {
            'id': video_id,
            'title': self._live_title(title) if is_live else title,
            'formats': formats,
            'subtitles': subtitles,
            'thumbnails': thumbnails,
            'thumbnail': xpath_text(video_data, 'poster'),
            'description': strip_or_none(xpath_text(video_data, 'description')),
            'duration': parse_duration(xpath_text(video_data, 'length') or xpath_text(video_data, 'trt')),
            'timestamp': self._extract_timestamp(video_data),
            'upload_date': xpath_attr(video_data, 'metas', 'version'),
            'series': xpath_text(video_data, 'showTitle'),
            'season_number': int_or_none(xpath_text(video_data, 'seasonNumber')),
            'episode_number': int_or_none(xpath_text(video_data, 'episodeNumber')),
            'is_live': is_live,
        }

    def _extract_ngtv_info(self, media_id, tokenizer_query, ap_data=None):
        """Extract formats/chapters/duration from the NGTV medium API.

        Returns a partial info dict ('formats', 'chapters', 'duration');
        callers merge it into their own result.
        """
        streams_data = self._download_json(
            'http://medium.ngtv.io/media/%s/tv' % media_id,
            media_id)['media']['tv']
        duration = None
        chapters = []
        formats = []
        # Streams come in up to two flavors; both are HLS playlists.
        for supported_type in ('unprotected', 'bulkaes'):
            stream_data = streams_data.get(supported_type, {})
            m3u8_url = stream_data.get('secureUrl') or stream_data.get('url')
            if not m3u8_url:
                continue
            if stream_data.get('playlistProtection') == 'spe':
                m3u8_url = self._add_akamai_spe_token(
                    'http://token.ngtv.io/token/token_spe',
                    m3u8_url, media_id, ap_data or {}, tokenizer_query)
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, media_id, 'mp4', m3u8_id='hls', fatal=False))
            duration = float_or_none(stream_data.get('totalRuntime'))
            # Take chapters from the first stream flavor that provides them.
            if not chapters:
                for chapter in stream_data.get('contentSegments', []):
                    start_time = float_or_none(chapter.get('start'))
                    chapter_duration = float_or_none(chapter.get('duration'))
                    if start_time is None or chapter_duration is None:
                        continue
                    chapters.append({
                        'start_time': start_time,
                        'end_time': start_time + chapter_duration,
                    })
        self._sort_formats(formats)
        return {
            'formats': formats,
            'chapters': chapters,
            'duration': duration,
        }
| |
# -*- coding: utf-8 -*-
'''
integration tests for mac_power
'''
# Import python libs
from __future__ import absolute_import, print_function
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath, destructiveTest
ensure_in_syspath('../../')
# Import salt libs
import integration
import salt.utils
def disabled(f):
    '''
    Decorator that disables a test function.

    The decorated name is replaced with a no-op wrapper that only prints a
    notice that the test has been disabled; the original body never runs.

    Bug fixed: the previous implementation returned ``_decorator(f)`` --
    i.e. the *result* of calling the inner function, which is ``None`` --
    so any decorated test was replaced by ``None`` and invoking it raised
    ``TypeError: 'NoneType' object is not callable``.
    '''
    def _decorator(*args, **kwargs):  # accept any signature transparently
        print('{0} has been disabled'.format(f.__name__))
    return _decorator
class MacPowerModuleTest(integration.ModuleCase):
'''
Validate the mac_power module
'''
COMPUTER_SLEEP = 0
DISPLAY_SLEEP = 0
HARD_DISK_SLEEP = 0
WAKE_ON_MODEM = False
WAKE_ON_NET = False
RESTART_POWER = False
RESTART_FREEZE = False
SLEEP_ON_BUTTON = False
    def setUp(self):
        '''
        Get current settings

        Skips unless running as root on macOS with the systemsetup binary
        available, then snapshots every power setting so tearDown can
        restore the machine to its original state.
        '''
        if not salt.utils.is_darwin():
            self.skipTest('Test only available on Mac OS X')

        if not salt.utils.which('systemsetup'):
            self.skipTest('Test requires systemsetup binary')

        if salt.utils.get_uid(salt.utils.get_user()) != 0:
            self.skipTest('Test requires root')

        # Snapshot current values into the class-level placeholders.
        self.COMPUTER_SLEEP = self.run_function('power.get_computer_sleep')
        self.DISPLAY_SLEEP = self.run_function('power.get_display_sleep')
        self.HARD_DISK_SLEEP = self.run_function('power.get_harddisk_sleep')
        self.WAKE_ON_MODEM = self.run_function('power.get_wake_on_modem')
        self.WAKE_ON_NET = self.run_function('power.get_wake_on_network')
        self.RESTART_POWER = self.run_function('power.get_restart_power_failure')
        self.RESTART_FREEZE = self.run_function('power.get_restart_freeze')
        self.SLEEP_ON_BUTTON = self.run_function('power.get_sleep_on_power_button')
    def tearDown(self):
        '''
        Reset to original settings

        Restores every power setting captured in setUp, since the tests
        are destructive and change live system configuration.
        '''
        self.run_function('power.set_computer_sleep', [self.COMPUTER_SLEEP])
        self.run_function('power.set_display_sleep', [self.DISPLAY_SLEEP])
        self.run_function('power.set_harddisk_sleep', [self.HARD_DISK_SLEEP])
        self.run_function('power.set_wake_on_modem', [self.WAKE_ON_MODEM])
        self.run_function('power.set_wake_on_network', [self.WAKE_ON_NET])
        self.run_function('power.set_restart_power_failure',
                          [self.RESTART_POWER])
        self.run_function('power.set_restart_freeze', [self.RESTART_FREEZE])
        self.run_function('power.set_sleep_on_power_button',
                          [self.SLEEP_ON_BUTTON])
@destructiveTest
def test_computer_sleep(self):
'''
Test power.get_computer_sleep
Test power.set_computer_sleep
'''
# Normal Functionality
self.assertTrue(self.run_function('power.set_computer_sleep', [90]))
self.assertEqual(
self.run_function('power.get_computer_sleep'), 'after 90 minutes')
self.assertTrue(self.run_function('power.set_computer_sleep', ['Off']))
self.assertEqual(self.run_function('power.get_computer_sleep'), 'Never')
# Test invalid input
self.assertIn(
'Invalid String Value for Minutes',
self.run_function('power.set_computer_sleep', ['spongebob']))
self.assertIn(
'Invalid Integer Value for Minutes',
self.run_function('power.set_computer_sleep', [0]))
self.assertIn(
'Invalid Integer Value for Minutes',
self.run_function('power.set_computer_sleep', [181]))
self.assertIn(
'Invalid Boolean Value for Minutes',
self.run_function('power.set_computer_sleep', [True]))
@destructiveTest
def test_display_sleep(self):
'''
Test power.get_display_sleep
Test power.set_display_sleep
'''
# Normal Functionality
self.assertTrue(self.run_function('power.set_display_sleep', [90]))
self.assertEqual(
self.run_function('power.get_display_sleep'), 'after 90 minutes')
self.assertTrue(self.run_function('power.set_display_sleep', ['Off']))
self.assertEqual(self.run_function('power.get_display_sleep'), 'Never')
# Test invalid input
self.assertIn(
'Invalid String Value for Minutes',
self.run_function('power.set_display_sleep', ['spongebob']))
self.assertIn(
'Invalid Integer Value for Minutes',
self.run_function('power.set_display_sleep', [0]))
self.assertIn(
'Invalid Integer Value for Minutes',
self.run_function('power.set_display_sleep', [181]))
self.assertIn(
'Invalid Boolean Value for Minutes',
self.run_function('power.set_display_sleep', [True]))
@destructiveTest
def test_harddisk_sleep(self):
'''
Test power.get_harddisk_sleep
Test power.set_harddisk_sleep
'''
# Normal Functionality
self.assertTrue(self.run_function('power.set_harddisk_sleep', [90]))
self.assertEqual(
self.run_function('power.get_harddisk_sleep'), 'after 90 minutes')
self.assertTrue(self.run_function('power.set_harddisk_sleep', ['Off']))
self.assertEqual(self.run_function('power.get_harddisk_sleep'), 'Never')
# Test invalid input
self.assertIn(
'Invalid String Value for Minutes',
self.run_function('power.set_harddisk_sleep', ['spongebob']))
self.assertIn(
'Invalid Integer Value for Minutes',
self.run_function('power.set_harddisk_sleep', [0]))
self.assertIn(
'Invalid Integer Value for Minutes',
self.run_function('power.set_harddisk_sleep', [181]))
self.assertIn(
'Invalid Boolean Value for Minutes',
self.run_function('power.set_harddisk_sleep', [True]))
@disabled
def test_wake_on_modem(self):
'''
Test power.get_wake_on_modem
Test power.set_wake_on_modem
Commands don't seem to be supported on el capitan. Perhaps it works on
OS X Server or older versions
'''
self.assertTrue(self.run_function('power.set_wake_on_modem', ['on']))
self.assertTrue(self.run_function('power.get_wake_on_modem'))
self.assertTrue(self.run_function('power.set_wake_on_modem', ['off']))
self.assertFalse(self.run_function('power.get_wake_on_modem'))
@disabled
def test_wake_on_network(self):
'''
Test power.get_wake_on_network
Test power.set_wake_on_network
Commands don't seem to be supported on el capitan. Perhaps it works on
OS X Server or older versions
'''
self.assertTrue(self.run_function('power.set_wake_on_network', ['on']))
self.assertTrue(self.run_function('power.get_wake_on_network'))
self.assertTrue(self.run_function('power.set_wake_on_network', ['off']))
self.assertFalse(self.run_function('power.get_wake_on_network'))
@disabled
def test_restart_power_failure(self):
'''
Test power.get_restart_power_failure
Test power.set_restart_power_failure
Commands don't seem to be supported on el capitan. Perhaps it works on
OS X Server or older versions
'''
self.assertTrue(
self.run_function('power.set_restart_power_failure', ['on']))
self.assertTrue(self.run_function('power.get_restart_power_failure'))
self.assertTrue(
self.run_function('power.set_restart_power_failure', ['off']))
self.assertFalse(self.run_function('power.get_restart_power_failure'))
@disabled
def test_restart_freeze(self):
'''
Test power.get_restart_freeze
Test power.set_restart_freeze
Though the set command completes successfully, the setting isn't
actually changed
'''
# Normal Functionality
self.assertTrue(self.run_function('power.set_restart_freeze', ['on']))
self.assertTrue(self.run_function('power.get_restart_freeze'))
self.assertTrue(self.run_function('power.set_restart_freeze', ['off']))
self.assertFalse(self.run_function('power.get_restart_freeze'))
@disabled
def test_sleep_on_power_button(self):
'''
Test power.get_sleep_on_power_button
Test power.set_sleep_on_power_button
Commands don't seem to be supported on el capitan. Perhaps it works on
OS X Server or older versions
'''
# Normal Functionality
self.assertTrue(
self.run_function('power.set_sleep_on_power_button', ['on']))
self.assertTrue(self.run_function('power.get_sleep_on_power_button'))
self.assertTrue(
self.run_function('power.set_sleep_on_power_button', ['off']))
self.assertFalse(self.run_function('power.get_sleep_on_power_button'))
# Script entry point: run this module's tests through Salt's integration
# test runner.
if __name__ == '__main__':
    from integration import run_tests
    run_tests(MacPowerModuleTest)
| |
from ict.connection.node import Node as ConnectionNode
from ict.protobuf.core_pb2 import MetaMessage
from ict.protobuf.obnl_pb2 import *
class Node(ConnectionNode):
    """Common base class for every OBNL node.

    Every outgoing protobuf message is wrapped in a ``MetaMessage``
    envelope carrying this node's name before being handed to the
    underlying connection layer.
    """
    SCHEDULER_NAME = "scheduler"
    SIMULATION = "obnl.simulation.node."
    DATA = "obnl.data.node."
    LOCAL = "obnl.local.node."
    # Base of every routing key for block messages (followed by the
    # number/position of the block).
    UPDATE_ROUTING = 'obnl.update.block.'

    def __init__(self, host, vhost, username, password, config_file="obnl.json"):
        """
        The constructor creates the 3 main queues
        - general: To receive data with everyone
        - update: To receive data for the time management
        - data: To receive attribute update

        :param host: the connection to RabbitMQ Server
        :param vhost: the virtual host of RabbitMQ Server
        :param username: the user connection
        :param password: the password connection
        :param config_file: the configuration file to generate queues & exchanges
        """
        super().__init__(host, vhost, username, password, config_file)
        # Simulation name; filled in later from a SchedulerConnection message.
        self._simulation = None

    @property
    def simulation(self):
        """Name of the simulation this node belongs to (or None)."""
        return self._simulation

    def _wrap(self, message):
        """Packs *message* into a serialized MetaMessage stamped with this node's name."""
        envelope = MetaMessage()
        envelope.node_name = self._name
        envelope.details.Pack(message)
        return envelope.SerializeToString()

    def send(self, exchange, routing, message, reply_to=None):
        """Publishes *message* (wrapped in a MetaMessage) on *exchange*/*routing*.

        :param exchange: the MQTT exchange
        :param routing: the MQTT routing key
        :param message: the protobuf message
        :param reply_to: the routing key to reply to
        """
        super().send(exchange, routing, self._wrap(message), reply_to)

    def reply_to(self, reply_to, message):
        """Replies to a previously received message (no-op when *reply_to* is falsy).

        :param reply_to: the asker's routing key
        :param message: the protobuf message to send back
        """
        if reply_to:
            super().send(exchange='', routing=reply_to, message=self._wrap(message))

    def send_simulation(self, routing, message, reply_to=None):
        """Publishes *message* on this node's own simulation exchange.

        :param routing: the MQTT routing key
        :param message: the protobuf message
        :param reply_to: the routing key to reply to
        """
        self.LOGGER.debug(' ----> ' + routing)
        self.send(Node.SIMULATION + self.name, routing, message, reply_to=reply_to)
class ClientNode(Node):
    """Simulation client driven by the OBNL scheduler.

    Wraps an API object that implements the actual simulation step and
    exchanges attribute values with other nodes over the data queues.
    """

    def __init__(self, host, vhost, username, password, api, config_file="client.json", input_attributes=None, output_attributes=None, is_first=False):
        """Connects the node and announces it to the scheduler.

        :param host: the connection to RabbitMQ Server
        :param vhost: the virtual host of RabbitMQ Server
        :param username: the user connection
        :param password: the password connection
        :param api: object implementing ``step(current_time, time_step)``
        :param config_file: the configuration file to generate queues & exchanges
        :param input_attributes: names of attributes this node consumes (or None)
        :param output_attributes: names of attributes this node publishes (or None)
        :param is_first: True if this node may step before receiving any input
        """
        super(ClientNode, self).__init__(host, vhost, username, password, config_file)
        self._api_node = api
        self._next_step = False     # set True once the scheduler grants a step
        self._reply_to = None       # routing key for the step acknowledgement
        self._is_first = is_first
        self._current_time = 0
        self._time_step = 0
        self._links = {}            # '<node>.<attr>' -> local input name
        self._input_values = {}     # values received during the current step
        self._input_attributes = input_attributes
        self._output_attributes = output_attributes
        # Register with the scheduler and ask it to reply on our own
        # simulation queue.  (Node.SCHEDULER_NAME == 'scheduler'; the literal
        # was previously hard-coded here.)
        si = SimulatorConnection()
        self.send_simulation(ClientNode.SIMULATION + Node.SCHEDULER_NAME, si,
                             reply_to=ClientNode.SIMULATION + self.name)

    @property
    def input_values(self):
        """Attribute values received since the last step, keyed by input name."""
        return self._input_values

    @property
    def input_attributes(self):
        """Names of the attributes this node consumes (or None)."""
        return self._input_attributes

    @property
    def output_attributes(self):
        """Names of the attributes this node publishes (or None)."""
        return self._output_attributes

    def step(self, current_time, time_step):
        """Delegates one simulation step to the wrapped API object."""
        self._api_node.step(current_time, time_step)

    def update_attribute(self, attr, value):
        """
        Sends the new attribute value to those who want to know.

        :param attr: the attribute to communicate
        :param value: the new value of the attribute
        """
        am = AttributeMessage()
        am.simulation_time = self._current_time
        am.attribute_name = attr
        am.attribute_value = float(value)
        m = MetaMessage()
        m.node_name = self._name
        m.details.Pack(am)
        # Only publish when this node actually declares outputs.
        if self._output_attributes:
            self._channel.publish(exchange=ClientNode.DATA + self._name,
                                  routing_key=ClientNode.DATA + attr,
                                  body=m.SerializeToString())

    def on_local(self, ch, method, props, body):
        """Runs the pending step once all expected inputs have arrived."""
        # A step is runnable when the scheduler granted one AND either this
        # node goes first, has no inputs, or every input value has arrived.
        if self._next_step \
                and (self._is_first
                     or not self._input_attributes
                     or len(self._input_values.keys()) == len(self._input_attributes)):
            # TODO: call updateX or updateY depending on the meta content
            Node.LOGGER.debug(self.name+" step running.")
            self.step(self._current_time, self._time_step)
            self._next_step = False
            self._input_values.clear()
            # Acknowledge the completed step back to the scheduler.
            nm = NextStep()
            nm.current_time = self._current_time
            nm.time_step = self._time_step
            self.reply_to(self._reply_to, nm)
        self._channel.basic_ack(delivery_tag=method.delivery_tag)

    def on_simulation(self, ch, method, props, body):
        """Handles scheduler messages: NextStep, SchedulerConnection and Quit."""
        mm = MetaMessage()
        mm.ParseFromString(body)
        if mm.details.Is(NextStep.DESCRIPTOR) and mm.node_name == Node.SCHEDULER_NAME:
            nm = NextStep()
            mm.details.Unpack(nm)
            self._next_step = True
            self._reply_to = props.reply_to
            self._current_time = nm.current_time
            self._time_step = nm.time_step
            self.send_local(mm.details)
        elif mm.details.Is(SchedulerConnection.DESCRIPTOR):
            sc = SchedulerConnection()
            mm.details.Unpack(sc)
            self._simulation = sc.simulation
            self._links = dict(sc.attribute_links)
            Node.LOGGER.debug(self.name + " connected to simulation '" + self.simulation+"'")
        elif mm.details.Is(Quit.DESCRIPTOR):
            # BUGFIX: `sys` was never imported in this module, so handling a
            # Quit message raised NameError instead of shutting down cleanly.
            import sys
            Node.LOGGER.info(self.name+" disconnected!")
            self._channel.basic_ack(delivery_tag=method.delivery_tag)
            self._channel.close()
            sys.exit(0)
        self._channel.basic_ack(delivery_tag=method.delivery_tag)

    def on_data(self, ch, method, props, body):
        """Stores a received attribute value and forwards it locally."""
        mm = MetaMessage()
        mm.ParseFromString(body)
        if mm.details.Is(AttributeMessage.DESCRIPTOR):
            am = AttributeMessage()
            mm.details.Unpack(am)
            Node.LOGGER.debug("Received attribute: "+am.attribute_name+' ('+str(am.attribute_value)+')')
            # Map the remote '<node>.<attr>' name to our local input name.
            self._input_values[self._links[mm.node_name+'.'+am.attribute_name]] = am.attribute_value
            self.send_local(mm.details)
        self._channel.basic_ack(delivery_tag=method.delivery_tag)

    def send_local(self, message):
        """
        Sends the content to local.

        :param message: a protobuf message
        """
        self.send('', ClientNode.LOCAL + self._name, message)

    def send_scheduler(self, message):
        """
        Sends the content to scheduler.

        :param message: a protobuf message
        """
        self.send('', ClientNode.SIMULATION + Node.SCHEDULER_NAME, message)
| |
#!/usr/bin/env python2.7
"""
Toil script to move TCGA data into an S3 bucket.
Dependencies
Curl: apt-get install curl
Docker: wget -qO- https://get.docker.com/ | sh
Toil: pip install toil
S3AM: pip install --pre s3am
"""
import argparse
import glob
import hashlib
import os
import shutil
import subprocess
import tarfile
from toil.job import Job
def build_parser():
    """Builds the command-line parser for this pipeline (description taken
    from main's docstring)."""
    parser = argparse.ArgumentParser(
        description=main.__doc__,
        formatter_class=argparse.RawTextHelpFormatter)
    add = parser.add_argument
    add('-s', '--sra', default=None, required=True,
        help='Path to a file with one analysis ID per line for data hosted on CGHub.')
    add('-k', '--dbgap_key', default=None, required=True,
        help='Path to a CGHub key that has access to the TCGA data being requested. An exception will'
             'be thrown if "-g" is set but not this argument.')
    add('--s3_dir', default=None, required=True, help='S3 Bucket. e.g. tcga-data')
    add('--ssec', default=None, required=True, help='Path to Key File for SSE-C Encryption')
    add('--single_end', default=None, action='store_true', help='Set this flag if data is single-end')
    add('--sudo', dest='sudo', default=None, action='store_true',
        help='Docker usually needs sudo to execute locally, but not when running Mesos or when '
             'the user is a member of a Docker group.')
    return parser
# Convenience Functions
def generate_unique_key(master_key_path, url):
    """
    Derives a deterministic per-object 32-byte encryption key from the
    master key and the object's URL.

    master_key_path: str    Path to the BD2K Master Key (for S3 Encryption)
    url: str                S3 URL (e.g. https://s3-us-west-2.amazonaws.com/bucket/file.txt)

    Returns: bytes          32-byte unique key generated for that URL
    """
    with open(master_key_path, 'r') as f:
        master_key = f.read()
    assert len(master_key) == 32, 'Invalid Key! Must be 32 characters. ' \
                                  'Key: {}, Length: {}'.format(master_key, len(master_key))
    material = master_key + url
    # BUGFIX: hashlib.sha256 requires bytes under Python 3; encoding here is
    # a byte-wise no-op for the ASCII keys/URLs used under Python 2.
    if not isinstance(material, bytes):
        material = material.encode('utf-8')
    new_key = hashlib.sha256(material).digest()
    assert len(new_key) == 32, 'New key is invalid and is not 32 characters: {}'.format(new_key)
    return new_key
def docker_call(work_dir, tool_parameters, tool, java_opts=None, sudo=False, outfile=None):
    """
    Runs a tool inside a Docker container via subprocess.

    work_dir: str           Host directory mounted into the container at /data
    tool_parameters: list   An array of the parameters to be passed to the tool
    tool: str               Name of the Docker image to be used (e.g. quay.io/ucsc_cgl/samtools)
    java_opts: str          Optional commands to pass to a java jar execution. (e.g. '-Xmx15G')
    sudo: bool              If the user wants the docker command executed as sudo
    outfile: file           Filehandle that the container's stdout will be passed to
    """
    command = 'docker run --log-driver=none --rm -v {}:/data'.format(work_dir).split()
    if sudo:
        command.insert(0, 'sudo')
    if java_opts:
        command.extend(['-e', 'JAVA_OPTS={}'.format(java_opts)])
    command = command + [tool] + tool_parameters
    try:
        if outfile:
            subprocess.check_call(command, stdout=outfile)
        else:
            subprocess.check_call(command)
    except subprocess.CalledProcessError:
        raise RuntimeError('docker command returned a non-zero exit status: {}'
                           ''.format(command))
    except OSError:
        raise RuntimeError('docker not found on system. Install on all nodes.')
def parse_sra(path_to_config):
    """
    Parses genetorrent config file. Returns a flat list of stripped sample
    IDs, one per non-blank line.
    """
    with open(path_to_config, 'r') as handle:
        return [entry.strip() for entry in handle.readlines()
                if not entry.isspace()]
def tarball_files(work_dir, tar_name, uuid=None, files=None):
    """
    Creates a gzipped tarball containing the given files.

    work_dir: str   Current Working Directory
    tar_name: str   Name of tarball
    uuid: str       Optional UUID prefix stamped onto each archived file name
    files: str(s)   Filenames (relative to work_dir) to place in the tarball
    """
    tar_path = os.path.join(work_dir, tar_name)
    with tarfile.open(tar_path, 'w:gz') as archive:
        for fname in files:
            member = uuid + '.' + fname if uuid else fname
            archive.add(os.path.join(work_dir, fname), arcname=member)
# Job Functions
def start_batch(job, input_args):
    """
    Root Toil job: reads the SRA sample list and hands the whole list to a
    single child job, which then recursively splits it in half (see
    download_and_transfer_sample) to fan the downloads out.
    """
    samples = parse_sra(input_args['sra'])
    # for analysis_id in samples:
    # NOTE(review): disk='30' requests 30 *bytes*; the child jobs spawned
    # below request '30G' -- confirm whether this should also be '30G'.
    job.addChildJobFn(download_and_transfer_sample, input_args, samples, cores=1, disk='30')
def download_and_transfer_sample(job, input_args, samples):
    """
    Downloads a sample from dbGaP via SRAToolKit, then uses S3AM to transfer it to S3

    input_args: dict        Dictionary of input arguments
    samples: list           Analysis IDs still to process; recursively halved
                            into child jobs until a single ID remains
    """
    # Binary-split the worklist into child jobs so downloads run in parallel.
    if len(samples) > 1:
        # NOTE(review): `len(samples)/2` relies on Python 2 integer division
        # (the file's shebang is python2.7); under Python 3 this is a float
        # index and raises TypeError -- use // when porting.
        a = samples[len(samples)/2:]
        b = samples[:len(samples)/2]
        job.addChildJobFn(download_and_transfer_sample, input_args, a, disk='30G')
        job.addChildJobFn(download_and_transfer_sample, input_args, b, disk='30G')
    else:
        analysis_id = samples[0]
        work_dir = job.fileStore.getLocalTempDir()
        sudo = input_args['sudo']
        # Acquire dbgap_key
        shutil.copy(input_args['dbgap_key'], os.path.join(work_dir, 'dbgap.ngc'))
        # Call to fastq-dump to pull down SRA files and convert to fastq
        if input_args['single_end']:
            parameters = [analysis_id]
        else:
            parameters = ['--split-files', analysis_id]
        docker_call(tool='quay.io/ucsc_cgl/fastq-dump:2.5.7--4577a6c1a3c94adaa0c25dd6c03518ee610433d1',
                    work_dir=work_dir, tool_parameters=parameters, sudo=sudo)
        # Collect files and encapsulate into a tarball
        shutil.rmtree(os.path.join(work_dir, 'sra'))
        sample_name = analysis_id + '.tar.gz'
        if input_args['single_end']:
            r = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*.f*'))]
            tarball_files(work_dir, tar_name=sample_name, files=r)
        else:
            # Paired-end: fastq-dump --split-files names mates *_1* / *_2*.
            r1 = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*_1*'))]
            r2 = [os.path.basename(x) for x in glob.glob(os.path.join(work_dir, '*_2*'))]
            tarball_files(work_dir, tar_name=sample_name, files=r1 + r2)
        # Parse s3_dir to get bucket and s3 path
        key_path = input_args['ssec']
        s3_dir = input_args['s3_dir']
        bucket_name = s3_dir.lstrip('/').split('/')[0]
        base_url = 'https://s3-us-west-2.amazonaws.com/'
        url = os.path.join(base_url, bucket_name, sample_name)
        # Generate keyfile for upload: the SSE-C key is derived from the
        # master key plus the object's final URL, so it is unique per object.
        with open(os.path.join(work_dir, 'temp.key'), 'wb') as f_out:
            f_out.write(generate_unique_key(key_path, url))
        # Upload to S3 via S3AM
        s3am_command = ['s3am',
                        'upload',
                        '--sse-key-file', os.path.join(work_dir, 'temp.key'),
                        'file://{}'.format(os.path.join(work_dir, sample_name)),
                        's3://' + bucket_name + '/']
        subprocess.check_call(s3am_command)
def main():
    """
    Transfer gTEX data from dbGaP (NCBI) to S3
    """
    # Build the argparse parser and let Toil attach its own options.
    parser = build_parser()
    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    # Collect the parsed options into the dict handed to every job.
    inputs = {key: getattr(args, key)
              for key in ('sra', 'dbgap_key', 'ssec', 's3_dir',
                          'single_end', 'sudo')}
    # Sanity checks: each supplied path must point at an existing file.
    for path_arg in (args.ssec, args.sra, args.dbgap_key):
        if path_arg:
            assert os.path.isfile(path_arg)
    # Start Pipeline
    Job.Runner.startToil(Job.wrapJobFn(start_batch, inputs), args)
# Script entry point.
if __name__ == '__main__':
    main()
| |
"""HMAC (Keyed-Hashing for Message Authentication) module.
Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
try:
import _hashlib as _hashopenssl
except ImportError:
_hashopenssl = None
_functype = None
from _operator import _compare_digest as compare_digest
else:
compare_digest = _hashopenssl.compare_digest
_functype = type(_hashopenssl.openssl_sha256) # builtin type
import hashlib as _hashlib
# RFC 2104 pad translation tables: XOR every possible byte value with the
# outer pad (0x5C) / inner pad (0x36).  Used with bytes.translate() to pad
# the key in one C-level pass.
trans_5C = bytes((x ^ 0x5C) for x in range(256))
trans_36 = bytes((x ^ 0x36) for x in range(256))
# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
class HMAC:
    """RFC 2104 HMAC class. Also complies with RFC 4231.

    This supports the API for Cryptographic Hash Functions (PEP 247).
    """
    blocksize = 64  # 512-bit HMAC; can be changed in subclasses.

    __slots__ = (
        "_hmac", "_inner", "_outer", "block_size", "digest_size"
    )

    def __init__(self, key, msg=None, digestmod=''):
        """Create a new HMAC object.

        key: bytes or buffer, key for the keyed hash object.
        msg: bytes or buffer, Initial input for the hash or None.
        digestmod: A hash name suitable for hashlib.new(). *OR*
                   A hashlib constructor returning a new hash object. *OR*
                   A module supporting PEP 247.

                   Required as of 3.8, despite its position after the optional
                   msg argument. Passing it as a keyword argument is
                   recommended, though not required for legacy API reasons.
        """
        if not isinstance(key, (bytes, bytearray)):
            raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__)

        if not digestmod:
            raise TypeError("Missing required parameter 'digestmod'.")

        # Prefer the OpenSSL-backed implementation when the digest is named
        # by a string or an OpenSSL constructor; fall back to the pure
        # Python implementation for anything OpenSSL does not support.
        if _hashopenssl and isinstance(digestmod, (str, _functype)):
            try:
                self._init_hmac(key, msg, digestmod)
            except _hashopenssl.UnsupportedDigestmodError:
                self._init_old(key, msg, digestmod)
        else:
            self._init_old(key, msg, digestmod)

    def _init_hmac(self, key, msg, digestmod):
        # OpenSSL fast path: one opaque C object does all the work.
        self._hmac = _hashopenssl.hmac_new(key, msg, digestmod=digestmod)
        self.digest_size = self._hmac.digest_size
        self.block_size = self._hmac.block_size

    def _init_old(self, key, msg, digestmod):
        # Pure-Python HMAC built from two hash objects, per RFC 2104.
        if callable(digestmod):
            digest_cons = digestmod
        elif isinstance(digestmod, str):
            digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
        else:
            digest_cons = lambda d=b'': digestmod.new(d)

        self._hmac = None
        self._outer = digest_cons()
        self._inner = digest_cons()
        self.digest_size = self._inner.digest_size

        if hasattr(self._inner, 'block_size'):
            blocksize = self._inner.block_size
            if blocksize < 16:
                _warnings.warn('block_size of %d seems too small; using our '
                               'default of %d.' % (blocksize, self.blocksize),
                               RuntimeWarning, 2)
                blocksize = self.blocksize
        else:
            _warnings.warn('No block_size attribute on given digest object; '
                           'Assuming %d.' % (self.blocksize),
                           RuntimeWarning, 2)
            blocksize = self.blocksize

        # Keys longer than one block are first hashed down (RFC 2104).
        if len(key) > blocksize:
            key = digest_cons(key).digest()

        # self.blocksize is the default blocksize. self.block_size is
        # effective block size as well as the public API attribute.
        self.block_size = blocksize

        # Zero-pad the key to one block, then prime the outer hash with
        # key XOR opad (0x5C) and the inner hash with key XOR ipad (0x36).
        key = key.ljust(blocksize, b'\0')
        self._outer.update(key.translate(trans_5C))
        self._inner.update(key.translate(trans_36))
        if msg is not None:
            self.update(msg)

    @property
    def name(self):
        # Canonical algorithm name, e.g. 'hmac-sha256'.
        if self._hmac:
            return self._hmac.name
        else:
            return f"hmac-{self._inner.name}"

    def update(self, msg):
        """Feed data from msg into this hashing object."""
        inst = self._hmac or self._inner
        inst.update(msg)

    def copy(self):
        """Return a separate copy of this hashing object.

        An update to this copy won't affect the original object.
        """
        # Call __new__ directly to avoid the expensive __init__.
        other = self.__class__.__new__(self.__class__)
        other.digest_size = self.digest_size
        if self._hmac:
            other._hmac = self._hmac.copy()
            other._inner = other._outer = None
        else:
            other._hmac = None
            other._inner = self._inner.copy()
            other._outer = self._outer.copy()
        return other

    def _current(self):
        """Return a hash object for the current state.

        To be used only internally with digest() and hexdigest().
        """
        if self._hmac:
            return self._hmac
        else:
            # Finalize on copies so the object can keep accepting updates.
            h = self._outer.copy()
            h.update(self._inner.digest())
            return h

    def digest(self):
        """Return the hash value of this hashing object.

        This returns the hmac value as bytes. The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        h = self._current()
        return h.digest()

    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        h = self._current()
        return h.hexdigest()
def new(key, msg=None, digestmod=''):
    """Return a new HMAC hashing object.

    key: bytes or buffer, the starting key for the keyed hash.
    msg: bytes or buffer, initial input for the hash, or None.
    digestmod: a hash name suitable for hashlib.new(), a hashlib
        constructor returning a new hash object, or a module supporting
        PEP 247.  Required as of 3.8, despite its position after the
        optional msg argument; passing it as a keyword argument is
        recommended, though not required for legacy API reasons.

    Feed further bytes into the returned object with its update() method,
    and read the current MAC at any time with digest() or hexdigest().
    """
    return HMAC(key, msg, digestmod)
def digest(key, msg, digest):
"""Fast inline implementation of HMAC.
key: bytes or buffer, The key for the keyed hash object.
msg: bytes or buffer, Input message.
digest: A hash name suitable for hashlib.new() for best performance. *OR*
A hashlib constructor returning a new hash object. *OR*
A module supporting PEP 247.
"""
if _hashopenssl is not None and isinstance(digest, (str, _functype)):
try:
return _hashopenssl.hmac_digest(key, msg, digest)
except _hashopenssl.UnsupportedDigestmodError:
pass
if callable(digest):
digest_cons = digest
elif isinstance(digest, str):
digest_cons = lambda d=b'': _hashlib.new(digest, d)
else:
digest_cons = lambda d=b'': digest.new(d)
inner = digest_cons()
outer = digest_cons()
blocksize = getattr(inner, 'block_size', 64)
if len(key) > blocksize:
key = digest_cons(key).digest()
key = key + b'\x00' * (blocksize - len(key))
inner.update(key.translate(trans_36))
outer.update(key.translate(trans_5C))
inner.update(msg)
outer.update(inner.digest())
return outer.digest()
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities used by convolution layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.python.frozen_keras import backend
def convert_data_format(data_format, ndim):
    """Translates a Keras `data_format` and rank into a TF layout string.

    Arguments:
      data_format: 'channels_last' or 'channels_first'.
      ndim: integer rank of the input tensor (3, 4 or 5).

    Returns:
      The TF data format string, e.g. 'NHWC'.

    Raises:
      ValueError: for an unknown `data_format` or unsupported `ndim`.
    """
    if data_format == 'channels_last':
        layouts = {3: 'NWC', 4: 'NHWC', 5: 'NDHWC'}
    elif data_format == 'channels_first':
        layouts = {3: 'NCW', 4: 'NCHW', 5: 'NCDHW'}
    else:
        raise ValueError('Invalid data_format:', data_format)
    if ndim not in layouts:
        raise ValueError('Input rank not supported:', ndim)
    return layouts[ndim]
def normalize_tuple(value, n, name):
    """Transforms a single integer or iterable of integers into an integer tuple.

    Arguments:
      value: The value to validate and convert. Could an int, or any iterable of
        ints.
      n: The size of the tuple to be returned.
      name: The name of the argument being validated, e.g. "strides" or
        "kernel_size". This is only used to format error messages.

    Returns:
      A tuple of n integers.

    Raises:
      ValueError: If something else than an int/long or iterable thereof was
        passed.
    """
    if isinstance(value, int):
        # A scalar is broadcast into every position.
        return (value,) * n
    try:
        normalized = tuple(value)
    except TypeError:
        raise ValueError('The `' + name + '` argument must be a tuple of ' +
                         str(n) + ' integers. Received: ' + str(value))
    if len(normalized) != n:
        raise ValueError('The `' + name + '` argument must be a tuple of ' +
                         str(n) + ' integers. Received: ' + str(value))
    for element in normalized:
        try:
            int(element)
        except (ValueError, TypeError):
            raise ValueError('The `' + name + '` argument must be a tuple of ' +
                             str(n) + ' integers. Received: ' + str(value) +
                             ' including element ' + str(element) + ' of type' +
                             ' ' + str(type(element)))
    return normalized
def conv_output_length(input_length, filter_size, padding, stride, dilation=1):
    """Determines output length of a convolution given input length.

    Arguments:
      input_length: integer (or None, which is passed through).
      filter_size: integer.
      padding: one of "same", "valid", "full", "causal"
      stride: integer.
      dilation: dilation rate, integer.

    Returns:
      The output length (integer), or None if `input_length` is None.
    """
    if input_length is None:
        return None
    assert padding in {'same', 'valid', 'full', 'causal'}
    # Effective kernel extent once dilation is applied.
    effective_size = filter_size + (filter_size - 1) * (dilation - 1)
    if padding in ('same', 'causal'):
        length = input_length
    elif padding == 'valid':
        length = input_length - effective_size + 1
    else:  # 'full'
        length = input_length + effective_size - 1
    # Ceiling division by the stride.
    return (length + stride - 1) // stride
def conv_input_length(output_length, filter_size, padding, stride):
    """Determines input length of a convolution given output length.

    Arguments:
      output_length: integer (or None, which is passed through).
      filter_size: integer.
      padding: one of "same", "valid", "full".
      stride: integer.

    Returns:
      The input length (integer), or None if `output_length` is None.
    """
    if output_length is None:
        return None
    assert padding in {'same', 'valid', 'full'}
    # Implicit one-sided padding implied by each padding mode.
    pad = {'same': filter_size // 2, 'valid': 0, 'full': filter_size - 1}[padding]
    return (output_length - 1) * stride - 2 * pad + filter_size
def deconv_output_length(input_length,
                         filter_size,
                         padding,
                         output_padding=None,
                         stride=0,
                         dilation=1):
    """Determines output length of a transposed convolution given input length.

    Arguments:
      input_length: Integer (or None, which is passed through).
      filter_size: Integer.
      padding: one of `"same"`, `"valid"`, `"full"`.
      output_padding: Integer, amount of padding along the output dimension. Can
        be set to `None` in which case the output length is inferred.
      stride: Integer.
      dilation: Integer.

    Returns:
      The output length (integer), or None if `input_length` is None.
    """
    assert padding in {'same', 'valid', 'full'}
    if input_length is None:
        return None

    # Effective kernel extent once dilation is applied.
    filter_size = filter_size + (filter_size - 1) * (dilation - 1)

    if output_padding is None:
        # Infer the length from the padding mode alone.
        if padding == 'valid':
            return input_length * stride + max(filter_size - stride, 0)
        if padding == 'full':
            return input_length * stride - (stride + filter_size - 2)
        return input_length * stride  # 'same'

    # Exact length when the caller pins the output padding.
    pad = {'same': filter_size // 2, 'valid': 0, 'full': filter_size - 1}[padding]
    return (input_length - 1) * stride + filter_size - 2 * pad + output_padding
def normalize_data_format(value):
    """Canonicalizes a `data_format` value to lowercase.

    Falls back to the Keras backend's global image data format when `value`
    is None.  Raises ValueError for anything other than 'channels_first' /
    'channels_last' (case-insensitive).
    """
    chosen = backend.image_data_format() if value is None else value
    canonical = chosen.lower()
    if canonical in ('channels_first', 'channels_last'):
        return canonical
    raise ValueError('The `data_format` argument must be one of '
                     '"channels_first", "channels_last". Received: ' +
                     str(chosen))
def normalize_padding(value):
    """Canonicalizes a `padding` value.

    Explicit list/tuple paddings pass through untouched; strings are
    lowercased and must be one of 'valid', 'same' or 'causal'.
    """
    if isinstance(value, (list, tuple)):
        return value
    lowered = value.lower()
    if lowered in ('valid', 'same', 'causal'):
        return lowered
    raise ValueError('The `padding` argument must be a list/tuple or one of '
                     '"valid", "same" (or "causal", only for `Conv1D). '
                     'Received: ' + str(lowered))
def convert_kernel(kernel):
    """Converts a Numpy kernel matrix from Theano format to TensorFlow format.

    Also works reciprocally, since the transformation is its own inverse.
    This is used for converting legacy Theano-saved model files.

    Arguments:
        kernel: Numpy array (3D, 4D or 5D).

    Returns:
        The converted kernel.

    Raises:
        ValueError: in case of invalid kernel shape or invalid data_format.
    """
    kernel = np.asarray(kernel)
    if not 3 <= kernel.ndim <= 5:
        raise ValueError('Invalid kernel shape:', kernel.shape)
    # Flip every spatial axis; the last two axes (input/output channels)
    # are left untouched.
    slices = [slice(None, None, -1) for _ in range(kernel.ndim)]
    no_flip = (slice(None, None), slice(None, None))
    slices[-2:] = no_flip
    # BUGFIX: index with a tuple -- indexing an ndarray with a *list* of
    # slices was deprecated in NumPy 1.15 and is an error in NumPy >= 1.24.
    return np.copy(kernel[tuple(slices)])
def conv_kernel_mask(input_shape, kernel_shape, strides, padding):
  """Compute a mask representing the connectivity of a convolution operation.

  Assume a convolution with given parameters is applied to an input having N
  spatial dimensions with `input_shape = (d_in1, ..., d_inN)` to produce an
  output with shape `(d_out1, ..., d_outN)`. This method returns a boolean array
  of shape `(d_in1, ..., d_inN, d_out1, ..., d_outN)` with `True` entries
  indicating pairs of input and output locations that are connected by a weight.

  Example:

    >>> input_shape = (4,)
    >>> kernel_shape = (2,)
    >>> strides = (1,)
    >>> padding = "valid"
    >>> conv_kernel_mask(input_shape, kernel_shape, strides, padding)
    array([[ True, False, False],
           [ True,  True, False],
           [False,  True,  True],
           [False, False,  True]])

    where rows and columns correspond to inputs and outputs respectively.

  Args:
    input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
      input.
    kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
      receptive field.
    strides: tuple of size N, strides along each spatial dimension.
    padding: type of padding, string `"same"` or `"valid"`.

  Returns:
    A boolean 2N-D `np.ndarray` of shape
    `(d_in1, ..., d_inN, d_out1, ..., d_outN)`, where `(d_out1, ..., d_outN)`
    is the spatial shape of the output. `True` entries in the mask represent
    pairs of input-output locations that are connected by a weight.

  Raises:
    ValueError: if `input_shape`, `kernel_shape` and `strides` don't have the
      same number of dimensions.
    NotImplementedError: if `padding` is not in {`"same"`, `"valid"`}.
  """
  if padding not in {'same', 'valid'}:
    raise NotImplementedError('Padding type %s not supported. '
                              'Only "valid" and "same" '
                              'are implemented.' % padding)
  in_dims = len(input_shape)
  # Broadcast scalar kernel/stride arguments across every spatial dimension.
  if isinstance(kernel_shape, int):
    kernel_shape = (kernel_shape,) * in_dims
  if isinstance(strides, int):
    strides = (strides,) * in_dims
  kernel_dims = len(kernel_shape)
  stride_dims = len(strides)
  if kernel_dims != in_dims or stride_dims != in_dims:
    raise ValueError('Number of strides, input and kernel dimensions must all '
                     'match. Received: %d, %d, %d.' %
                     (stride_dims, in_dims, kernel_dims))
  output_shape = conv_output_shape(input_shape, kernel_shape, strides, padding)
  mask_shape = input_shape + output_shape
  # `np.bool` (alias of the builtin) was deprecated in NumPy 1.20 and removed
  # in 1.24; the builtin `bool` is the equivalent dtype.
  mask = np.zeros(mask_shape, bool)
  # For every output position, mark each input position in its receptive
  # field as connected.
  output_axes_ticks = [range(dim) for dim in output_shape]
  for output_position in itertools.product(*output_axes_ticks):
    input_axes_ticks = conv_connected_inputs(input_shape, kernel_shape,
                                             output_position, strides, padding)
    for input_position in itertools.product(*input_axes_ticks):
      mask[input_position + output_position] = True
  return mask
def conv_kernel_idxs(input_shape, kernel_shape, strides, padding, filters_in,
                     filters_out, data_format):
  """Yields output-input tuples of indices in a CNN layer.

  The generator iterates over all `(output_idx, input_idx)` tuples, where
  `output_idx` is an integer index in a flattened tensor representing a single
  output image of a convolutional layer that is connected (via the layer
  weights) to the respective single input image at `input_idx`.

  Example:

    >>> input_shape = (2, 2)
    >>> kernel_shape = (2, 1)
    >>> strides = (1, 1)
    >>> padding = "valid"
    >>> filters_in = 1
    >>> filters_out = 1
    >>> data_format = "channels_last"
    >>> list(conv_kernel_idxs(input_shape, kernel_shape, strides, padding,
    ...                       filters_in, filters_out, data_format))
    [(0, 0), (0, 2), (1, 1), (1, 3)]

  Args:
    input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
      input.
    kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
      receptive field.
    strides: tuple of size N, strides along each spatial dimension.
    padding: type of padding, string `"same"` or `"valid"`.
    filters_in: `int`, number of filters in the input to the layer.
    filters_out: `int`, number of filters in the output of the layer.
    data_format: string, "channels_first" or "channels_last".

  Yields:
    The next tuple `(output_idx, input_idx)`, where
    `output_idx` is an integer index in a flattened tensor representing a single
    output image of a convolutional layer that is connected (via the layer
    weights) to the respective single input image at `input_idx`.

  Raises:
    ValueError: if `data_format` is neither
      `"channels_last"` nor `"channels_first"`, or if number of strides, input,
      and kernel number of dimensions do not match.
    NotImplementedError: if `padding` is neither `"same"` nor `"valid"`.
  """
  if padding not in ('same', 'valid'):
    raise NotImplementedError('Padding type %s not supported. '
                              'Only "valid" and "same" '
                              'are implemented.' % padding)
  in_dims = len(input_shape)
  # Scalar kernel/stride arguments are broadcast to every spatial dimension.
  if isinstance(kernel_shape, int):
    kernel_shape = (kernel_shape,) * in_dims
  if isinstance(strides, int):
    strides = (strides,) * in_dims
  kernel_dims = len(kernel_shape)
  stride_dims = len(strides)
  if kernel_dims != in_dims or stride_dims != in_dims:
    raise ValueError('Number of strides, input and kernel dimensions must all '
                     'match. Received: %d, %d, %d.' %
                     (stride_dims, in_dims, kernel_dims))
  output_shape = conv_output_shape(input_shape, kernel_shape, strides, padding)
  output_axes_ticks = [range(dim) for dim in output_shape]
  # How to merge a spatial multi-index with a filter index into one
  # multi-index, depending on where the channel axis lives.
  if data_format == 'channels_first':
    concat_idxs = lambda spatial_idx, filter_idx: (filter_idx,) + spatial_idx
  elif data_format == 'channels_last':
    concat_idxs = lambda spatial_idx, filter_idx: spatial_idx + (filter_idx,)
  else:
    raise ValueError('Data format %s not recognized.'
                     '`data_format` must be "channels_first" or '
                     '"channels_last".' % data_format)
  for output_position in itertools.product(*output_axes_ticks):
    # Ranges (one per spatial dim) of input positions feeding this output.
    input_axes_ticks = conv_connected_inputs(input_shape, kernel_shape,
                                             output_position, strides, padding)
    for input_position in itertools.product(*input_axes_ticks):
      for f_in in range(filters_in):
        for f_out in range(filters_out):
          # Flatten the (spatial..., channel) multi-indices into scalar
          # offsets within the respective output and input tensors.
          out_idx = np.ravel_multi_index(
              multi_index=concat_idxs(output_position, f_out),
              dims=concat_idxs(output_shape, filters_out))
          in_idx = np.ravel_multi_index(
              multi_index=concat_idxs(input_position, f_in),
              dims=concat_idxs(input_shape, filters_in))
          yield (out_idx, in_idx)
def conv_connected_inputs(input_shape, kernel_shape, output_position, strides,
                          padding):
  """Return locations of the input connected to an output position.

  Assume a convolution with given parameters is applied to an input having N
  spatial dimensions with `input_shape = (d_in1, ..., d_inN)`. This method
  returns N ranges specifying the input region that was convolved with the
  kernel to produce the output at position
  `output_position = (p_out1, ..., p_outN)`.

  Example:

    >>> input_shape = (4, 4)
    >>> kernel_shape = (2, 1)
    >>> output_position = (1, 1)
    >>> strides = (1, 1)
    >>> padding = "valid"
    >>> conv_connected_inputs(input_shape, kernel_shape, output_position,
    ...                       strides, padding)
    [range(1, 3), range(1, 2)]

  Args:
    input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
      input.
    kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
      receptive field.
    output_position: tuple of size N: `(p_out1, ..., p_outN)`, a single
      position in the output of the convolution.
    strides: tuple of size N, strides along each spatial dimension.
    padding: type of padding, string `"same"` or `"valid"`.

  Returns:
    N ranges `[[p_in_left1, ..., p_in_right1], ...,
    [p_in_leftN, ..., p_in_rightN]]` specifying the region in the
    input connected to output_position.
  """
  regions = []
  for d in range(len(input_shape)):
    half_left = kernel_shape[d] // 2
    half_right = kernel_shape[d] - half_left
    anchor = output_position[d] * strides[d]
    if padding == 'valid':
      # With 'valid' padding the receptive field starts at the stride
      # offset itself, so shift the anchor to the kernel's midpoint.
      anchor += half_left
    lo = max(0, anchor - half_left)
    hi = min(input_shape[d], anchor + half_right)
    regions.append(range(lo, hi))
  return regions
def conv_output_shape(input_shape, kernel_shape, strides, padding):
  """Return the output shape of an N-D convolution.

  Forces dimensions where input is empty (size 0) to remain empty.

  Args:
    input_shape: tuple of size N: `(d_in1, ..., d_inN)`, spatial shape of the
      input.
    kernel_shape: tuple of size N, spatial shape of the convolutional kernel /
      receptive field.
    strides: tuple of size N, strides along each spatial dimension.
    padding: type of padding, string `"same"` or `"valid"`.

  Returns:
    tuple of size N: `(d_out1, ..., d_outN)`, spatial shape of the output.
  """
  ndims = len(kernel_shape)
  lengths = [
      conv_output_length(input_shape[d], kernel_shape[d], padding, strides[d])
      for d in range(ndims)
  ]
  # Empty input dimensions stay empty regardless of the computed length.
  return tuple(lengths[d] if input_shape[d] else 0 for d in range(ndims))
# ==== End of conv-utils helpers; an unrelated file (Bitcoin Core SegWit functional test) follows below. ====
#!/usr/bin/env python3
# Copyright (c) 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the SegWit changeover logic."""
from test_framework.address import (
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
program_to_witness,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from test_framework.blocktools import witness_script, send_to_witness
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import sha256, CTransaction, CTxIn, COutPoint, CTxOut, COIN, ToHex, FromHex
from test_framework.address import script_to_p2sh, key_to_p2pkh
from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE
from io import BytesIO
NODE_0 = 0
NODE_2 = 2
WIT_V0 = 0
WIT_V1 = 1
def getutxo(txid):
    """Build a minimal utxo reference (always vout 0) for the given txid."""
    return {"txid": txid, "vout": 0}
def find_spendable_utxo(node, min_value):
    """Return the first spendable utxo on `node` worth at least `min_value`.

    Raises AssertionError when no such output exists.
    """
    candidates = node.listunspent(query_options={'minimumAmount': min_value})
    match = next((u for u in candidates if u['spendable']), None)
    if match is None:
        raise AssertionError("Unspent output equal or higher than %s not found" % min_value)
    return match
class SegWitTest(BitcoinTestFramework):
    def set_test_params(self):
        # Start from a fresh chain so activation heights are deterministic.
        self.setup_clean_chain = True
        self.num_nodes = 3
        # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
        # Node 0: legacy serialization (rpcserialversion=0); node 1: witness
        # serialization; node 2: signals segwit via blockversion. All three
        # keep the legacy address type and the deprecated addwitnessaddress RPC.
        self.extra_args = [["-walletprematurewitness", "-rpcserialversion=0", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"],
                           ["-blockversion=4", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-rpcserialversion=1", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"],
                           ["-blockversion=536870915", "-promiscuousmempoolflags=517", "-prematurewitness", "-walletprematurewitness", "-vbparams=segwit:0:999999999999", "-addresstype=legacy", "-deprecatedrpc=addwitnessaddress"]]
    def setup_network(self):
        # Base class connects the nodes in a chain (0-1, 1-2); additionally
        # connect 0 and 2 directly so the three nodes form a triangle.
        super().setup_network()
        connect_nodes(self.nodes[0], 2)
        self.sync_all()
def success_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 2)
sync_blocks(self.nodes)
def skip_mine(self, node, txid, sign, redeem_script=""):
send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
block = node.generate(1)
assert_equal(len(node.getblock(block[0])["tx"]), 1)
sync_blocks(self.nodes)
    def fail_accept(self, node, error_msg, txid, sign, redeem_script=""):
        # Attempt the witness spend and require that the node rejects it from
        # the mempool with RPC error -26 (tx rejected) and the given message.
        assert_raises_rpc_error(-26, error_msg, send_to_witness, 1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
    def fail_mine(self, node, txid, sign, redeem_script=""):
        # The spend is accepted into the mempool, but block creation must
        # fail: including it would make the block invalid under current rules.
        send_to_witness(1, node, getutxo(txid), self.pubkey[0], False, Decimal("49.998"), sign, redeem_script)
        assert_raises_rpc_error(-1, "CreateNewBlock: TestBlockValidity failed", node.generate, 1)
        sync_blocks(self.nodes)
def run_test(self):
self.nodes[0].generate(161) #block 161
self.log.info("Verify sigops are counted in GBT with pre-BIP141 rules before the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] == 1000000)
assert('weightlimit' not in tmpl)
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
self.nodes[0].generate(1) #block 162
balance_presetup = self.nodes[0].getbalance()
self.pubkey = []
p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh
wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness
for i in range(3):
newaddress = self.nodes[i].getnewaddress()
self.pubkey.append(self.nodes[i].getaddressinfo(newaddress)["pubkey"])
multiscript = CScript([OP_1, hex_str_to_bytes(self.pubkey[-1]), OP_1, OP_CHECKMULTISIG])
p2sh_addr = self.nodes[i].addwitnessaddress(newaddress)
bip173_addr = self.nodes[i].addwitnessaddress(newaddress, False)
p2sh_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'p2sh-segwit')['address']
bip173_ms_addr = self.nodes[i].addmultisigaddress(1, [self.pubkey[-1]], '', 'bech32')['address']
assert_equal(p2sh_addr, key_to_p2sh_p2wpkh(self.pubkey[-1]))
assert_equal(bip173_addr, key_to_p2wpkh(self.pubkey[-1]))
assert_equal(p2sh_ms_addr, script_to_p2sh_p2wsh(multiscript))
assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript))
p2sh_ids.append([])
wit_ids.append([])
for v in range(2):
p2sh_ids[i].append([])
wit_ids[i].append([])
for i in range(5):
for n in range(3):
for v in range(2):
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999")))
self.nodes[0].generate(1) #block 163
sync_blocks(self.nodes)
# Make sure all nodes recognize the transactions as theirs
assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50)
assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999"))
assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999"))
self.nodes[0].generate(260) #block 423
sync_blocks(self.nodes)
self.log.info("Verify default node can't accept any witness format txs before fork")
# unsigned, no scriptsig
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", wit_ids[NODE_0][WIT_V1][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False)
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False)
# unsigned with redeem script
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V0][0], False, witness_script(False, self.pubkey[0]))
self.fail_accept(self.nodes[0], "mandatory-script-verify-flag", p2sh_ids[NODE_0][WIT_V1][0], False, witness_script(True, self.pubkey[0]))
# signed
self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V0][0], True)
self.fail_accept(self.nodes[0], "no-witness-yet", wit_ids[NODE_0][WIT_V1][0], True)
self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V0][0], True)
self.fail_accept(self.nodes[0], "no-witness-yet", p2sh_ids[NODE_0][WIT_V1][0], True)
self.log.info("Verify witness txs are skipped for mining before the fork")
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][0], True) #block 424
self.skip_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][0], True) #block 425
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][0], True) #block 426
self.skip_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][0], True) #block 427
self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid")
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V0][1], False)
self.fail_accept(self.nodes[2], "mandatory-script-verify-flag", p2sh_ids[NODE_2][WIT_V1][1], False)
self.nodes[2].generate(4) # blocks 428-431
self.log.info("Verify previous witness txs skipped for mining can now be mined")
assert_equal(len(self.nodes[2].getrawmempool()), 4)
block = self.nodes[2].generate(1) #block 432 (first block with new rules; 432 = 144 * 3)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[2].getrawmempool()), 0)
segwit_tx_list = self.nodes[2].getblock(block[0])["tx"]
assert_equal(len(segwit_tx_list), 5)
self.log.info("Verify block and transaction serialization rpcs return differing serializations depending on rpc serialization flag")
assert(self.nodes[2].getblock(block[0], False) != self.nodes[0].getblock(block[0], False))
assert(self.nodes[1].getblock(block[0], False) == self.nodes[2].getblock(block[0], False))
for i in range(len(segwit_tx_list)):
tx = FromHex(CTransaction(), self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[2].getrawtransaction(segwit_tx_list[i]) != self.nodes[0].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i], 0) == self.nodes[2].getrawtransaction(segwit_tx_list[i]))
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) != self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[1].getrawtransaction(segwit_tx_list[i]) == self.nodes[2].gettransaction(segwit_tx_list[i])["hex"])
assert(self.nodes[0].getrawtransaction(segwit_tx_list[i]) == bytes_to_hex_str(tx.serialize_without_witness()))
self.log.info("Verify witness txs without witness data are invalid after the fork")
self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V0][2], False)
self.fail_mine(self.nodes[2], wit_ids[NODE_2][WIT_V1][2], False)
self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V0][2], False, witness_script(False, self.pubkey[2]))
self.fail_mine(self.nodes[2], p2sh_ids[NODE_2][WIT_V1][2], False, witness_script(True, self.pubkey[2]))
self.log.info("Verify default node can now use witness txs")
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V0][0], True) #block 432
self.success_mine(self.nodes[0], wit_ids[NODE_0][WIT_V1][0], True) #block 433
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 434
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 435
self.log.info("Verify sigops are counted in GBT with BIP141 rules after the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sizelimit'] >= 3999577) # actual maximum size is lower due to minimum mandatory non-witness data
assert(tmpl['weightlimit'] == 4000000)
assert(tmpl['sigoplimit'] == 80000)
assert(tmpl['transactions'][0]['txid'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 8)
self.nodes[0].generate(1) # Mine a block to clear the gbt cache
self.log.info("Non-segwit miners are able to use GBT response after activation.")
# Create a 3-tx chain: tx1 (non-segwit input, paying to a segwit output) ->
# tx2 (segwit input, paying to a non-segwit output) ->
# tx3 (non-segwit input, paying to a non-segwit output).
# tx1 is allowed to appear in the block, but no others.
txid1 = send_to_witness(1, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996"))
hex_tx = self.nodes[0].gettransaction(txid)['hex']
tx = FromHex(CTransaction(), hex_tx)
assert(tx.wit.is_null()) # This should not be a segwit input
assert(txid1 in self.nodes[0].getrawmempool())
# Now create tx2, which will spend from txid1.
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b''))
tx.vout.append(CTxOut(int(49.99*COIN), CScript([OP_TRUE])))
tx2_hex = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))['hex']
txid2 = self.nodes[0].sendrawtransaction(tx2_hex)
tx = FromHex(CTransaction(), tx2_hex)
assert(not tx.wit.is_null())
# Now create tx3, which will spend from txid2
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b""))
tx.vout.append(CTxOut(int(49.95*COIN), CScript([OP_TRUE]))) # Huge fee
tx.calc_sha256()
txid3 = self.nodes[0].sendrawtransaction(ToHex(tx))
assert(tx.wit.is_null())
assert(txid3 in self.nodes[0].getrawmempool())
# Now try calling getblocktemplate() without segwit support.
template = self.nodes[0].getblocktemplate()
# Check that tx1 is the only transaction of the 3 in the template.
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid2 not in template_txids and txid3 not in template_txids)
assert(txid1 in template_txids)
# Check that running with segwit support results in all 3 being included.
template = self.nodes[0].getblocktemplate({"rules": ["segwit"]})
template_txids = [ t['txid'] for t in template['transactions'] ]
assert(txid1 in template_txids)
assert(txid2 in template_txids)
assert(txid3 in template_txids)
# Check that wtxid is properly reported in mempool entry
assert_equal(int(self.nodes[0].getmempoolentry(txid3)["wtxid"], 16), tx.calc_sha256(True))
# Mine a block to clear the gbt cache again.
self.nodes[0].generate(1)
self.log.info("Verify behaviour of importaddress, addwitnessaddress and listunspent")
# Some public keys to be used later
pubkeys = [
"0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb
"02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97
"04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV
"02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd
"036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66
"0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K
"0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ
]
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn")
uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"]
self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR")
compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"]
assert ((self.nodes[0].getaddressinfo(uncompressed_spendable_address[0])['iscompressed'] == False))
assert ((self.nodes[0].getaddressinfo(compressed_spendable_address[0])['iscompressed'] == True))
self.nodes[0].importpubkey(pubkeys[0])
compressed_solvable_address = [key_to_p2pkh(pubkeys[0])]
self.nodes[0].importpubkey(pubkeys[1])
compressed_solvable_address.append(key_to_p2pkh(pubkeys[1]))
self.nodes[0].importpubkey(pubkeys[2])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])]
spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress
spendable_after_importaddress = [] # These outputs should be seen after importaddress
solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable
unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address'])
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], compressed_solvable_address[1]])['address'])
unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx"]
# Test multisig_without_privkey
# We have 2 public keys without private keys, use addmultisigaddress to add to wallet.
# Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address.
multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])['address']
script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))
for i in compressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# bare and p2sh multisig with compressed keys should always be spendable
spendable_anytime.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with compressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
# P2WPKH and P2SH_P2WPKH with compressed keys should always be spendable
spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in uncompressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# bare and p2sh multisig with uncompressed keys should always be spendable
spendable_anytime.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# Witness output types with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in compressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
# Multisig without private is not seen after addmultisigaddress, but seen after importaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH, P2PK, P2WPKH and P2SH_P2WPKH with compressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk, p2wpkh, p2sh_p2wpkh])
# P2SH_P2PK, P2SH_P2PKH with compressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
for i in uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress
solvable_after_importaddress.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# Witness output types with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])
op1 = CScript([OP_1])
op0 = CScript([OP_0])
# 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V
unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)]
unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG])
unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)])
p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL])
p2wshop1 = CScript([OP_0, sha256(op1)])
unsolvable_after_importaddress.append(unsolvablep2pkh)
unsolvable_after_importaddress.append(unsolvablep2wshp2pkh)
unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script
unsolvable_after_importaddress.append(p2wshop1)
unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided
unsolvable_after_importaddress.append(p2shop0)
spendable_txid = []
solvable_txid = []
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1))
self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0)
importlist = []
for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
bare = hex_str_to_bytes(v['hex'])
importlist.append(bytes_to_hex_str(bare))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)])))
else:
pubkey = hex_str_to_bytes(v['pubkey'])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
importlist.append(bytes_to_hex_str(p2pk))
importlist.append(bytes_to_hex_str(p2pkh))
importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)])))
importlist.append(bytes_to_hex_str(unsolvablep2pkh))
importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh))
importlist.append(bytes_to_hex_str(op1))
importlist.append(bytes_to_hex_str(p2wshop1))
for i in importlist:
# import all generated addresses. The wallet already has the private keys for some of these, so catch JSON RPC
# exceptions and continue.
try_rpc(-4, "The wallet already contains the private key for this address or script", self.nodes[0].importaddress, i, "", False, True)
self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only
self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that no witness address should be returned by unsolvable addresses
for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address:
assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
# addwitnessaddress should return a witness addresses even if keys are not in the wallet
self.nodes[0].addwitnessaddress(multisig_without_privkey_address)
for i in compressed_spendable_address + compressed_solvable_address:
witaddress = self.nodes[0].addwitnessaddress(i)
# addwitnessaddress should return the same address if it is a known P2SH-witness address
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)
# Repeat some tests. This time we don't add witness scripts with importaddress
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH")
uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"]
self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw")
compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"]
self.nodes[0].importpubkey(pubkeys[5])
compressed_solvable_address = [key_to_p2pkh(pubkeys[5])]
self.nodes[0].importpubkey(pubkeys[6])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])]
spendable_after_addwitnessaddress = [] # These outputs should be seen after importaddress
solvable_after_addwitnessaddress=[] # These outputs should be seen after importaddress but not spendable
unseen_anytime = [] # These outputs should never be seen
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]])['address'])
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]])['address'])
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]])['address'])
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]])['address'])
premature_witaddress = []
for i in compressed_spendable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress
spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH are always spendable
spendable_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in uncompressed_spendable_address + uncompressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh])
for i in compressed_solvable_address:
v = self.nodes[0].getaddressinfo(i)
if (v['isscript']):
# P2WSH multisig without private key are seen after addwitnessaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2SH_P2PK, P2SH_P2PKH with compressed keys are always solvable
solvable_anytime.extend([p2wpkh, p2sh_p2wpkh])
self.mine_and_test_listunspent(spendable_anytime, 2)
self.mine_and_test_listunspent(solvable_anytime, 1)
self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0)
# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress
# premature_witaddress are not accepted until the script is added with addwitnessaddress first
for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress:
# This will raise an exception
assert_raises_rpc_error(-4, "Public key or redeemscript not known to wallet, or the key is uncompressed", self.nodes[0].addwitnessaddress, i)
# after importaddress it should pass addwitnessaddress
v = self.nodes[0].getaddressinfo(compressed_solvable_address[1])
self.nodes[0].importaddress(v['hex'],"",False,True)
for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress:
witaddress = self.nodes[0].addwitnessaddress(i)
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))
spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress + spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress + solvable_anytime, 1))
self.mine_and_test_listunspent(unseen_anytime, 0)
# Check that createrawtransaction/decoderawtransaction with non-v0 Bech32 works
v1_addr = program_to_witness(1, [3,5])
v1_tx = self.nodes[0].createrawtransaction([getutxo(spendable_txid[0])],{v1_addr: 1})
v1_decoded = self.nodes[1].decoderawtransaction(v1_tx)
assert_equal(v1_decoded['vout'][0]['scriptPubKey']['addresses'][0], v1_addr)
assert_equal(v1_decoded['vout'][0]['scriptPubKey']['hex'], "51020305")
# Check that spendable outputs are really spendable
self.create_and_mine_tx_from_txids(spendable_txid)
# import all the private keys so solvable addresses become spendable
self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb")
self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97")
self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV")
self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd")
self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66")
self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K")
self.create_and_mine_tx_from_txids(solvable_txid)
# Test that importing native P2WPKH/P2WSH scripts works
for use_p2wsh in [False, True]:
if use_p2wsh:
scriptPubKey = "00203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a"
transaction = "01000000000100e1f505000000002200203a59f3f56b713fdcf5d1a57357f02c44342cbf306ffe0c4741046837bf90561a00000000"
else:
scriptPubKey = "a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d87"
transaction = "01000000000100e1f5050000000017a9142f8c469c2f0084c48e11f998ffbe7efa7549f26d8700000000"
self.nodes[1].importaddress(scriptPubKey, "", False)
rawtxfund = self.nodes[1].fundrawtransaction(transaction)['hex']
rawtxfund = self.nodes[1].signrawtransactionwithwallet(rawtxfund)["hex"]
txid = self.nodes[1].sendrawtransaction(rawtxfund)
assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid)
assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid)
# Assert it is properly saved
self.stop_node(1)
self.start_node(1)
assert_equal(self.nodes[1].gettransaction(txid, True)["txid"], txid)
assert_equal(self.nodes[1].listtransactions("*", 1, 0, True)[0]["txid"], txid)
def mine_and_test_listunspent(self, script_list, ismine):
    """Fund one output per script in script_list, mine a block, and check
    how the wallet classifies the resulting UTXOs.

    ismine: 2 -> every output must be spendable,
            1 -> every output must be watch-only (seen, not spendable),
            0 -> no output may be seen at all.
    Returns the txid of the funding transaction.
    """
    utxo = find_spendable_utxo(self.nodes[0], 50)
    tx = CTransaction()
    # txids are hex strings; parse directly in base 16 instead of the
    # old int('0x' + ..., 0) round-trip.
    tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout'])))
    for script in script_list:
        tx.vout.append(CTxOut(10000000, script))
    tx.rehash()
    signresults = self.nodes[0].signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
    txid = self.nodes[0].sendrawtransaction(signresults, True)
    self.nodes[0].generate(1)
    sync_blocks(self.nodes)
    watchcount = 0
    spendcount = 0
    for unspent in self.nodes[0].listunspent():
        if unspent['txid'] == txid:
            watchcount += 1
            # Idiomatic truth test instead of '== True'.
            if unspent['spendable']:
                spendcount += 1
    if ismine == 2:
        assert_equal(spendcount, len(script_list))
    elif ismine == 1:
        assert_equal(watchcount, len(script_list))
        assert_equal(spendcount, 0)
    else:
        assert_equal(watchcount, 0)
    return txid
def p2sh_address_to_script(self, v):
    """Derive the script variants for a P2SH address-info dict.

    Returns [bare, p2sh, p2wsh, p2sh_p2wsh] as CScript objects.
    """
    bare_script = CScript(hex_str_to_bytes(v['hex']))
    p2sh_script = CScript(hex_str_to_bytes(v['scriptPubKey']))
    p2wsh_script = CScript([OP_0, sha256(bare_script)])
    p2sh_p2wsh_script = CScript([OP_HASH160, hash160(p2wsh_script), OP_EQUAL])
    return [bare_script, p2sh_script, p2wsh_script, p2sh_p2wsh_script]
def p2pkh_address_to_script(self, v):
    """Derive the full set of script variants for a P2PKH address-info dict.

    Returns the ten scripts in the order callers unpack them:
    [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh,
     p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]
    """
    def wrap_p2sh(script):
        # Standard P2SH wrapper: OP_HASH160 <hash160(script)> OP_EQUAL
        return CScript([OP_HASH160, hash160(script), OP_EQUAL])

    pubkey = hex_str_to_bytes(v['pubkey'])
    p2wpkh = CScript([OP_0, hash160(pubkey)])
    p2pk = CScript([pubkey, OP_CHECKSIG])
    p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))
    p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])
    p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])
    return [p2wpkh, wrap_p2sh(p2wpkh), p2pk, p2pkh, wrap_p2sh(p2pk),
            wrap_p2sh(p2pkh), p2wsh_p2pk, p2wsh_p2pkh,
            wrap_p2sh(p2wsh_p2pk), wrap_p2sh(p2wsh_p2pkh)]
def create_and_mine_tx_from_txids(self, txids, success=True):
    """Build one transaction that spends every output of every txid,
    sign it with the wallet, broadcast it, and mine a block."""
    tx = CTransaction()
    for txid in txids:
        raw = self.nodes[0].getrawtransaction(txid)
        prev_tx = CTransaction()
        prev_tx.deserialize(BytesIO(hex_str_to_bytes(raw)))
        for vout_index in range(len(prev_tx.vout)):
            tx.vin.append(CTxIn(COutPoint(int('0x' + txid, 0), vout_index)))
    tx.vout.append(CTxOut(0, CScript()))
    tx.rehash()
    signed = self.nodes[0].signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
    self.nodes[0].sendrawtransaction(signed, True)
    self.nodes[0].generate(1)
    sync_blocks(self.nodes)
if __name__ == '__main__':
    # Run the SegWit wallet/address functional test when executed directly.
    SegWitTest().main()
| |
"""
This script migrates data from Midas to Girder.
It can migrate users, collections (communities in Midas), folders, items and
files (bitstreams in Midas).
It can optionally perform multiple operations in parallel to speed up the
migration (see N_JOBS).
It records its progress to a local SQLite database. If the migration fails for
any reason, it can be restarted and the script will skip entities that have
already been migrated.
Since Midas users do not have a username, only an email address, the script will
generate a username based on the first and last name of the user. When
duplicates exist, or when the username is too short, numbers will be appended.
Users are assigned new passwords that are randomly generated. These are logged
to stdout. The random password generation is seeded with the username so that
multiple runs of the script will generate the same password for a given user.
Large migrations are difficult and take a while. Failures are to be expected.
"""
from functools import wraps
from joblib import Parallel, delayed
import girder_client
import logging
import os
import pydas
import random
import requests
import sqlite3
import string
import tempfile
import threading
import time
# Configuration
# Midas (source) connection settings.
MIDAS_URL = 'http://127.0.0.1'
MIDAS_LOGIN = 'user@example.com'
MIDAS_API_KEY = 'API_KEY'
# Girder (destination) connection settings.
GIRDER_URL = 'http://127.0.0.1/api/v1'
GIRDER_LOGIN = 'user'
GIRDER_API_KEY = 'API_KEY'
# When True, walk everything but create nothing in Girder.
DRY_RUN = False
# Per-phase toggles.
MIGRATE_USERS = True
MIGRATE_COLLECTIONS = True
MIGRATE_FILES = True
MIGRATE_METADATA = True
# Local SQLite progress database (allows the migration to be resumed).
MIGRATE_DB = 'migrate.db'
# Number of parallel threads for user/item creation.
N_JOBS = 4
# Include or exclude specific resources by breadcrumb path
# e.g. 'collection/COPD/Asthma Phantom Images'
TO_MIGRATE = [
]
NOT_TO_MIGRATE = [
]
# Logging configuration
logging.basicConfig(
    format='%(asctime)s.%(msecs)03d %(message)s',
    datefmt='%H:%M:%S',
)
logger = logging.getLogger('migrate')
logger.setLevel(logging.DEBUG)
# SQLite functions
# Progress database: `migrated` marks entities that finished migrating;
# `created` maps Midas (type, id) pairs to the Girder ids created for them.
conn = sqlite3.connect(MIGRATE_DB, check_same_thread=False)
conn.execute(
    'create table if not exists migrated '
    '(type text, id int, primary key (type, id));')
conn.execute(
    'create table if not exists created '
    '(type text, id int, newId text, primary key (type, id));')
# Serializes all access to `conn` across the worker threads.
db_lock = threading.Lock()
def synchronized(lock):
    """Decorator factory: serialize all calls to the wrapped function
    through *lock*."""
    def decorator(func):
        @wraps(func)
        def locked(*args, **kwargs):
            lock.acquire()
            try:
                return func(*args, **kwargs)
            finally:
                lock.release()
        return locked
    return decorator
@synchronized(db_lock)
def migrated(type, id):
    """Return True (and log it) when (type, id) is recorded as migrated."""
    row = conn.execute(
        'select 1 from migrated where type = ? and id = ?;',
        (type, id)).fetchone()
    if row is not None:
        logger.info('DONE %s %s' % (type, id))
        return True
    return False
@synchronized(db_lock)
def set_migrated(type, id):
    """Record the Midas entity (type, id) as fully migrated."""
    sql = 'insert into migrated (type, id) values (?, ?);'
    conn.execute(sql, (type, id))
    conn.commit()
@synchronized(db_lock)
def created(type, id):
    """Return {'_id': girder_id} if a Girder object was already created
    for (type, id), otherwise a falsy value."""
    row = conn.execute(
        'select newId from created where type = ? and id = ?;',
        (type, id)).fetchone()
    return row and {'_id': row[0]}
@synchronized(db_lock)
def set_created(type, id, newId):
    """Record the Girder id created for the Midas entity (type, id)."""
    sql = 'insert into created (type, id, newId) values (?, ?, ?);'
    conn.execute(sql, (type, id, newId))
    conn.commit()
# Helper functions
def delete_default_folders(user):
    """Delete every folder directly under the given Girder user."""
    for folder in gc.listFolder(user['_id'], 'user'):
        gc.delete('folder/%s' % folder['_id'])
def generate_login(user, seen):
    """Build a unique login of the form 'first.last' for a Midas user.

    Keeps only ASCII letters and '.', appending a number when the result
    is shorter than 4 characters or already in *seen*. Gives up after
    1000 attempts and returns the last candidate.
    """
    allowed = string.ascii_letters + '.'
    for attempt in range(1000):
        login = ('%s.%s' % (user['firstname'], user['lastname'])).lower()
        # Filtering to ASCII letters already drops any non-ASCII input,
        # so the old .encode('ascii', 'ignore') step was redundant -- and
        # it broke under Python 3, where encode() yields bytes whose
        # elements are ints and the membership test raises TypeError.
        login = ''.join(c for c in login if c in allowed)
        if attempt:
            login += str(attempt)
        if len(login) >= 4 and login not in seen:
            break
    return login
def generate_password(length, seed):
    """Deterministically generate an alphanumeric password of *length*.

    The RNG is seeded with *seed* so repeated runs reproduce the same
    password for a given user.
    """
    rng = random.Random(seed)
    chars = string.ascii_letters + string.digits
    picked = []
    for _ in range(length):
        picked.append(rng.choice(chars))
    return ''.join(picked)
def write_temp_file(iter_content):
    """Stream the chunks of *iter_content* into a named temp file.

    Returns (path, total_bytes). The caller is responsible for removing
    the file.
    """
    total = 0
    tmp = tempfile.NamedTemporaryFile(delete=False)
    for chunk in iter_content:
        if not chunk:
            continue
        tmp.write(chunk)
        total += len(chunk)
    tmp.close()
    return tmp.name, total
def breadcrumb(name, bc, extra=''):
    """Log one progress line: right-aligned label, breadcrumb path, extra."""
    path = '/'.join(bc)
    logger.info('%9s: %s %s' % (name, path, extra))
def skip(bc):
    """Decide whether the breadcrumb tuple *bc* should be skipped.

    NOT_TO_MIGRATE always wins: anything at or below an excluded path is
    skipped. Otherwise, when TO_MIGRATE has (non-empty) entries, only
    paths sharing a prefix with one of them (in either direction) are
    migrated.
    """
    bc = '/'.join(bc)
    # Exclusion list: the old length guard plus slice comparison was an
    # inlined startswith; use the idiom directly.
    for item in NOT_TO_MIGRATE:
        if bc.startswith(item):
            return True
    # No non-empty include entries -> migrate everything not excluded.
    if not any(TO_MIGRATE):
        return False
    # Include list: keep bc when it is a prefix of an included path or
    # lies underneath one.
    for item in TO_MIGRATE:
        n = min(len(bc), len(item))
        if bc[:n] == item[:n]:
            return False
    return True
# Migration code
def lookup_resource(bc):
    """Resolve a breadcrumb to an existing Girder resource, or None."""
    try:
        return gc.resourceLookup('/' + '/'.join(bc))
    except requests.HTTPError:
        return None
def get_or_create(type, id, timestamp, bc, func, args):
    """Idempotently create the Girder resource for Midas entity (type, id).

    Returns the previously-created object when recorded locally;
    otherwise calls func(*args). On an HTTP error it falls back to
    looking the resource up by breadcrumb path (it may already exist in
    Girder from an earlier, unrecorded run) and re-raises if not found.
    """
    newObj = created(type, id)
    if newObj:
        return newObj
    try:
        newObj = func(*args)
    except requests.HTTPError:
        # Creation can fail when the resource already exists in Girder
        # but was never recorded locally; try to find it by path.
        newObj = lookup_resource(bc)
        if not newObj:
            raise
    # Preserve the original Midas creation timestamp on the new resource.
    # NOTE(review): setResourceTimestamp is not a stock girder_client
    # method -- presumably a server-side plugin endpoint; confirm.
    gc.setResourceTimestamp(newObj['_id'], type, created=timestamp)
    set_created(type, id, newObj['_id'])
    return newObj
def handle_file(item, newItem, bc):
    """Download the Midas item's file and upload it to the Girder item.

    Retries up to 3 times on HTTP errors; the downloaded temp file is
    always removed. No-op if already recorded as migrated.
    """
    if migrated('file', item['item_id']):
        return
    # Number of attempts before giving up.
    n = 3
    for i in range(n):
        path = None
        try:
            filename, iter_content = mc.download_item(item['item_id'], token)
            path, length = write_temp_file(iter_content)
            if not DRY_RUN:
                breadcrumb('File', bc, '[%d bytes]' % length)
                with open(path, 'rb') as f:
                    gc.uploadFile(
                        newItem['_id'], f, filename, length)
            # NOTE(review): this also marks the file migrated on DRY_RUN
            # passes -- confirm that is intended.
            set_migrated('file', item['item_id'])
            return
        except requests.HTTPError as e:
            logger.exception(e.response.text)
            if i == n - 1:
                # Out of retries: propagate the failure.
                raise
            breadcrumb('RETRY', bc)
            time.sleep(1)
        finally:
            # Always clean up the downloaded temp file.
            if path:
                os.remove(path)
def handle_item(item, parent, depth, bc):
    """Migrate one Midas item, optionally with its file and metadata."""
    if migrated('item', item['item_id']):
        return
    bc = bc + (item['name'],)
    if skip(bc):
        breadcrumb('SKIP', bc)
        return
    breadcrumb('Item', bc)
    if not DRY_RUN:
        func, args = gc.createItem, (
            parent['_id'],
            item['name'],
            item['description'],
            True,  # reuseExisting
        )
        newItem = get_or_create(
            'item', item['item_id'], item['date_creation'], bc, func, args)
    else:
        newItem = None
    if MIGRATE_FILES:
        handle_file(item, newItem, bc)
    if MIGRATE_METADATA:
        try:
            metadata = mc.get_item_metadata(item['item_id'], token)
        except pydas.exceptions.InvalidPolicy:
            # "The item must have at least one revision to have metadata"
            metadata = None
        if metadata:
            # Midas returns a list of qualifier/value pairs; flatten them.
            metadata = dict((x['qualifier'], x['value']) for x in metadata)
            breadcrumb('Metadata', bc, '[%d keys]' % len(metadata))
            if not DRY_RUN:
                gc.addMetadataToItem(newItem['_id'], metadata)
    set_migrated('item', item['item_id'])
def handle_folder(folder, parent, parentType, depth, bc):
    """Recursively migrate a Midas folder, its subfolders, and its items."""
    if migrated('folder', folder['folder_id']):
        return
    bc = bc + (folder['name'],)
    if skip(bc):
        breadcrumb('SKIP', bc)
        return
    breadcrumb('Folder', bc)
    if not DRY_RUN:
        func, args = gc.createFolder, (
            parent['_id'],
            folder['name'],
            folder['description'],
            parentType,
            # Midas privacy_status 0 -> public folder in Girder.
            int(folder['privacy_status']) == 0,
        )
        newFolder = get_or_create(
            'folder', folder['folder_id'], folder['date_creation'],
            bc, func, args)
    else:
        newFolder = None
    children = mc.folder_children(token, folder['folder_id'])
    folders = children['folders']
    items = children['items']
    # Subfolders are handled serially; items in parallel below.
    for child in folders:
        handle_folder(child, newFolder, 'folder', depth + 1, bc)
    # create items in parallel
    Parallel(n_jobs=N_JOBS, backend='threading')(
        delayed(handle_item)(item, newFolder, depth + 1, bc) for item in items)
    set_migrated('folder', folder['folder_id'])
def handle_community(community):
    """Migrate one Midas community to a Girder collection, with folders."""
    if migrated('community', community['community_id']):
        return
    bc = ('collection', community['name'])
    if skip(bc):
        breadcrumb('SKIP', bc)
        return
    breadcrumb('Community', bc)
    if not DRY_RUN:
        func, args = gc.createCollection, (
            community['name'],
            community['description'],
            # Midas privacy 0 -> public collection in Girder.
            int(community['privacy']) == 0,
        )
        newCollection = get_or_create(
            'collection', community['community_id'], community['creation'],
            bc, func, args)
    else:
        newCollection = None
    children = mc.get_community_children(community['community_id'], token)
    folders = children['folders']
    for folder in folders:
        handle_folder(folder, newCollection, 'collection', 1, bc)
    set_migrated('community', community['community_id'])
def handle_user(user, args):
    """Create one Girder user and migrate their personal folder tree.

    args is the positional tuple for gc.createUser:
    (login, email, first, last, password, admin).
    """
    if migrated('user', user['user_id']):
        return
    bc = ('user', args[0])
    if skip(bc):
        breadcrumb('SKIP', bc)
        return
    breadcrumb('User', bc)
    if not DRY_RUN:
        func = gc.createUser
        newUser = get_or_create(
            'user', user['user_id'], user['creation'], bc, func, args)
    else:
        newUser = None
    user_folder = mc.folder_get(token, user['folder_id'])
    children = mc.folder_children(token, user_folder['folder_id'])
    for folder in children['folders']:
        handle_folder(folder, newUser, 'user', 1, bc)
    set_migrated('user', user['user_id'])
def migrate_users():
    """Create a Girder user for every Midas user, then migrate their folders.

    Logs one 'email,login,password' line per user so the generated
    credentials can be communicated afterwards.
    """
    seen = set()
    users = mc.list_users(limit=0)
    to_create = []
    for user in users:
        email = user['email']
        first = user['firstname']
        last = user['lastname']
        # Password is seeded by email so reruns regenerate the same one.
        password = generate_password(12, email)
        admin = bool(int(user['admin']))
        login = generate_login(user, seen)
        seen.add(login)
        logger.info('%s,%s,%s' % (email, login, password))
        args = (login, email, first, last, password, admin)
        to_create.append((user, args))
    # create users in parallel
    Parallel(n_jobs=N_JOBS, backend='threading')(
        delayed(handle_user)(user, args) for user, args in to_create)
def migrate_collections():
    """Migrate every Midas community to a Girder collection."""
    for community in mc.list_communities(token):
        handle_community(community)
def login():
    """Authenticate against Midas and Girder, binding module globals
    token, mc (Midas communicator) and gc (Girder client)."""
    global token, mc, gc  # TODO - not global
    token = pydas.login(email=MIDAS_LOGIN, api_key=MIDAS_API_KEY, url=MIDAS_URL)
    mc = pydas.session.communicator
    gc = girder_client.GirderClient(apiUrl=GIRDER_URL)
    gc.authenticate(username=GIRDER_LOGIN, apiKey=GIRDER_API_KEY)
def main():
    """Authenticate, then run each enabled migration phase."""
    login()
    if MIGRATE_USERS:
        migrate_users()
    if MIGRATE_COLLECTIONS:
        migrate_collections()
if __name__ == '__main__':
    # Midas tokens expire; on InvalidToken wait a moment and restart.
    # main() is resumable thanks to the local progress database.
    while True:
        try:
            main()
            break
        except pydas.exceptions.InvalidToken:
            time.sleep(1)
| |
#!/usr/bin/python
import xml.etree.ElementTree as xml
import datetime as dt
import os, sys, time, re
import sqlite3
import json
from . import utils
from model import *
#SQLAlchemy
from sqlalchemy import create_engine, event, func
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql.expression import ClauseElement, desc
def main():
    """Entry point: build the trials database from the default paths."""
    # create() has no default for startNumber, so the old two-argument
    # call raised TypeError. Pass 0 so no files are skipped.
    create("trialsDB.sqlite3", "trials_xml", 0)
def create(dbPath, xmlFilesPath, startNumber, limit=0):
    """Build the trials database from scratch.

    dbPath: output SQLite file (removed first if present).
    xmlFilesPath: directory tree of NCT*.xml files.
    startNumber: skip files until this NCT number is reached.
    limit: stop after this many parsed files (0 = no limit).
    """
    # Remove the database file if it already exists
    try:
        os.remove(dbPath)
    except OSError:
        pass
    # Create the database file anew
    try:
        db = DBManager(dbPath, mutable=True)
        db.open()
    except DBException as e:
        print e
        sys.exit(1)
    # Iteration state
    skipFile = (startNumber > 0)
    numberParsed = 0
    idNumRE = re.compile('NCT0*(\d*).xml')
    importer = TrialImporter(db)
    # Walk through the xml files and add them to the DB
    for root, dirs, files in os.walk(xmlFilesPath):
        for filename in files:
            if not filename.endswith('xml'):
                continue
            if skipFile:
                m = idNumRE.match(filename)
                thisID = 0
                if m:
                    thisID = int(m.group(1))
                if thisID >= startNumber:
                    skipFile = False
            if not skipFile:
                xmlTrial = XMLTrial.withPath(os.path.join(root, filename))
                xmlTrial.populate()
                importer.addTrial(xmlTrial)
                numberParsed += 1
                # NOTE(review): this break only exits the inner loop, so
                # with multiple directories the limit may overshoot.
                if limit > 0 and numberParsed >= limit:
                    break
    importer.commitTrials()
    db.close()
def update(dbPath, zipfile):
    """Update an existing database from a zip archive of trial XML files.

    Each member is named <NCTID>.<ext>; only trials newer than the
    stored copy are replaced (see TrialImporter.updateTrial).
    """
    db = DBManager(dbPath, mutable=True)
    db.open()
    importer = TrialImporter(db)
    for name in zipfile.namelist():
        nctID = os.path.splitext(name)[0]
        data = zipfile.read(name)
        xmlTrial = XMLTrial(data, nctID)
        xmlTrial.populate()
        print "%s %s" % (nctID, xmlTrial.title)
        importer.updateTrial(xmlTrial)
    importer.commitTrials()
    db.close()
###
# DBManager
###
class DBException(Exception):
    """Raised when the trials database cannot be opened or its schema
    version does not match this script."""
class DBManager(object):
    """Owns access to the trials database.

    Two modes: SQLAlchemy (mutable=True) for creating/updating schema
    and rows, or raw sqlite3 (mutable=False) for read-only SQL queries
    with a schema-version check.
    """
    def __init__(self, dbPath, mutable=False):
        ####
        # Current user_version of the SQL database
        ####
        self.user_version = 3
        self.mutable = mutable
        self.path = dbPath
        # SQLAlchemy
        self.engine = None
        self.session = None
        #SQLite3
        self.cursor = None
        self.connection = None
    def open(self, force=False):
        # force applies only to the read-only path: it skips checkVersion.
        if self.mutable:
            self.initializeSQLAlchemy()
        else:
            self.initializeSQLite(force)
    def close(self):
        if self.mutable:
            self.session.close()
        else:
            self.cursor.close()
    ##
    ## SQLAlchemy for creating and updating the database
    ##
    def pragmaOnConnect(self, dbapi_con, con_record):
        # Enforce foreign keys and stamp the schema version on every
        # new SQLite connection.
        dbapi_con.execute('pragma foreign_keys=ON')
        dbapi_con.execute('pragma user_version=%d' % self.user_version)
    def initializeSQLAlchemy(self):
        URL = 'sqlite:///%s' % self.path
        self.engine = create_engine(URL, echo=False)
        event.listen(self.engine, 'connect', self.pragmaOnConnect)
        self.sessionMaker = sessionmaker(bind=self.engine)
        # NOTE(review): deleteSponsorOrphans presumably comes from the
        # star import of `model`; confirm it exists there.
        event.listen(self.sessionMaker, 'after_flush', deleteSponsorOrphans)
        self.session = self.sessionMaker()
        Base.metadata.create_all(self.engine)
    def newestLastChangedDate(self):
        # Most recent lastChangedDate across all stored trials.
        q = self.session.query(Trial.lastChangedDate).\
            order_by(desc(Trial.lastChangedDate))
        return q.first()[0]
    def getOrCreate(self, model, defaults=None, **kwargs):
        """Fetch the first row of *model* matching kwargs, or create one.

        NOTE: uses dict.iteritems(), i.e. this targets Python 2.
        The `defaults` parameter is currently unused.
        """
        instance = self.session.query(model).filter_by(**kwargs).first()
        if instance:
            return instance #, False
        else:
            params = dict((k, v) for k, v in kwargs.iteritems() if not isinstance(v, ClauseElement))
            instance = model(**params)
            self.session.add(instance)
            return instance #, True
    def deleteTrialWithNCTIDIfNeeded(self, nctID, lastChangedDate):
        """Delete the stored trial when the incoming data is newer.

        Returns True when the caller should (re-)add the trial.
        """
        shouldAdd = True
        trial = self.session.query(Trial).filter_by(nctID=nctID).first()
        if trial:
            if trial.lastChangedDate < lastChangedDate:
                self.session.delete(trial)
            else:
                shouldAdd = False
        return shouldAdd
    def commitContent(self):
        self.session.commit()
    ##
    ## SQLite3 for querying the database with raw SQL
    ##
    def initializeSQLite(self, force):
        self.connection = sqlite3.connect(self.path)
        self.cursor = self.connection.cursor()
        if not force:
            self.checkVersion()
    def checkVersion(self):
        # Refuse to open databases written by a different schema version.
        self.cursor.execute('PRAGMA user_version;')
        version = self.cursor.fetchone()[0]
        if version != self.user_version:
            raise DBException("Error opening database file: versions don't match",
                {'script version' : self.user_version, 'database version' : version })
    def executeAndFetchAll(self, *sql):
        self.cursor.execute(*sql)
        return self.cursor.fetchall()
###
# TrialImporter
###
class TrialImporter(object):
    """Translates parsed XMLTrial objects into ORM rows via DBManager."""
    def __init__(self, dbManager):
        self.db = dbManager
        # Lazily-loaded list of NCT ids (see trialIncludedInPrayle).
        self.prayleACTs = None
        self.currentNCTID = ""
        # Captures a parenthesized short name, e.g. "Acme (ACME) Corp".
        self.parensRE = re.compile(".*\(([^\)]+)\).*")
        shortNamesFile = open(utils.relativePath('sponsorShortNames.json'))
        self.sponsorShortNames = json.load(shortNamesFile)
        shortNamesFile.close()
    def addTrial(self, xmlTrial):
        # Only trials with a start date (isComplete) are imported.
        if xmlTrial.isComplete():
            self.currentNCTID = xmlTrial.nctID
            print "will add trial: %s" % self.currentNCTID
            sponsorClass = self.insertSponsorClass(xmlTrial)
            sponsor = self.insertSponsor(xmlTrial, sponsorClass)
            countries = self.insertCountries(xmlTrial)
            interventions = self.insertInterventions(xmlTrial)
            trial = self.insertTrial(xmlTrial, sponsor, countries, interventions)
    def updateTrial(self, xmlTrial):
        # Replace the stored trial only when the XML data is newer.
        if self.db.deleteTrialWithNCTIDIfNeeded(xmlTrial.nctID, xmlTrial.lastChangedDate):
            print "newer trial data imported for %s" % xmlTrial.nctID
            self.addTrial(xmlTrial)
    def sqlDate(self, date):
        # Format a date for SQL; returns 0 when date is None or invalid.
        outDate = 0
        if date is not None:
            try:
                outDate = time.strftime('%Y-%m-%d %H:%M:%S', date.timetuple())
            except Exception as e:
                print "%s: failed converting date '%s'" % (self.currentNCTID, date)
        return outDate
    def insertSponsorClass(self, trial):
        return self.db.getOrCreate(SponsorClass, sclass=trial.sponsorClass)
    def insertSponsor(self, trial, sponsorClass):
        name = trial.leadSponsor
        shortName = None
        if name in self.sponsorShortNames:
            shortName = self.sponsorShortNames[name]
        # Pull out the shortname if needed, and if available
        if shortName is None and "(" in name:
            m = self.parensRE.match(name)
            if m:
                shortName = m.groups()[0]
        return self.db.getOrCreate(Sponsor, name=name, shortName=shortName, sclass=sponsorClass)
    def insertCountries(self, trial):
        outCountries = []
        for country in trial.countries:
            outCountries.append(self.db.getOrCreate(Country, name=country))
        return outCountries
    def insertTrial(self, xmlTrial, sponsor, countries, interventions):
        # Copy every plain-column field (domesticKeys) from the XML trial.
        trial = Trial(xmlTrial.nctID)
        trial.countries = countries
        trial.sponsor = sponsor
        trial.interventions = interventions
        for col in trial.domesticKeys:
            value = getattr(xmlTrial, col, None)
            if value:
                setattr(trial, col, value)
        self.db.session.add(trial)
        return trial
    def insertInterventions(self, xmlTrial):
        interventions = []
        for iDict in xmlTrial.interventions:
            itype = self.db.getOrCreate(InterventionType, itype=iDict["type"])
            intervention = Intervention(iDict["name"], itype)
            interventions.append(intervention)
            self.db.session.add(intervention)
        return interventions
    def commitTrials(self):
        self.db.commitContent()
    def trialIncludedInPrayle(self, trial):
        # Lazy-load the reference list on first use; returns 1 or 0.
        if self.prayleACTs is None:
            praylePath = utils.relativePath('Prayle2012ACTs.txt')
            self.prayleACTs = [line.strip() for line in open(praylePath)]
        if trial.nctID in self.prayleACTs:
            return 1
        else:
            return 0
###
# XMLTrial
###
class XMLTrial(object):
    """One ClinicalTrials.gov trial parsed from its XML record."""
    def __init__(self, data, nctID):
        self.fields = []
        self.string = data
        # Header fields
        # self.headerFields = ['NCT ID', 'Lead Sponsor', 'Sponsor Class', 'Recruitment', 'Interventions',
        #                      'Start Date', 'Completion Date', 'Primary Completion Date', 'Results Date',
        #                      'Phase', 'Countries']
        # Field variables
        self.nctID = nctID
        self.leadSponsor = ""
        self.sponsorClass = ""
        self.recruitment = ""
        self.interventions = []
        self.startDate = ''
        self.completionDate = ''
        self.primaryCompletionDate = ''
        self.resultsDate = ''
        self.lastChangedDate = ''
        self.phaseMask = 0
        self.countries = []
        self.title = ""
    @classmethod
    def withPath(cls, path):
        # Alternate constructor: read the XML from a file; the NCT id is
        # the file's base name.
        data = open(path).read()
        nctID = os.path.splitext(os.path.basename(path))[0]
        return cls(data, nctID)
    def populate(self):
        self.parseXML()
    ###
    # Getting the data and parsing it
    ###
    def parseXML(self):
        etree = xml.fromstring(self.string)
        # Pull out the data
        self.title = etree.find("brief_title").text
        self.leadSponsor = etree.find("sponsors/lead_sponsor/agency").text
        self.sponsorClass = etree.find("sponsors/lead_sponsor/agency_class").text
        self.recruitment = etree.find("overall_status").text
        self.phaseMask = self.parsePhaseMask(etree.find("phase").text)
        for e in etree.findall("location_countries/country"):
            self.countries.append(e.text)
        for e in etree.findall("intervention"):
            interventionDict = {'type': e.find("intervention_type").text,
                                'name': e.find("intervention_name").text}
            self.interventions.append(interventionDict)
        # Dates
        self.startDate = self.parseDate(etree.find("start_date"))
        self.completionDate = self.parseDate(etree.find("completion_date"))
        self.primaryCompletionDate = self.parseDate(etree.find("primary_completion_date"))
        self.resultsDate = self.parseDate(etree.find("firstreceived_results_date"))
        self.lastChangedDate = self.parseDate(etree.find("lastchanged_date"))
    def isComplete(self):
        """This will probably contain more checks"""
        return self.startDate is not None
    def parseDate(self, date):
        # Accepts an Element or a string. Tries 'Month day, Year' then
        # 'Month Year'; returns a datetime.date or None on failure.
        stringToParse = ''
        # outDate = datetime.date(datetime.MINYEAR, 1, 1) # MINYEAR = invalid date
        outDate = None
        if isinstance(date, xml.Element):
            stringToParse = date.text
        elif isinstance(date, str):
            stringToParse = date
        if len(stringToParse):
            try:
                outDate = dt.datetime.strptime(stringToParse, '%B %d, %Y').date()
            except Exception as e:
                try:
                    outDate = dt.datetime.strptime(stringToParse, '%B %Y').date()
                except Exception as e:
                    print "Failed parsing date for %s, '%s': %s" % (self.nctID, stringToParse, e)
        return outDate
    def parsePhaseMask(self, phaseText):
        # Bitmask of trial phases: bit N is set when "N" appears in text.
        outMask = 0
        if "0" in phaseText:
            outMask += 1 << 0
        if "1" in phaseText:
            outMask += 1 << 1
        if "2" in phaseText:
            outMask += 1 << 2
        if "3" in phaseText:
            outMask += 1 << 3
        if "4" in phaseText:
            outMask += 1 << 4
        return outMask
# Default function is main()
# Build the full database when executed directly.
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python3
from tkinter import *
from tkinter import ttk
from geometry import *
from random import randint
from collections import defaultdict
from operator import methodcaller
import math
# Golden ratio. NOTE(review): not referenced in this chunk -- presumably
# used by the geometry helpers; confirm before removing.
PHI = (1 + 5**0.5) / 2
canvas_height = 300
# Canvas interaction modes.
TRIANGLE_MODE = 0
LINE_MODE = 1
RIGHT_MODE = 2
RECT_MODE = 3
mode = TRIANGLE_MODE
# Maps rectangle shapes to the canvas item ids of their triangles.
rectangles = dict()
# A temporary list of points the user placed on the canvas
points = list()
# A dictionary of triangles
triangles = dict()
def random_color():
    """Return a uniformly random '#rrggbb' color string."""
    return hex_color(randint(0, 0xFFFFFF))
# A dictionary that colors the triangle
# (missing keys are assigned a fresh random color on first access)
colors = defaultdict(random_color)
# pivot to rotate around
global_pivot = None
def hex_color(color):
    """Format a non-negative integer as a '#rrggbb' hex color string,
    zero-padded to at least six hex digits."""
    return '#%06x' % color
def draw_shape(shape):
    """Draw every triangle of *shape* and mark its convex-hull vertices.

    The old 'global color, color_inc' declaration referenced names that
    are defined nowhere in the module; removed as dead code.
    """
    for tri in shape.triangles:
        make_triangle(tri)
    for ex, ey in shape.convex_hull():
        oval = canvas.create_oval((ex - 2, ey - 2, ex + 2, ey + 2))
        canvas.addtag('point', 'withtag', oval)
def draw_rect(shape):
    """Draw a rectangle shape and remember its canvas item ids."""
    rectangles[shape] = [make_triangle(tri) for tri in shape.triangles]
def squash(shape):
    """Return a click callback that replaces *shape* with its squished
    form on the canvas."""
    def callback(event):
        item_ids = rectangles[shape]
        squished = shape.squish_rectangle()
        del rectangles[shape]
        for item_id in item_ids:
            canvas.delete(item_id)
        draw_shape(squished)
    return callback
def add_point(event):
    """Adds a point to the canvas. If there are three loose points, they will
    be connected to form a triangle.

    In LINE_MODE the last two points instead define a cutting line that
    splits every existing triangle. A click while in RIGHT_MODE or
    RECT_MODE only cancels that mode.
    """
    global mode, triangles
    # Fix-style cleanup: the two identical mode-reset branches are merged.
    if mode in (RIGHT_MODE, RECT_MODE):
        mode = TRIANGLE_MODE
        return
    ex, ey = event.x, event.y
    points.append((ex, ey))
    i = canvas.create_oval((ex - 2, ey - 2, ex + 2, ey + 2))
    canvas.addtag('point', 'withtag', i)
    if mode == LINE_MODE and len(points) >= 2:
        # Cut every triangle by the infinite line through the last two points.
        p1, p2 = points.pop(), points.pop()
        x1, y1 = p1
        x2, y2 = p2
        i = canvas.create_line((x1, y1, x2, y2))
        canvas.addtag('line', 'withtag', i)
        new_shapes = list()
        for t in triangles.values():
            u, d = t.split(LineSegment(p1, p2).to_line())
            new_shapes.append(u)
            new_shapes.append(d)
        # Redraw everything from the split pieces.
        canvas.delete('triangle')
        triangles = dict()
        for s in new_shapes:
            draw_shape(s)
        mode = TRIANGLE_MODE
    # Every three loose points become a new triangle.
    while len(points) >= 3 and mode == TRIANGLE_MODE:
        p1, p2, p3 = points.pop(), points.pop(), points.pop()
        make_triangle(Triangle((p1, p2, p3)))
def set_pivot(event):
    """Right-click handler: record the clicked position as the rotation pivot."""
    global global_pivot
    global_pivot = (event.x, event.y)
def update_rotate(x):
    """Scale callback: mirror the slider value into the rotation label."""
    rotate_display.set(x)
def make_triangle(tri):
    """Draw *tri* as a filled polygon, register it for rotation handling,
    and return its canvas item id."""
    item = canvas.create_polygon(tri.points, fill=colors[tri])
    canvas.addtag('triangle', 'withtag', item)
    canvas.tag_bind(item, '<Button-1>', rotate_triangle(item))
    triangles[item] = tri
    return item
def rotate_triangle(i):
    """Build the left-click handler for canvas item *i*.

    Depending on the current mode the handler converts the triangle into
    two right-angled triangles, into a rectangle, or rotates it around
    the global pivot by the slider angle.
    """
    def rotate_tri(event):
        tri = triangles[i]
        if mode == RIGHT_MODE:
            first, second = tri.to_rightangle()
            del triangles[i]
            canvas.delete(i)
            make_triangle(first)
            make_triangle(second)
        elif mode == RECT_MODE:
            rect = tri.to_rectangle()
            del triangles[i]
            canvas.delete(i)
            draw_rect(rect)
        else:
            angle = rotate.get() * math.pi / 180
            rotated = tri.rotate(global_pivot, angle)
            print(angle)
            del triangles[i]
            canvas.delete(i)
            make_triangle(rotated)
    return rotate_tri
def clear_canvas():
    """Remove every drawn item and reset all drawing state.

    Fix: the original issued two consecutive ``canvas.delete`` calls where
    the second's tag list was a superset of the first's; the redundant
    first call is dropped.
    """
    global points, triangles, global_pivot, rectangles
    points = list()
    triangles = dict()
    global_pivot = None
    rectangles = dict()
    # One delete covers every tag this program creates.
    canvas.delete('triangle', 'point', 'line', 'rectangle')
def set_state(state):
    """Switch the canvas interaction mode (one of the *_MODE constants)."""
    global mode
    mode = state
def square_rects():
    """Replace every tracked rectangle with its squared version and redraw."""
    global rectangles
    squares = [r.square_rectangle() for r in rectangles]
    for item_ids in rectangles.values():
        for item in item_ids:
            canvas.delete(item)
    rectangles = dict()
    for s in squares:
        draw_rect(s)
def orientate_shapes():
    """Re-orient every tracked rectangle and redraw it.

    Fix: the original built a *lazy* generator over the global
    ``rectangles`` and only consumed it after rebinding that global —
    correct only because a genexpr captures its outer iterable eagerly.
    Materializing the list up front removes the fragile dependency on
    that subtlety.
    """
    global rectangles
    oriented = [r.orientate() for r in rectangles]
    for item_ids in rectangles.values():
        for item in item_ids:
            canvas.delete(item)
    rectangles = dict()
    for r in oriented:
        draw_rect(r)
def merge_shapes():
    """Merge all tracked rectangles into a single square, move its first
    hull vertex to (100, 100), and redraw it.

    Fixes: use ``is None`` for the None comparison, and return early when
    there are no rectangles (the original crashed with an AttributeError
    on ``None.orientate()``).
    """
    global rectangles
    new_rect = None
    for r in rectangles:
        if new_rect is None:
            new_rect = r
        else:
            new_rect = new_rect.merge_square(r)
    for item_ids in rectangles.values():
        for item in item_ids:
            canvas.delete(item)
    rectangles = dict()
    if new_rect is None:
        # Nothing to merge: leave the canvas empty instead of crashing.
        return
    new_rect = new_rect.orientate()
    x, y = new_rect.convex_hull()[0]
    new_rect = new_rect.translate((100 - x, 100 - y))
    draw_rect(new_rect)
# --- Widget construction -----------------------------------------------------
root = Tk()
frame = ttk.Frame(root)
# Canvas proportions follow the golden ratio.
canvas = Canvas(frame, width=canvas_height*PHI, height=canvas_height)
# Left click places points; right click sets the rotation pivot.
canvas.bind('<Button-1>', add_point)
canvas.bind('<Button-3>', set_pivot)
# Slider selecting the rotation angle in degrees.
rotate = ttk.Scale(frame, length=200, from_=-360, to=360, orient=HORIZONTAL,
                   command=update_rotate)
rotate_label = ttk.Label(frame, text=rotate.get())
rotate_display = StringVar()
rotate_label['textvariable'] = rotate_display
# Mode / action buttons.
clear = ttk.Button(frame, text='Clear', command = clear_canvas)
cutLine = ttk.Button(frame, text='Cut By Line', command = lambda : set_state(LINE_MODE))
right_angle = ttk.Button(frame, text='Right-angle', command = lambda : set_state(RIGHT_MODE))
rectangle = ttk.Button(frame, text='Rectangle', command = lambda : set_state(RECT_MODE))
square = ttk.Button(frame, text='Square', command = square_rects)
orientate = ttk.Button(frame, text='Orientate', command = orientate_shapes)
merge = ttk.Button(frame, text='Merge', command = merge_shapes)
# --- Layout and main loop ----------------------------------------------------
frame.grid()
canvas.grid()
rotate.grid()
rotate_label.grid()
clear.grid()
cutLine.grid()
right_angle.grid()
rectangle.grid()
square.grid()
orientate.grid()
merge.grid()
root.mainloop()
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: make ``Alarm.last_checked`` nullable."""

    def forwards(self, orm):
        """Apply: allow NULL in the ``ddsc_core_alarm.last_checked`` column."""
        # Changing field 'Alarm.last_checked'
        db.alter_column(u'ddsc_core_alarm', 'last_checked', self.gf('django.db.models.fields.DateTimeField')(null=True))

    def backwards(self, orm):
        """Revert: restore the NOT NULL constraint on ``last_checked``."""
        # Changing field 'Alarm.last_checked'
        db.alter_column(u'ddsc_core_alarm', 'last_checked', self.gf('django.db.models.fields.DateTimeField')())

    # Frozen ORM snapshot auto-generated by South (--freeze). It is used to
    # build the historical `orm` object passed to forwards()/backwards().
    # Do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'ddsc_core.alarm': {
            'Meta': {'object_name': 'Alarm'},
            'active_status': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'date_cr': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True', 'blank': 'True'}),
            'first_born': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'frequency': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_checked': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'logical_check': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'message_type': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'previous_alarm': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Alarm']", 'null': 'True', 'blank': 'True'}),
            'single_or_group': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['contenttypes.ContentType']"}),
            'template': ('django.db.models.fields.TextField', [], {'default': "u'this is a alarm message template'"}),
            'urgency': ('django.db.models.fields.IntegerField', [], {'default': '2'})
        },
        u'ddsc_core.alarm_active': {
            'Meta': {'object_name': 'Alarm_Active'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'alarm': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Alarm']"}),
            'deactivated_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1900, 1, 1, 0, 0)'}),
            'first_triggered_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1900, 1, 1, 0, 0)'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {})
        },
        u'ddsc_core.alarm_item': {
            'Meta': {'object_name': 'Alarm_Item'},
            'alarm': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Alarm']"}),
            'alarm_type': ('django.db.models.fields.related.ForeignKey', [], {'default': '1', 'to': "orm['contenttypes.ContentType']"}),
            'comparision': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'first_born': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logical_check': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'value_bool': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'value_double': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'value_int': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'value_text': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'value_type': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        u'ddsc_core.compartment': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'Compartment'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'numeric_code': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'ddsc_core.folder': {
            'Meta': {'object_name': 'Folder'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        u'ddsc_core.idmapping': {
            'Meta': {'object_name': 'IdMapping'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'timeseries': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Timeseries']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        u'ddsc_core.ipaddress': {
            'Meta': {'object_name': 'IPAddress'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        u'ddsc_core.location': {
            'Meta': {'object_name': 'Location'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 14, 0, 0)'}),
            'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'geometry_precision': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'db_index': 'True'}),
            'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'point_geometry': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '4258', 'null': 'True', 'blank': 'True'}),
            'real_geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {'srid': '4258', 'null': 'True', 'blank': 'True'}),
            'relative_location': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'show_on_map': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'})
        },
        u'ddsc_core.locationtype': {
            'Meta': {'object_name': 'LocationType'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'locations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'location_types'", 'blank': 'True', 'to': u"orm['ddsc_core.Location']"}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'})
        },
        u'ddsc_core.logicalgroup': {
            'Meta': {'ordering': "[u'owner', u'name']", 'unique_together': "((u'owner', u'name'),)", 'object_name': 'LogicalGroup'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataOwner']"}),
            'timeseries': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'logical_groups'", 'blank': 'True', 'to': u"orm['ddsc_core.Timeseries']"})
        },
        u'ddsc_core.logicalgroupedge': {
            'Meta': {'unique_together': "((u'child', u'parent'),)", 'object_name': 'LogicalGroupEdge'},
            'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'parents'", 'to': u"orm['ddsc_core.LogicalGroup']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'childs'", 'to': u"orm['ddsc_core.LogicalGroup']"})
        },
        u'ddsc_core.logrecord': {
            'Meta': {'object_name': 'LogRecord'},
            'host': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.CharField', [], {'max_length': '8', 'db_index': 'True'}),
            'line': ('django.db.models.fields.SmallIntegerField', [], {}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
        },
        u'ddsc_core.manufacturer': {
            'Meta': {'object_name': 'Manufacturer'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'})
        },
        u'ddsc_core.measuringdevice': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'MeasuringDevice'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'ddsc_core.measuringmethod': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'MeasuringMethod'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'titel': ('django.db.models.fields.CharField', [], {'max_length': '600', 'null': 'True'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'ddsc_core.parameter': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'Parameter'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'cas_number': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'sikb_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True'}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'ddsc_core.processingmethod': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'ProcessingMethod'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'ddsc_core.referenceframe': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'ReferenceFrame'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'ddsc_core.source': {
            'Meta': {'object_name': 'Source'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 14, 0, 0)'}),
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'frequency': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'manufacturer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Manufacturer']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
            'source_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
            'timeout': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'})
        },
        u'ddsc_core.sourcegroup': {
            'Meta': {'object_name': 'SourceGroup'},
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ddsc_core.Source']", 'symmetrical': 'False'})
        },
        u'ddsc_core.statuscache': {
            'Meta': {'object_name': 'StatusCache'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'max_val': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'mean_val': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'min_val': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'modify_timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1900, 1, 1, 0, 0)'}),
            'nr_of_measurements_doubtful': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'nr_of_measurements_reliable': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'nr_of_measurements_total': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'nr_of_measurements_unreliable': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'status_date': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'std_val': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'timeseries': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Timeseries']"})
        },
        u'ddsc_core.timeseries': {
            'Meta': {'object_name': 'Timeseries'},
            'compartment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Compartment']", 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 14, 0, 0)'}),
            'data_set': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'timeseries'", 'blank': 'True', 'to': "orm['lizard_security.DataSet']"}),
            'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'blank': 'True'}),
            'first_value_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latest_value_number': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'latest_value_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'latest_value_timestamp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'timeseries'", 'null': 'True', 'to': u"orm['ddsc_core.Location']"}),
            'measuring_device': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.MeasuringDevice']", 'null': 'True', 'blank': 'True'}),
            'measuring_method': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.MeasuringMethod']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataOwner']", 'null': 'True', 'blank': 'True'}),
            'parameter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Parameter']"}),
            'processing_method': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.ProcessingMethod']", 'null': 'True', 'blank': 'True'}),
            'reference_frame': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.ReferenceFrame']", 'null': 'True', 'blank': 'True'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Source']", 'null': 'True', 'blank': 'True'}),
            'supplying_systems': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'through': u"orm['ddsc_core.IdMapping']", 'blank': 'True'}),
            'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ddsc_core.Unit']"}),
            'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36', 'blank': 'True'}),
            'validate_diff_hard': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'validate_diff_soft': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'validate_max_hard': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'validate_max_soft': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'validate_min_hard': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'validate_min_soft': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'value_type': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'})
        },
        u'ddsc_core.timeseriesgroup': {
            'Meta': {'object_name': 'TimeseriesGroup'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'parameters': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ddsc_core.Parameter']", 'symmetrical': 'False'}),
            'sources': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['ddsc_core.Source']", 'symmetrical': 'False'})
        },
        u'ddsc_core.timeseriesselectionrule': {
            'Meta': {'ordering': "[u'pk']", 'object_name': 'TimeseriesSelectionRule'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'criterion': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'operator': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'})
        },
        u'ddsc_core.unit': {
            'Meta': {'ordering': "[u'description']", 'object_name': 'Unit'},
            'begin_date': ('django.db.models.fields.DateField', [], {}),
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '12'}),
            'conversion_factor': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '60'}),
            'dimension': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
            'end_date': ('django.db.models.fields.DateField', [], {}),
            'group': ('django.db.models.fields.CharField', [], {'max_length': '60', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'source': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
            'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'lizard_security.dataowner': {
            'Meta': {'ordering': "['name']", 'object_name': 'DataOwner'},
            'data_managers': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'remarks': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        'lizard_security.dataset': {
            'Meta': {'ordering': "['owner', 'name']", 'unique_together': "(('owner', 'name'),)", 'object_name': 'DataSet'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '80'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_security.DataOwner']", 'null': 'True', 'blank': 'True'})
        }
    }

    # Apps whose frozen models are complete in the snapshot above.
    complete_apps = ['ddsc_core']
| |
#Copyright ReportLab Europe Ltd. 2000-2004
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/docs/userguide/ch1_intro.py
# This script is executed by the user-guide build machinery; helpers such as
# title(), centred(), nextTemplate(), headingTOC() and getStory() come from
# the star-import of tools.docco.rl_doc_utils and append flowables to a
# shared global story that is later rendered to PDF.
from tools.docco.rl_doc_utils import *
from reportlab.platypus.tableofcontents import TableOfContents
from datetime import datetime
import reportlab
# Title page: library name, guide title, version and generation timestamp.
title("ReportLab PDF Library")
title("User Guide")
centred('ReportLab Version ' + reportlab.Version)
centred(datetime.now().strftime('Document generated on %Y/%m/%d %H:%M:%S %Z'))
# Switch to the table-of-contents page template and emit the TOC flowable.
nextTemplate("TOC")
headingTOC()
toc = TableOfContents()
PS = ParagraphStyle  # short alias to keep the level-style list readable
# One paragraph style per TOC nesting level; deeper levels indent further.
toc.levelStyles = [
    PS(fontName='Times-Bold', fontSize=14, name='TOCHeading1', leftIndent=20, firstLineIndent=-20, spaceBefore=5, leading=16),
    PS(fontSize=12, name='TOCHeading2', leftIndent=40, firstLineIndent=-20, spaceBefore=0, leading=12),
    PS(fontSize=10, name='TOCHeading3', leftIndent=60, firstLineIndent=-20, spaceBefore=0, leading=12),
    PS(fontSize=10, name='TOCHeading4', leftIndent=100, firstLineIndent=-20, spaceBefore=0, leading=12),
]
getStory().append(toc)
# Back to the normal body-page template for the chapters that follow.
nextTemplate("Normal")
########################################################################
#
# Chapter 1
#
########################################################################
heading1("Introduction")
heading2("About this document")
disc("""This document is an introduction to the ReportLab PDF library.
Some previous programming experience
is presumed and familiarity with the Python Programming language is
recommended. If you are new to Python, we tell you in the next section
where to go for orientation.
""")
disc("""
This manual does not cover 100% of the features, but should explain all
the main concepts and help you get started, and point you at other
learning resources.
After working your way through this, you should be ready to begin
writing programs to produce sophisticated reports.
""")
disc("""In this chapter, we will cover the groundwork:""")
bullet("What is ReportLab all about, and why should I use it?")
bullet("What is Python?")
bullet("How do I get everything set up and running?")
todo("""
We need your help to make sure this manual is complete and helpful.
Please send any feedback to our user mailing list,
which is signposted from <a href="http://www.reportlab.org/">www.reportlab.org</a>.
""")
heading2("What is the ReportLab PDF Library?")
disc("""This is a software library that lets you directly
create documents in Adobe's Portable Document Format (PDF) using
the Python programming language. It also creates charts and data graphics
in various bitmap and vector formats as well as PDF.""")
# Fix grammar in the emitted manual text: "generated ... or driving a printer"
# mixed verb forms; use "drove" to parallel "generated".
disc("""PDF is the global standard for electronic documents. It
supports high-quality printing yet is totally portable across
platforms, thanks to the freely available Acrobat Reader. Any
application which previously generated hard copy reports or drove a printer
can benefit from making PDF documents instead; these can be archived,
emailed, placed on the web, or printed out the old-fashioned way.
However, the PDF file format is a complex
indexed binary format which is impossible to type directly.
The PDF format specification is more than 600 pages long and
PDF files must provide precise byte offsets -- a single extra
character placed anywhere in a valid PDF document can render it
invalid. This makes it harder to generate than HTML.""")
disc("""Most of the world's PDF documents have been produced
by Adobe's Acrobat tools, or rivals such as JAWS PDF Creator, which act
as 'print drivers'. Anyone wanting to automate PDF production would
typically use a product like Quark, Word or Framemaker running in a loop
with macros or plugins, connected to Acrobat. Pipelines of several
languages and products can be slow and somewhat unwieldy.
""")
disc("""The ReportLab library directly creates PDF based on
your graphics commands. There are no intervening steps. Your applications
can generate reports extremely fast - sometimes orders
of magnitude faster than traditional report-writing
tools. This approach is shared by several other libraries - PDFlib for C,
iText for Java, iTextSharp for .NET and others. However, The ReportLab library
differs in that it can work at much higher levels, with a full featured engine
for laying out documents complete with tables and charts. """)
disc("""In addition, because you are writing a program
in a powerful general purpose language, there are no
restrictions at all on where you get your data from,
how you transform it, and the kind of output
you can create. And you can reuse code across
whole families of reports.""")
disc("""The ReportLab library is expected to be useful
in at least the following contexts:""")
bullet("Dynamic PDF generation on the web")
bullet("High-volume corporate reporting and database publishing")
bullet("""An embeddable print engine for other applications, including
a 'report language' so that users can customize their own reports. <i>
This is particularly relevant to cross-platform apps which cannot
rely on a consistent printing or previewing API on each operating
system</i>.""")
bullet("""A 'build system' for complex documents with charts, tables
and text such as management accounts, statistical reports and
scientific papers """)
bullet("""Going from XML to PDF in one step!""")
heading2("What is Python?")
disc("""
Python is an <i>interpreted, interactive, object-oriented</i> programming language. It is often compared to Tcl, Perl,
Scheme or Java.
""")
disc("""
Python combines remarkable power with very clear syntax. It has modules, classes, exceptions, very high level
dynamic data types, and dynamic typing. There are interfaces to many system calls and libraries, as well as to
various windowing systems (X11, Motif, Tk, Mac, MFC). New built-in modules are easily written in C or C++.
Python is also usable as an extension language for applications that need a programmable interface.
""")
disc("""
Python is as old as Java and has been growing steadily in popularity for years; since our
library first came out it has entered the mainstream. Many ReportLab library users are
already Python devotees, but if you are not, we feel that the language is an excellent
choice for document-generation apps because of its expressiveness and ability to get
data from anywhere.
""")
disc("""
Python is copyrighted but <b>freely usable and distributable, even for commercial use</b>.
""")
heading2("Acknowledgements")
disc("""Many people have contributed to ReportLab. We would like to thank
in particular (in approximately chronological order) Chris Lee, Magnus Lie Hetland,
Robert Kern, Jeff Bauer (who contributed normalDate.py); Jerome Alet (numerous patches
and the rlzope demo), Andre Reitz, Max M, Albertas Agejevas, T Blatter, Ron Peleg,
Gary Poster, Steve Halasz, Andrew Mercer, Paul McNett, Chad Miller, Tim Roberts,
Jorge Godoy and Benn B.""")
disc("""Special thanks go to Just van Rossum for his valuable assistance with
font technicalities.""")
# Fix garbled acknowledgement sentence: "the DarkGarden font for and
# Bitstream Inc." had a stray "for"; the intended reading is
# "...for the DarkGarden font and Bitstream Inc. for the Vera fonts."
disc("""Marius Gedminas deserves a big hand for contributing the work on TrueType fonts and we
are glad to include these in the toolkit. Finally we thank Michal Kosmulski for the DarkGarden font
and Bitstream Inc. for the Vera fonts.""")
heading2("Installation and Setup")
heading3("A note on available versions")
disc("""The latest version of the ReportLab library can be found at
^http://www.reportlab.org/downloads.html^. Older versions can be found at ^http://www.reportlab.com/ftp/^.
Each successive version is stored in both zip
and tgz format, but the contents are identical apart from line endings.
Versions are numbered: $ReportLab_1_00.zip$, $ReportLab_1_01.zip$ and so on. The
latest stable version is also available as just $reportlab.zip$ (or
$reportlab.tgz$), which is actually a symbolic link to the latest
numbered version. Daily snapshots of the trunk are available as
$current.zip$ or $current.tgz$.
Finally, from version 2.3 onwards, there is also a Windows installer
available for Python versions 2.3 - 2.6, named $ReportLab-2.x.win32-py2.x.exe$
""")
heading3("Installation on Windows")
restartList()
list("""First, install Python from $http://www.python.org/.$
Reportlab 2.x works with Python 2.3 upwards but we recommend to use
the latest stable version of Python 2.5.
After installing, you should be able to run the
'Python (command line)' option from the Start Menu.
""")
list("""We strongly recommend installing the Python Windows
Extensions, which gives you access to Windows data sources, COM support, WinAPI calls, and the PythonWin IDE. This
can be found at ^http://sourceforge.net/projects/pywin32/^.
Once this is installed, you can start
Pythonwin from the Start Menu and get a GUI application.
""")
list("""Install the Python Imaging Library ($PIL$) from $http://www.pythonware.com/products/pil/$. This
step is optional but allows you to include images in your reports.
""")
list("""Now you are ready to install reportlab itself.
The easiest way to do this is to use the .exe installer for Windows, which
installs both the ReportLab source code and the precompiled DLLs for you.
""")
list("""
If, however, you wish to install from source, download and unzip the archive
from http://www.reportlab.org/downloads.html and copy the $reportlab$ directory
onto your PythonPath; You should now be able to go to a Python
command line interpreter and type $import reportlab$ without getting
an error message.
""")
list("""Next, Download the zip file of precompiled DLLs for your Python version from
the bottom of the ^http://www.reportlab.org/downloads.html^ downloads page, and unzip
them into ^C:\Python2x\lib\site-packages^ (or its equivalent for other Python versions
""")
list("""Open up a $MS-DOS$ command prompt and CD to
"$reportlab\\..\\tests$". Enter "$runAll.py$". You should see lots of dots
and no error messages. This will also create many PDF files and generate
the manuals in ^reportlab/docs^ (including this one). """)
list("""
Finally, we recommend you download and run the script ^rl_check.py^ from
^http://www.reportlab.org/ftp/^. This will health-check all the above
steps and warn you if anything is missing or mismatched.""")
heading3("Installation instructions for Unix")
disc("""
""")
restartList()
list("""First, install Python. On a large number of Unix and Linux distributions, Python is already installed,
or is available as a standard package you can install with the relevant package manager.""")
list("""
You will also need to install the Freetype 2 Font Engine, Python Imaging Library, and the gzip library,
along with a C compiler.
""")
list("""You will also need the source code or relevant dev packages for Python and the FreeType 2 Font engine.
""")
list("""
Download the latest ReportLab.tgz from the download page on http://www.reportlab.org.
""")
list("""
Unpack the archive and follow the instructions in INSTALL.txt.
""")
list("""You should now be able to run python and execute the python statement
$import reportlab$ without errors.
""")
heading3("Instructions for Python novices: Mac")
disc("""
This is much, much easier with Mac OS X since Python is installed on your
system as standard. Just follow the instructions for installing the ReportLab archive
above.
""")
heading2("Getting Involved")
disc("""ReportLab is an Open Source project. Although we are
a commercial company we provide the core PDF generation
sources freely, even for commercial purposes, and we make no income directly
from these modules. We also welcome help from the community
as much as any other Open Source project. There are many
ways in which you can help:""")
bullet("""General feedback on the core API. Does it work for you?
Are there any rough edges? Does anything feel clunky and awkward?""")
bullet("""New objects to put in reports, or useful utilities for the library.
We have an open standard for report objects, so if you have written a nice
chart or table class, why not contribute it?""")
bullet("""Demonstrations and Case Studies: If you have produced some nice
output, send it to us (with or without scripts). If ReportLab solved a
problem for you at work, write a little 'case study' and send it in.
And if your web site uses our tools to make reports, let us link to it.
We will be happy to display your work (and credit it with your name
and company) on our site!""")
bullet("""Working on the core code: we have a long list of things
to refine or to implement. If you are missing some features or
just want to help out, let us know!""")
disc("""The first step for anyone wanting to learn more or
get involved is to join the mailing list. To Subscribe visit
$http://two.pairlist.net/mailman/listinfo/reportlab-users$.
From there you can also browse through the group's archives
and contributions. The mailing list is
the place to report bugs and get support. """)
heading2("Site Configuration")
disc("""There are a number of options which most likely need to be configured globally for a site.
The python script module $reportlab/rl_config.py$ may be edited to change the values of several
important sitewide properties.""")
bullet("""verbose: set to integer values to control diagnostic output.""")
bullet("""shapeChecking: set this to zero to turn off a lot of error checking in the graphics modules""")
bullet("""defaultEncoding: set this to WinAnsiEncoding or MacRomanEncoding.""")
bullet("""defaultPageSize: set this to one of the values defined in reportlab/lib/pagesizes.py; as delivered
it is set to pagesizes.A4; other values are pagesizes.letter etc.""")
bullet("""defaultImageCaching: set to zero to inhibit the creation of .a85 files on your
hard-drive. The default is to create these preprocessed PDF compatible image files for faster loading""")
bullet("""T1SearchPath: this is a python list of strings representing directories that
may be queried for information on Type 1 fonts""")
bullet("""TTFSearchPath: this is a python list of strings representing directories that
may be queried for information on TrueType fonts""")
bullet("""CMapSearchPath: this is a python list of strings representing directories that
may be queried for information on font code maps.""")
bullet("""showBoundary: set to non-zero to get boundary lines drawn.""")
bullet("""ZLIB_WARNINGS: set to non-zero to get warnings if the Python compression extension is not found.""")
# Typo fix: the rl_config option is named pageCompression; the manual
# previously printed "pageComression", misdocumenting the setting name.
bullet("""pageCompression: set to non-zero to try and get compressed PDF.""")
bullet("""allowtableBoundsErrors: set to 0 to force an error on very large Platypus table elements""")
bullet("""emptyTableAction: Controls behaviour for empty tables, can be 'error' (default), 'indicate' or 'ignore'.""")
heading2("Learning More About Python")
disc("""
If you are a total beginner to Python, you should check out one or more from the
growing number of resources on Python programming. The following are freely
available on the web:
""")
bullet("""<b>Introductory Material on Python. </b>
A list of tutorials on the Python.org web site.
$http://www.python.org/doc/Intros.html$
""")
bullet("""<b>Python Tutorial. </b>
The official Python Tutorial by Guido van Rossum (edited by Fred L. Drake, Jr.)
$http://www.python.org/doc/tut/$
""")
bullet("""<b>Learning to Program. </b>
A tutorial on programming by Alan Gauld. Has a heavy emphasis on
Python, but also uses other languages.
$http://www.freenetpages.co.uk/hp/alan.gauld/$
""")
bullet("""<b>How to think like a computer scientist</b> (Python version)</b>.
$http://www.ibiblio.org/obp/thinkCSpy/$
""")
bullet("""<b>Instant Python</b>.
A 6-page minimal crash course by Magnus Lie Hetland.
$http://www.hetland.org/python/instant-python.php$
""")
bullet("""<b>Dive Into Python</b>.
A free Python tutorial for experienced programmers.
$http://diveintopython.org/$
""")
from reportlab.lib.codecharts import SingleByteEncodingChart
from tools.docco.stylesheet import getStyleSheet
styles = getStyleSheet()
indent0_style = styles['Indent0']
indent1_style = styles['Indent1']
heading2("What's New in ReportLab 2.0")
disc("""
Many new features have been added, foremost amongst which is the support
for unicode. This page documents what has changed since version 1.20.""")
disc("""
Adding full unicode support meant that we had to break backwards-compatibility,
so old code written for ReportLab 1 will sometimes need changes before it will
run correctly with ReportLab 2. Now that we have made the clean break to
introduce this important new feature, we intend to keep the API
backwards-compatible throughout the 2.* series.
""")
heading3("Goals for the 2.x series")
disc("""
The main rationale for 2.0 was an incompatible change at the character level:
to properly support Unicode input. Now that it's out we will maintain compatibility
with 2.0. There are no pressing feature wishlists and new features will be driven,
as always, by contributions and the demands of projects.""")
disc("""
Our 1.x code base is still Python 2.1 compatible. The new version lets us move forwards
with a baseline of Python 2.4 (2.3 will work too, for the moment, but we don't promise
that going forwards) so we can use newer language features freely in our development.""")
disc("""
One area where we do want to make progress from release to release is with documentation
and installability. We'll be looking into better support for distutils, setuptools,
eggs and so on; and into better examples and tools to help people learn what's in the
(substantial) code base.""")
disc("""
Bigger ideas and more substantial rewrites are deferred to Version 3.0, with no particular
target dates.
""")
heading3("Contributions")
disc("""Thanks to everybody who has contributed to the open-source toolkit in the run-up
to the 2.0 release, whether by reporting bugs, sending patches, or contributing to the
reportlab-users mailing list. Thanks especially to the following people, who contributed
code that has gone into 2.0: Andre Reitz, Max M, Albertas Agejevas, T Blatter, Ron Peleg,
Gary Poster, Steve Halasz, Andrew Mercer, Paul McNett, Chad Miller.
""")
todo("""If we missed you, please let us know!""")
heading3("Unicode support")
disc("""
This is the Big One, and the reason some apps may break. You must now pass in text either
in UTF-8 or as unicode string objects. The library will handle everything to do with output
encoding. There is more information on this below.
Since this is the biggest change, we'll start by reviewing how it worked in the past.""")
disc("""
In ReportLab 1.x, any string input you passed to our APIs was supposed to be in the same
encoding as the font you selected for output. If using the default fonts in Acrobat Reader
(Helvetica/Times/Courier), you would have implicitly used WinAnsi encoding, which is almost
exactly the same as Latin-1. However, if using TrueType fonts, you would have been using UTF-8.""")
disc("""For Asian fonts, you had a wide choice of encodings but had to specify which one
(e.g Shift-JIS or EUC for Japanese). This state of affairs meant that you had
to make sure that every piece of text input was in the same encoding as the font used
to display it.""")
disc("""Input text encoding is UTF-8 or Python Unicode strings""")
disc("""
Any text you pass to a canvas API (drawString etc.), Paragraph or other flowable
constructor, into a table cell, or as an attribute of a graphic (e.g. chart.title.text),
is supposed to be unicode. If you use a traditional Python string, it is assumed to be UTF-8.
If you pass a Unicode object, we know it's unicode.""", style=indent1_style)
disc("""Font encodings""")
disc("""
Fonts still work in different ways, and the built-in ones will still use WinAnsi or MacRoman
internally while TrueType will use UTF-8. However, the library hides this from you; it converts
as it writes out the PDF file. As before, it's still your job to make sure the font you use has
the characters you need, or you may get either a traceback or a visible error character.""",style=indent1_style)
disc("""Asian CID fonts""")
disc("""
You no longer need to specify the encoding for the built-in Asian fonts, just the face name.
ReportLab knows about the standard fonts in Adobe's Asian Language Packs
""", style=indent1_style)
disc("""Asian Truetype fonts""")
disc("""
The standard Truetype fonts differ slightly for Asian languages (e.g msmincho.ttc).
These can now be read and used, albeit somewhat inefficiently.
""", style=indent1_style)
disc("""Asian word wrapping""")
disc("""
Previously we could display strings in Asian languages, but could not properly
wrap paragraphs as there are no gaps between the words. We now have a basic word wrapping
algorithm.
""", style=indent1_style)
disc("""unichar tag""")
disc("""
A convenience tag, <unichar/> has also been added. You can now do <unichar code="0xfc"/>
or <unichar name='LATIN SMALL LETTER U WITH DIAERESIS'/> and
get a lowercase u umlaut. Names should be those in the Unicode Character Database.
""", style=indent1_style)
disc("""Accents, greeks and symbols""")
disc("""
The correct way to refer to all non-ASCII characters is to use their unicode representation.
This can be literal Unicode or UTF-8. Special symbols and Greek letters (collectively, "greeks")
inserted in paragraphs using the greek tag (e.g. <greek>lambda</greek>) or using the entity
references (e.g. λ) are now processed in a different way than in version 1.""", style=indent1_style)
disc("""
Previously, these were always rendered using the Zapf Dingbats font. Now they are always output
in the font you specified, unless that font does not support that character. If the font does
not support the character, and the font you specified was an Adobe Type 1 font, Zapf Dingbats
is used as a fallback. However, at present there is no fallback in the case of TTF fonts.
Note that this means that documents that contain greeks and specify a TTF font may need
changing to explicitly specify the font to use for the greek character, or you will see a black
square in place of that character when you view your PDF output in Acrobat Reader.
""", style=indent1_style)
# Other New Features Section #######################
heading3("Other New Features")
disc("""PDF""")
disc("""Improved low-level annotation support for PDF "free text annotations"
""", style=indent0_style)
disc("""FreeTextAnnotation allows showing and hiding of an arbitrary PDF "form"
(reusable chunk of PDF content) depending on whether the document is printed or
viewed on-screen, or depending on whether the mouse is hovered over the content, etc.
""", style=indent1_style)
# Remove the stray double-quote that was inside the triple-quoted string and
# therefore rendered as a literal `"` in the generated manual.
disc("""TTC font collection files are now readable
""", style=indent0_style)
disc("""ReportLab now supports using TTF fonts packaged in .TTC files""", style=indent1_style)
disc("""East Asian font support (CID and TTF)""", style=indent0_style)
disc("""You no longer need to specify the encoding for the built-in Asian fonts,
just the face name. ReportLab knows about the standard fonts in Adobe's Asian Language Packs.
""", style=indent1_style)
disc("""Native support for JPEG CMYK images""", style=indent0_style)
disc("""ReportLab now takes advantage of PDF's native JPEG CMYK image support,
so that JPEG CMYK images are no longer (lossily) converted to RGB format before including
them in PDF.""", style=indent1_style)
disc("""Platypus""")
disc("""Link support in paragraphs""", style=indent0_style)
disc("""
Platypus paragraphs can now contain link elements, which support both internal links
to the same PDF document, links to other local PDF documents, and URL links to pages on
the web. Some examples:""", style=indent1_style)
disc("""Web links:""", style=indent1_style)
disc("""<link href="http://www.reportlab.com/">ReportLab<link>""", style=styles['Link'])
disc("""Internal link to current PDF document:""", style=indent1_style)
disc("""<link href="summary">ReportLab<link>""", style=styles['Link'])
disc("""External link to a PDF document on the local filesystem:""", style=indent1_style)
disc("""<link href="pdf:C:/john/report.pdf">ReportLab<link>""", style=styles['Link'])
disc("""Improved wrapping support""", style=indent0_style)
disc("""Support for wrapping arbitrary sequence of flowables around an image, using
reportlab.platypus.flowables.ImageAndFlowables (similar to ParagraphAndImage)."""
,style=indent1_style)
disc("""KeepInFrame""", style=indent0_style)
disc("""Sometimes the length of a piece of text you'd like to include in a fixed piece
of page "real estate" is not guaranteed to be constrained to a fixed maximum length.
In these cases, KeepInFrame allows you to specify an appropriate action to take when
the text is too long for the space allocated for it. In particular, it can shrink the text
to fit, mask (truncate) overflowing text, allow the text to overflow into the rest of the document,
or raise an error.""",style=indent1_style)
disc("""Improved convenience features for inserting unicode symbols and other characters
""", style=indent0_style)
disc("""<unichar/> lets you conveniently insert unicode characters using the standard long name
or code point. Characters inserted with the <greek> tags (e.g. <greek>lambda</greek>) or corresponding
entity references (e.g. λ) support arbitrary fonts (rather than only Zapf Dingbats).""",style=indent1_style)
disc("""Improvements to Legending""", style=indent0_style)
# Grammar fix in the emitted text: "a attachment point" -> "an attachment point".
disc("""Instead of manual placement, there is now an attachment point (N, S, E, W, etc.), so that
the legend is always automatically positioned correctly relative to the chart. Swatches (the small
sample squares of colour / pattern fill sometimes displayed in the legend) can now be automatically
created from the graph data. Legends can now have automatically-computed totals (useful for
financial applications).""",style=indent1_style)
disc("""More and better ways to place piechart labels""", style=indent0_style)
disc("""New smart algorithms for automatic pie chart label positioning have been added.
You can now produce nice-looking labels without manual positioning even for awkward cases in
big runs of charts.""",style=indent1_style)
disc("""Adjustable piechart slice ordering""", style=indent0_style)
# Typo fixes in the emitted text: "For example." -> "For example,",
# "lagel placememt" -> "label placement".
disc("""For example, pie charts with lots of small slices can be configured to alternate thin and
thick slices to help the label placement algorithm work better.""",style=indent1_style)
disc("""Improved spiderplots""", style=indent0_style)
# Noteworthy bug fixes Section #######################
heading3("Noteworthy bug fixes")
disc("""Fixes to TTF splitting (patch from Albertas Agejevas)""")
disc("""This affected some documents using font subsetting""", style=indent0_style)
disc("""Tables with spans improved splitting""")
disc("""Splitting of tables across pages did not work correctly when the table had
row/column spans""", style=indent0_style)
disc("""Fix runtime error affecting keepWithNext""")
| |
import logging
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm import joinedload
from sqlalchemy.sql.expression import desc
from sqlalchemy import or_, Column, func
from flask import flash
from flask.ext.admin._compat import string_types
from flask.ext.admin.tools import ObsoleteAttr
from flask.ext.admin.babel import gettext, ngettext, lazy_gettext
from flask.ext.admin.model import BaseModelView
from flask.ext.admin.actions import action
from flask.ext.admin.contrib.sqlamodel import form, filters, tools
from .typefmt import DEFAULT_FORMATTERS
class ModelView(BaseModelView):
"""
SQLAlchemy model view
Usage sample::
admin = Admin()
admin.add_view(ModelView(User, db.session))
"""
column_hide_backrefs = ObsoleteAttr('column_hide_backrefs',
'hide_backrefs',
True)
"""
Set this to False if you want to see multiselect for model backrefs.
"""
column_auto_select_related = ObsoleteAttr('column_auto_select_related',
'auto_select_related',
True)
"""
Enable automatic detection of displayed foreign keys in this view
and perform automatic joined loading for related models to improve
query performance.
Please note that detection is not recursive: if `__unicode__` method
of related model uses another model to generate string representation, it
will still make separate database call.
"""
column_select_related_list = ObsoleteAttr('column_select_related',
'list_select_related',
None)
"""
List of parameters for SQLAlchemy `subqueryload`. Overrides `column_auto_select_related`
property.
For example::
class PostAdmin(ModelAdmin):
column_select_related_list = ('user', 'city')
You can also use properties::
class PostAdmin(ModelAdmin):
column_select_related_list = (Post.user, Post.city)
Please refer to the `subqueryload` on list of possible values.
"""
column_display_all_relations = ObsoleteAttr('column_display_all_relations',
'list_display_all_relations',
False)
"""
Controls if list view should display all relations, not only many-to-one.
"""
column_searchable_list = ObsoleteAttr('column_searchable_list',
'searchable_columns',
None)
"""
Collection of the searchable columns. Only text-based columns
are searchable (`String`, `Unicode`, `Text`, `UnicodeText`).
Example::
class MyModelView(ModelView):
column_searchable_list = ('name', 'email')
You can also pass columns::
class MyModelView(ModelView):
column_searchable_list = (User.name, User.email)
The following search rules apply:
- If you enter *ZZZ* in the UI search field, it will generate *ILIKE '%ZZZ%'*
statement against searchable columns.
- If you enter multiple words, each word will be searched separately, but
only rows that contain all words will be displayed. For example, searching
for 'abc def' will find all rows that contain 'abc' and 'def' in one or
more columns.
- If you prefix your search term with ^, it will find all rows
that start with ^. So, if you entered *^ZZZ*, *ILIKE 'ZZZ%'* will be used.
- If you prefix your search term with =, it will perform an exact match.
For example, if you entered *=ZZZ*, the statement *ILIKE 'ZZZ'* will be used.
"""
column_filters = None
"""
Collection of the column filters.
Can contain either field names or instances of :class:`flask.ext.admin.contrib.sqlamodel.filters.BaseFilter` classes.
For example::
class MyModelView(BaseModelView):
column_filters = ('user', 'email')
or::
class MyModelView(BaseModelView):
column_filters = (BooleanEqualFilter(User.name, 'Name'))
"""
model_form_converter = form.AdminModelConverter
"""
Model form conversion class. Use this to implement custom field conversion logic.
For example::
class MyModelConverter(AdminModelConverter):
pass
class MyAdminView(ModelView):
model_form_converter = MyModelConverter
"""
inline_model_form_converter = form.InlineModelConverter
"""
Inline model conversion class. If you need some kind of post-processing for inline
forms, you can customize behavior by doing something like this::
class MyInlineModelConverter(AdminModelConverter):
def post_process(self, form_class, info):
form_class.value = wtf.TextField('value')
return form_class
class MyAdminView(ModelView):
inline_model_form_converter = MyInlineModelConverter
"""
filter_converter = filters.FilterConverter()
"""
Field to filter converter.
Override this attribute to use non-default converter.
"""
fast_mass_delete = False
"""
If set to `False` and user deletes more than one model using built in action,
all models will be read from the database and then deleted one by one
giving SQLAlchemy a chance to manually cleanup any dependencies (many-to-many
relationships, etc).
If set to `True`, will run a `DELETE` statement which is somewhat faster,
but may leave corrupted data if you forget to configure `DELETE
CASCADE` for your model.
"""
inline_models = None
"""
Inline related-model editing for models with parent-child relations.
Accepts enumerable with one of the following possible values:
1. Child model class::
class MyModelView(ModelView):
inline_models = (Post,)
2. Child model class and additional options::
class MyModelView(ModelView):
inline_models = [(Post, dict(form_columns=['title']))]
3. Django-like ``InlineFormAdmin`` class instance::
class MyInlineModelForm(InlineFormAdmin):
form_columns = ('title', 'date')
class MyModelView(ModelView):
inline_models = (MyInlineModelForm(MyInlineModel),)
You can customize the generated field name by:
1. Using the `form_name` property as a key to the options dictionary:
class MyModelView(ModelView):
inline_models = ((Post, dict(form_label='Hello')))
2. Using forward relation name and `column_labels` property:
class Model1(Base):
pass
class Model2(Base):
# ...
model1 = relation(Model1, backref='models')
class MyModel1View(Base):
inline_models = (Model2,)
column_labels = {'models': 'Hello'}
"""
column_type_formatters = DEFAULT_FORMATTERS
form_choices = None
"""
Map choices to form fields
Example::
class MyModelView(BaseModelView):
form_choices = {'my_form_field': [
('db_value', 'display_value'),
]
"""
def __init__(self, model, session,
             name=None, category=None, endpoint=None, url=None):
    """
    Constructor.

    :param model:
        SQLAlchemy model class this view manages
    :param session:
        SQLAlchemy session used for all queries and commits
    :param name:
        View name. If not set, defaults to the model name
    :param category:
        Category name
    :param endpoint:
        Endpoint name. If not set, defaults to the model name
    :param url:
        Base URL. If not set, defaults to '/admin/' + endpoint
    :raises Exception:
        If the model has no primary key
    """
    self.session = session

    # Caches populated later by init_search() / scaffold_filters()
    self._search_fields = None
    self._search_joins = dict()

    self._filter_joins = dict()

    # Normalize so downstream code can assume a dict
    if self.form_choices is None:
        self.form_choices = {}

    super(ModelView, self).__init__(model, name, category, endpoint, url)

    # Primary key
    self._primary_key = self.scaffold_pk()

    if self._primary_key is None:
        raise Exception('Model %s does not have primary key.' % self.model.__name__)

    # Configuration: explicit select_related list wins over auto-detection
    if not self.column_select_related_list:
        self._auto_joins = self.scaffold_auto_joins()
    else:
        self._auto_joins = self.column_select_related_list
# Internal API
def _get_model_iterator(self, model=None):
"""
Return property iterator for the model
"""
if model is None:
model = self.model
return model._sa_class_manager.mapper.iterate_properties
# Scaffolding
def scaffold_pk(self):
    """
    Look up and return the primary key name of this view's model.
    """
    model = self.model
    return tools.get_primary_key(model)
def get_pk_value(self, model):
    """
    Return the primary-key value stored on *model*.

    :param model:
        Model instance to read the PK from.
    """
    pk_name = self._primary_key
    return getattr(model, pk_name)
def scaffold_list_columns(self):
    """
    Return the list of column names to display, derived from the model.

    Includes many-to-one relations (or all relations when
    `column_display_all_relations` is set), skips foreign-key columns,
    and skips the primary key unless `column_display_pk` is set.
    """
    visible = []

    for prop in self._get_model_iterator():
        if hasattr(prop, 'direction'):
            # Relationship property
            show_relation = (self.column_display_all_relations or
                             prop.direction.name == 'MANYTOONE')
            if show_relation:
                visible.append(prop.key)
        elif hasattr(prop, 'columns'):
            # Plain column property
            # TODO: Check for multiple columns
            first_column = prop.columns[0]

            if first_column.foreign_keys:
                continue

            if first_column.primary_key and not self.column_display_pk:
                continue

            visible.append(prop.key)

    return visible
def scaffold_sortable_columns(self):
    """
    Return a dictionary of sortable columns.

    Key is column name, value is the sort column/field. Multi-column
    properties, foreign keys and (by default) primary keys are excluded.
    """
    sortable = dict()

    for prop in self._get_model_iterator():
        if not hasattr(prop, 'columns'):
            continue

        # Sanity check: multi-column properties are not supported
        if len(prop.columns) > 1:
            continue

        first_column = prop.columns[0]

        # Can't sort on primary or foreign keys by default
        if first_column.foreign_keys:
            continue

        if first_column.primary_key and not self.column_display_pk:
            continue

        sortable[prop.key] = first_column

    return sortable
def _get_columns_for_field(self, field):
    """
    Resolve *field* (attribute name string or model attribute) to its
    list of SQLAlchemy columns.

    :param field:
        Column name string or instrumented model attribute
    :raises Exception:
        If the field does not exist on the model, or does not map to
        any columns.
    """
    if isinstance(field, string_types):
        attr = getattr(self.model, field, None)

        # BUGFIX: was `if field is None`, which can never be true here
        # (field is a string at this point), so a missing field fell
        # through to the generic "invalid field" error below instead of
        # the specific "not found" error.
        if attr is None:
            raise Exception('Field %s was not found.' % field)
    else:
        attr = field

    if (not attr or
            not hasattr(attr, 'property') or
            not hasattr(attr.property, 'columns') or
            not attr.property.columns):
        raise Exception('Invalid field %s: does not contains any columns.' % field)

    return attr.property.columns
def _need_join(self, table):
return table not in self.model._sa_class_manager.mapper.tables
def init_search(self):
    """
    Initialize search. Returns `True` if search is supported for this
    view.

    For SQLAlchemy, this will initialize internal fields: list of
    column objects used for filtering, etc.

    :raises Exception:
        If a searchable column is not a text-based column type.
    """
    if self.column_searchable_list:
        self._search_fields = []
        self._search_joins = dict()

        for p in self.column_searchable_list:
            for column in self._get_columns_for_field(p):
                column_type = type(column.type).__name__

                # Search is implemented with ILIKE, so only text columns work
                if not self.is_text_column_type(column_type):
                    raise Exception('Can only search on text columns. ' +
                                    'Failed to setup search for "%s"' % p)

                self._search_fields.append(column)

                # If it belongs to different table - add a join
                if self._need_join(column.table):
                    self._search_joins[column.table.name] = column.table

    return bool(self.column_searchable_list)
def is_text_column_type(self, name):
    """
    Verify if the provided column type is text-based.

    :param name:
        Column type name (e.g. ``'String'``)
    :returns:
        ``True`` for ``String``, ``Unicode``, ``Text``, ``UnicodeText``
    """
    text_types = ('String', 'Unicode', 'Text', 'UnicodeText')
    return name in text_types
def scaffold_filters(self, name):
    """
    Return list of enabled filters for the given field.

    :param name:
        Column name string (possibly dotted for a related model,
        e.g. ``'user.email'``) or a model attribute.
    :raises Exception:
        If the field cannot be resolved, or maps to multiple columns.
    """
    join_tables = []

    if isinstance(name, string_types):
        model = self.model

        # Walk the dotted path, collecting any tables that need joining
        for attribute in name.split('.'):
            value = getattr(model, attribute)

            if (hasattr(value, 'property') and
                    hasattr(value.property, 'direction')):
                model = value.property.mapper.class_
                table = model.__table__

                if self._need_join(table):
                    join_tables.append(table)

            attr = value
    else:
        attr = name

    if attr is None:
        raise Exception('Failed to find field for filter: %s' % name)

    # Figure out filters for related column
    if hasattr(attr, 'property') and hasattr(attr.property, 'direction'):
        filters = []

        for p in self._get_model_iterator(attr.property.mapper.class_):
            if hasattr(p, 'columns'):
                # TODO: Check for multiple columns
                column = p.columns[0]

                if column.foreign_keys or column.primary_key:
                    continue

                visible_name = '%s / %s' % (self.get_column_name(attr.prop.table.name),
                                            self.get_column_name(p.key))

                type_name = type(column.type).__name__
                flt = self.filter_converter.convert(type_name,
                                                    column,
                                                    visible_name)

                if flt:
                    table = column.table

                    if join_tables:
                        self._filter_joins[table.name] = join_tables
                    elif self._need_join(table):
                        # BUGFIX: previously passed `table.name` (a string) to
                        # _need_join and stored `[table.name]` instead of the
                        # table itself; get_list() iterates these values and
                        # reads `.name` on them, so strings broke the joins.
                        self._filter_joins[table.name] = [table]

                    filters.extend(flt)

        return filters
    else:
        columns = self._get_columns_for_field(attr)

        if len(columns) > 1:
            raise Exception('Can not filter more than on one column for %s' % name)

        column = columns[0]

        # For joined columns without an explicit label, prefix with the table name
        if self._need_join(column.table) and name not in self.column_labels:
            visible_name = '%s / %s' % (
                self.get_column_name(column.table.name),
                self.get_column_name(column.name)
            )
        else:
            if not isinstance(name, string_types):
                visible_name = self.get_column_name(name.property.key)
            else:
                visible_name = self.get_column_name(name)

        type_name = type(column.type).__name__

        if join_tables:
            self._filter_joins[column.table.name] = join_tables

        flt = self.filter_converter.convert(
            type_name,
            column,
            visible_name,
            options=self.column_choices.get(name),
        )

        if flt and not join_tables and self._need_join(column.table):
            self._filter_joins[column.table.name] = [column.table]

        return flt
def is_valid_filter(self, filter):
    """
    Verify that the provided filter object is derived from the
    SQLAlchemy-compatible filter class.

    :param filter:
        Filter object to verify.
    :returns:
        ``True`` if it is a ``filters.BaseSQLAFilter`` instance.
    """
    return isinstance(filter, filters.BaseSQLAFilter)
def scaffold_form(self):
    """
    Create and return the form class generated from the model,
    including any configured inline models.
    """
    field_converter = self.model_form_converter(self.session, self)
    generated_form = form.get_form(self.model, field_converter,
                                   only=self.form_columns,
                                   exclude=self.form_excluded_columns,
                                   field_args=self.form_args)

    if self.inline_models:
        generated_form = self.scaffold_inline_form_models(generated_form)

    return generated_form
def scaffold_inline_form_models(self, form_class):
    """
    Contribute inline models to the form.

    :param form_class:
        Form class to augment with inline-model fields.
    """
    base_converter = self.model_form_converter(self.session, self)
    inline_converter = self.inline_model_form_converter(self.session, self)

    for inline_model in self.inline_models:
        form_class = inline_converter.contribute(base_converter,
                                                 self.model,
                                                 form_class,
                                                 inline_model)

    return form_class
def scaffold_auto_joins(self):
    """
    Return a list of relation attributes to eagerly join, by going
    through the displayed columns.

    Only MANYTOONE relations to *other* models are considered, and only
    when `column_auto_select_related` is enabled.
    """
    if not self.column_auto_select_related:
        return []

    relation_keys = set()

    for prop in self._get_model_iterator():
        if not hasattr(prop, 'direction'):
            continue

        # Skip self-referential relations
        if prop.mapper.class_ == self.model:
            continue

        if prop.direction.name == 'MANYTOONE':
            relation_keys.add(prop.key)

    return [getattr(self.model, col)
            for col, _label in self._list_columns
            if col in relation_keys]
# Database-related API
def get_query(self):
    """
    Return a query for the model type.

    If you override this method, don't forget to override
    `get_count_query` as well.
    """
    model = self.model
    return self.session.query(model)
def get_count_query(self):
    """
    Return the row-count query for the model type.
    """
    count_expr = func.count('*')
    return self.session.query(count_expr).select_from(self.model)
def _order_by(self, query, joins, sort_field, sort_desc):
    """
    Apply order_by to the query, adding any joins required to reach
    the sort column.

    :param query:
        Query
    :param joins:
        Set of already-joined table names (mutated in place)
    :param sort_field:
        Sort field: a string name, an InstrumentedAttribute, or a Column
    :param sort_desc:
        Ascending or descending
    :returns:
        ``(query, joins)`` tuple with the ordering applied.
    :raises TypeError:
        If ``sort_field`` is of an unsupported type.
    """
    # TODO: Preprocessing for joins
    # Try to handle it as a string
    if isinstance(sort_field, string_types):
        # Create automatic join against a table if column name
        # contains dot.
        if '.' in sort_field:
            parts = sort_field.split('.', 1)

            if parts[0] not in joins:
                query = query.join(parts[0])
                joins.add(parts[0])
    elif isinstance(sort_field, InstrumentedAttribute):
        # SQLAlchemy 0.8+ uses 'parent' as a name
        mapper = getattr(sort_field, 'parent', None)
        if mapper is None:
            # SQLAlchemy 0.7.x uses parententity
            mapper = getattr(sort_field, 'parententity', None)

        if mapper is not None:
            table = mapper.tables[0]

            if self._need_join(table) and table.name not in joins:
                query = query.join(table)
                joins.add(table.name)
    elif isinstance(sort_field, Column):
        # Raw Column objects need no extra joins
        pass
    else:
        raise TypeError('Wrong argument type')

    if sort_field is not None:
        if sort_desc:
            query = query.order_by(desc(sort_field))
        else:
            query = query.order_by(sort_field)

    return query, joins
def _get_default_order(self):
    """
    Resolve the default ordering, turning a string column name into the
    corresponding model attribute. Returns ``(field, direction)`` or
    ``None`` when no default order is configured.
    """
    order = super(ModelView, self)._get_default_order()

    if order is None:
        return None

    field, direction = order
    if isinstance(field, string_types):
        field = getattr(self.model, field)

    return field, direction
def get_list(self, page, sort_column, sort_desc, search, filters, execute=True):
    """
    Return models from the database.

    :param page:
        Page number (0-based); ``None`` disables the offset
    :param sort_column:
        Sort column name
    :param sort_desc:
        Descending or ascending sort
    :param search:
        Search query (space-separated terms, each matched with ILIKE)
    :param execute:
        Execute query immediately? Default is `True`
    :param filters:
        List of ``(filter_index, value)`` tuples
    :returns:
        ``(count, query_or_results)`` tuple
    """
    # Will contain names of joined tables to avoid duplicate joins
    joins = set()

    query = self.get_query()
    count_query = self.get_count_query()

    # Apply search criteria
    if self._search_supported and search:
        # Apply search-related joins
        if self._search_joins:
            for jn in self._search_joins.values():
                query = query.join(jn)
                count_query = count_query.join(jn)

            joins = set(self._search_joins.keys())

        # Apply terms: each term must match at least one searchable field
        terms = search.split(' ')

        for term in terms:
            if not term:
                continue

            stmt = tools.parse_like_term(term)
            filter_stmt = [c.ilike(stmt) for c in self._search_fields]
            query = query.filter(or_(*filter_stmt))
            count_query = count_query.filter(or_(*filter_stmt))

    # Apply filters
    if filters and self._filters:
        for idx, value in filters:
            flt = self._filters[idx]

            # Figure out joins needed by this filter's column
            tbl = flt.column.table.name

            join_tables = self._filter_joins.get(tbl, [])

            for table in join_tables:
                if table.name not in joins:
                    query = query.join(table)
                    count_query = count_query.join(table)
                    joins.add(table.name)

            # Apply filter
            query = flt.apply(query, value)
            count_query = flt.apply(count_query, value)

    # Calculate number of rows (before pagination/sorting)
    count = count_query.scalar()

    # Auto join: eager-load configured relations
    for j in self._auto_joins:
        query = query.options(joinedload(j))

    # Sorting: explicit sort column wins over the configured default
    if sort_column is not None:
        if sort_column in self._sortable_columns:
            sort_field = self._sortable_columns[sort_column]

            query, joins = self._order_by(query, joins, sort_field, sort_desc)
    else:
        order = self._get_default_order()

        if order:
            query, joins = self._order_by(query, joins, order[0], order[1])

    # Pagination
    if page is not None:
        query = query.offset(page * self.page_size)

    query = query.limit(self.page_size)

    # Execute if needed
    if execute:
        query = query.all()

    return count, query
def get_one(self, id):
    """
    Return a single model by its id.

    :param id:
        Model id
    """
    base_query = self.session.query(self.model)
    return base_query.get(id)
# Model handlers
def create_model(self, form):
    """
    Create model from form.

    :param form:
        Form instance
    :returns:
        ``True`` on success; ``False`` when the commit failed (the
        error is flashed to the user and the session rolled back).
    """
    try:
        model = self.model()
        form.populate_obj(model)
        self.session.add(model)
        # Hook: runs before commit so it can veto/mutate the model
        self.on_model_change(form, model)
        self.session.commit()
    except Exception as ex:
        flash(gettext('Failed to create model. %(error)s', error=str(ex)), 'error')
        logging.exception('Failed to create model')
        self.session.rollback()
        return False
    else:
        # Third argument True == model was newly created
        self.after_model_change(form, model, True)

    return True
def update_model(self, form, model):
    """
    Update model from form.

    :param form:
        Form instance
    :param model:
        Model instance
    :returns:
        ``True`` on success; ``False`` when the commit failed (the
        error is flashed to the user and the session rolled back).
    """
    try:
        form.populate_obj(model)
        # Hook: runs before commit so it can veto/mutate the model
        self.on_model_change(form, model)
        self.session.commit()
    except Exception as ex:
        flash(gettext('Failed to update model. %(error)s', error=str(ex)), 'error')
        logging.exception('Failed to update model')
        self.session.rollback()
        return False
    else:
        # Third argument False == model was updated, not created
        self.after_model_change(form, model, False)

    return True
def delete_model(self, model):
    """
    Delete model.

    :param model:
        Model to delete
    :returns:
        ``True`` on success; ``False`` when deletion failed (the error
        is flashed to the user and the session rolled back).
    """
    try:
        # Hook: runs before deletion so it can veto the delete
        self.on_model_delete(model)
        # Flush pending changes first so the delete sees current state
        self.session.flush()
        self.session.delete(model)
        self.session.commit()
        return True
    except Exception as ex:
        flash(gettext('Failed to delete model. %(error)s', error=str(ex)), 'error')
        logging.exception('Failed to delete model')
        self.session.rollback()
        return False
# Default model actions
def is_action_allowed(self, name):
    """
    Check whether the named bulk action may run; the built-in
    'delete' action additionally obeys the view's `can_delete` flag.
    """
    delete_blocked = (name == 'delete') and not self.can_delete
    if delete_blocked:
        return False

    return super(ModelView, self).is_action_allowed(name)
@action('delete',
        lazy_gettext('Delete'),
        lazy_gettext('Are you sure you want to delete selected models?'))
def action_delete(self, ids):
    # Bulk-delete action invoked from the list view's "with selected" menu.
    # ids: iterable of primary-key values to delete.
    try:
        model_pk = getattr(self.model, self._primary_key)
        query = self.get_query().filter(model_pk.in_(ids))

        if self.fast_mass_delete:
            # Single DELETE statement: fast, but skips ORM-level cascade cleanup
            count = query.delete(synchronize_session=False)
        else:
            # Load and delete one by one so SQLAlchemy can clean up dependencies
            count = 0

            for m in query.all():
                self.session.delete(m)
                count += 1

        self.session.commit()

        flash(ngettext('Model was successfully deleted.',
                       '%(count)s models were successfully deleted.',
                       count,
                       count=count))
    except Exception as ex:
        flash(gettext('Failed to delete models. %(error)s', error=str(ex)), 'error')
| |
# Copyright 2015-2018 Yelp Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import difflib
import glob
import json
import logging
import os
import pkgutil
import re
import subprocess
from functools import lru_cache
from string import Formatter
from typing import List
from typing import Mapping
from typing import Tuple
from typing import Union
import yaml
from service_configuration_lib import read_extra_service_information
from service_configuration_lib import read_yaml_file
from paasta_tools.mesos_tools import mesos_services_running_here
try:
from yaml.cyaml import CSafeDumper as Dumper
except ImportError: # pragma: no cover (no libyaml-dev / pypy)
Dumper = yaml.SafeDumper # type: ignore
from paasta_tools.clusterman import get_clusterman_metrics
from paasta_tools.tron.client import TronClient
from paasta_tools.tron import tron_command_context
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import InstanceConfig
from paasta_tools.utils import InvalidInstanceConfig
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import SystemPaastaConfig
from paasta_tools.utils import load_v2_deployments_json
from paasta_tools.utils import NoConfigurationForServiceError
from paasta_tools.utils import NoDeploymentsAvailable
from paasta_tools.utils import time_cache
from paasta_tools.utils import filter_templates_from_config
from paasta_tools.kubernetes_tools import (
allowlist_denylist_to_requirements,
create_or_find_service_account_name,
limit_size_with_hash,
raw_selectors_to_requirements,
sanitise_kubernetes_name,
to_node_label,
)
from paasta_tools.secret_tools import is_secret_ref
from paasta_tools.secret_tools import is_shared_secret
from paasta_tools.secret_tools import get_secret_name_from_ref
from paasta_tools.secret_tools import SHARED_SECRET_SERVICE
from paasta_tools import monitoring_tools
from paasta_tools.monitoring_tools import list_teams
from typing import Optional
from typing import Dict
from typing import Any
# Module-level logger; quiet the chatty third-party "tron" logger.
log = logging.getLogger(__name__)
logging.getLogger("tron").setLevel(logging.WARNING)

MASTER_NAMESPACE = "MASTER"
# Separator used to build/parse "job.action" instance names
SPACER = "."
# Monitoring keys accepted by the tron job schema; used to filter
# service-level monitoring config down to what Tron understands.
VALID_MONITORING_KEYS = set(
    json.loads(
        pkgutil.get_data("paasta_tools.cli", "schemas/tron_schema.json").decode()
    )["definitions"]["job"]["properties"]["monitoring"]["properties"].keys()
)
MESOS_EXECUTOR_NAMES = ("paasta",)
KUBERNETES_EXECUTOR_NAMES = ("paasta",)
KUBERNETES_NAMESPACE = "tron"
DEFAULT_AWS_REGION = "us-west-2"
clusterman_metrics, _ = get_clusterman_metrics()
class TronNotConfigured(Exception):
    """Raised when the system paasta config lacks required Tron settings."""
    pass
class InvalidTronConfig(Exception):
    """Raised when a Tron configuration is invalid."""
    pass
class TronConfig(dict):
    """System-level configuration for Tron (a dict with typed accessors)."""

    def __init__(self, config):
        super().__init__(config)

    def get_cluster_name(self):
        """Return the name of the Tron cluster.

        :raises TronNotConfigured: if ``cluster_name`` is missing.
        """
        try:
            cluster_name = self["cluster_name"]
        except KeyError:
            raise TronNotConfigured(
                "Could not find name of Tron cluster in system Tron config"
            )
        return cluster_name

    def get_url(self):
        """Return the URL for the Tron master's API.

        :raises TronNotConfigured: if ``url`` is missing.
        """
        try:
            master_url = self["url"]
        except KeyError:
            raise TronNotConfigured(
                "Could not find URL of Tron master in system Tron config"
            )
        return master_url
def get_tronfig_folder(cluster, soa_dir):
    """Return the soaconfigs directory holding tronfigs for *cluster*."""
    tron_root = os.path.join(soa_dir, "tron")
    return os.path.join(tron_root, cluster)
def load_tron_config():
    """Load the Tron section of the system paasta config as a TronConfig."""
    system_config = load_system_paasta_config()
    return TronConfig(system_config.get_tron_config())
def get_tron_client():
    """Build a TronClient pointed at the configured Tron master URL."""
    master_url = load_tron_config().get_url()
    return TronClient(master_url)
def compose_instance(job, action):
    """Join a job name and action name into a paasta instance name."""
    return SPACER.join((job, action))
def decompose_instance(instance):
    """Get (job_name, action_name) from an instance.

    :raises InvalidInstanceConfig: when the name is not exactly "job.action".
    """
    parts = instance.split(SPACER)
    if len(parts) != 2:
        raise InvalidInstanceConfig("Invalid instance name: %s" % instance)
    job_name, action_name = parts
    return (job_name, action_name)
def decompose_executor_id(executor_id) -> Tuple[str, str, int, str]:
    """Split an executor id into (service, job, run_number, action)."""
    service, job, run_number, action, _ = executor_id.split(SPACER)
    return (service, job, int(run_number), action)
class StringFormatter(Formatter):
    """``string.Formatter`` that falls back to a context mapping.

    Lookup order for named ``{key}`` fields: keyword arguments passed
    to ``format()`` first, then ``self.context``. Positional fields
    (``{0}``) use the standard Formatter behavior.
    """

    def __init__(self, context=None):
        Formatter.__init__(self)
        self.context = context

    def get_value(self, key, args, kwds):
        if isinstance(key, str):
            try:
                return kwds[key]
            except KeyError:
                return self.context[key]
        else:
            # BUGFIX: this unbound call previously omitted `self`
            # (Formatter.get_value(key, args, kwds)), which bound `key`
            # as self and raised TypeError for positional fields like
            # "{0}".
            return Formatter.get_value(self, key, args, kwds)
def parse_time_variables(command: str, parse_time: datetime.datetime = None) -> str:
    """Parses an input string and uses the Tron-style dateparsing
    to replace time variables. Currently supports only the date/time
    variables listed in the tron documentation:
    http://tron.readthedocs.io/en/latest/command_context.html#built-in-cc

    :param command: input string to be parsed (e.g. containing "{shortdate}")
    :param parse_time: Reference Datetime object to parse the date and time strings, defaults to now.
    :returns: A string with the date and time variables replaced
    """
    if parse_time is None:
        parse_time = datetime.datetime.now()
    # We build up a tron context object that has the right
    # methods to parse tron-style time syntax
    job_context = tron_command_context.JobRunContext(
        tron_command_context.CommandContext()
    )
    # The tron context object needs the run_time attribute set so it knows
    # how to interpret the date strings
    job_context.job_run.run_time = parse_time
    # StringFormatter resolves "{...}" fields against the tron job context
    return StringFormatter(job_context).format(command)
@lru_cache(maxsize=1)
def _use_k8s_default() -> bool:
    # Cached once per process: cluster-wide default for whether Tron jobs
    # should run on Kubernetes instead of Mesos.
    return load_system_paasta_config().get_tron_use_k8s_default()
class TronActionConfig(InstanceConfig):
    """Instance-level config for a single Tron action (one step of a Tron job)."""

    config_filename_prefix = "tron"

    def __init__(
        self,
        service,
        instance,
        cluster,
        config_dict,
        branch_dict,
        soa_dir=DEFAULT_SOA_DIR,
        for_validation=False,
    ):
        super().__init__(
            cluster=cluster,
            instance=instance,
            service=service,
            config_dict=config_dict,
            branch_dict=branch_dict,
            soa_dir=soa_dir,
        )
        # Instance names are "job.action"; split back into parts
        self.job, self.action = decompose_instance(instance)

        # Indicate whether this config object is created for validation
        self.for_validation = for_validation

    def get_cmd(self):
        return self.config_dict.get("command")

    def get_job_name(self):
        return self.job

    def get_action_name(self):
        return self.action

    def get_deploy_group(self) -> Optional[str]:
        return self.config_dict.get("deploy_group", None)

    def get_docker_url(
        self, system_paasta_config: Optional[SystemPaastaConfig] = None
    ) -> str:
        # It's okay for tronfig to contain things that aren't deployed yet - it's normal for developers to
        # push tronfig well before the job is scheduled to run, and either they'll deploy the service before
        # or get notified when the job fails.
        #
        # This logic ensures that we can still pass validation and run setup_tron_namespace even if
        # there's nothing in deployments.json yet.
        return (
            ""
            if not self.get_docker_image()
            else super().get_docker_url(system_paasta_config=system_paasta_config)
        )

    def get_secret_env(self) -> Mapping[str, dict]:
        """Translate SECRET(...)-style env values into k8s secret references."""
        base_env = self.config_dict.get("env", {})
        secret_env = {}
        for k, v in base_env.items():
            if is_secret_ref(v):
                secret = get_secret_name_from_ref(v)
                sanitised_secret = sanitise_kubernetes_name(secret)
                # Shared secrets live under a dedicated pseudo-service
                service = (
                    self.service if not is_shared_secret(v) else SHARED_SECRET_SERVICE
                )
                sanitised_service = sanitise_kubernetes_name(service)
                secret_env[k] = {
                    "secret_name": f"tron-secret-{sanitised_service}-{sanitised_secret}",
                    "key": secret,
                }
        return secret_env

    def get_cpu_burst_add(self) -> float:
        """ For Tron jobs, we don't let them burst by default, because they
        don't represent "real-time" workloads, and should not impact
        neighbors """
        return self.config_dict.get("cpu_burst_add", 0)

    def get_executor(self):
        return self.config_dict.get("executor", "paasta")

    def get_healthcheck_mode(self, _) -> None:
        # Batch actions have no healthcheck mode
        return None

    def get_node(self):
        return self.config_dict.get("node")

    def get_retries(self):
        return self.config_dict.get("retries")

    def get_retries_delay(self):
        return self.config_dict.get("retries_delay")

    def get_requires(self):
        return self.config_dict.get("requires")

    def get_expected_runtime(self):
        return self.config_dict.get("expected_runtime")

    def get_triggered_by(self):
        return self.config_dict.get("triggered_by", None)

    def get_trigger_downstreams(self):
        return self.config_dict.get("trigger_downstreams", None)

    def get_on_upstream_rerun(self):
        return self.config_dict.get("on_upstream_rerun", None)

    def get_trigger_timeout(self):
        return self.config_dict.get("trigger_timeout", None)

    def get_node_selectors(self) -> Dict[str, str]:
        raw_selectors: Dict[str, Any] = self.config_dict.get("node_selectors", {})  # type: ignore
        # Only plain string selectors become k8s node selectors; richer
        # operator syntax is handled by get_node_affinities() instead.
        node_selectors = {
            to_node_label(label): value
            for label, value in raw_selectors.items()
            if isinstance(value, str)
        }
        # Every action is pinned to its configured pool
        node_selectors["yelp.com/pool"] = self.get_pool()
        return node_selectors

    def get_node_affinities(self) -> Optional[List[Dict[str, Union[str, List[str]]]]]:
        """Converts deploy_whitelist and deploy_blacklist in node affinities.

        note: At the time of writing, `kubectl describe` does not show affinities,
        only selectors. To see affinities, use `kubectl get pod -o json` instead.
        """
        requirements = allowlist_denylist_to_requirements(
            allowlist=self.get_deploy_whitelist(), denylist=self.get_deploy_blacklist(),
        )
        requirements.extend(
            raw_selectors_to_requirements(
                raw_selectors=self.config_dict.get("node_selectors", {}),  # type: ignore
            )
        )
        if not requirements:
            return None
        return [
            {"key": key, "operator": op, "value": value}
            for key, op, value in requirements
        ]

    def get_calculated_constraints(self):
        """Combine all configured Mesos constraints."""
        constraints = self.get_constraints()
        if constraints is not None:
            # Explicit constraints win outright
            return constraints
        else:
            constraints = self.get_extra_constraints()
            constraints.extend(
                self.get_deploy_constraints(
                    blacklist=self.get_deploy_blacklist(),
                    whitelist=self.get_deploy_whitelist(),
                    # Don't have configs for the paasta cluster
                    system_deploy_blacklist=[],
                    system_deploy_whitelist=None,
                )
            )
            constraints.extend(self.get_pool_constraints())
            return constraints

    def get_nerve_namespace(self) -> None:
        # Tron actions are not registered in service discovery
        return None

    def validate(self):
        """Validate this action; returns a list of error messages (empty == valid)."""
        error_msgs = []
        error_msgs.extend(super().validate())

        # Tron is a little special, because it can *not* have a deploy group
        # But only if an action is running via ssh and not via paasta
        if (
            self.get_deploy_group() is None
            and self.get_executor() in MESOS_EXECUTOR_NAMES
        ):
            error_msgs.append(
                f"{self.get_job_name()}.{self.get_action_name()} must have a deploy_group set"
            )

        return error_msgs
class TronJobConfig:
    """Represents a job in Tron, consisting of action(s) and job-level configuration values."""

    def __init__(
        self,
        name: str,
        config_dict: Dict[str, Any],
        cluster: str,
        service: Optional[str] = None,
        load_deployments: bool = True,
        soa_dir: str = DEFAULT_SOA_DIR,
        for_validation: bool = False,
    ) -> None:
        self.name = name
        self.config_dict = config_dict
        self.cluster = cluster
        self.service = service
        self.load_deployments = load_deployments
        self.soa_dir = soa_dir
        # Indicate whether this config object is created for validation
        self.for_validation = for_validation

    def get_use_k8s(self) -> bool:
        # Per-job override; falls back to the cached cluster-wide default
        return self.config_dict.get("use_k8s", _use_k8s_default())

    def get_name(self):
        return self.name

    def get_node(self):
        return self.config_dict.get("node", "paasta")

    def get_schedule(self):
        return self.config_dict.get("schedule")

    def get_monitoring(self):
        """Return merged monitoring config: service-level defaults overridden by job-level."""
        srv_monitoring = dict(
            monitoring_tools.read_monitoring_config(self.service, soa_dir=self.soa_dir)
        )
        tron_monitoring = self.config_dict.get("monitoring", {})
        srv_monitoring.update(tron_monitoring)
        # filter out non-tron monitoring keys
        srv_monitoring = {
            k: v for k, v in srv_monitoring.items() if k in VALID_MONITORING_KEYS
        }
        return srv_monitoring

    def get_queueing(self):
        return self.config_dict.get("queueing")

    def get_run_limit(self):
        return self.config_dict.get("run_limit")

    def get_all_nodes(self):
        return self.config_dict.get("all_nodes")

    def get_enabled(self):
        return self.config_dict.get("enabled")

    def get_allow_overlap(self):
        return self.config_dict.get("allow_overlap")

    def get_max_runtime(self):
        return self.config_dict.get("max_runtime")

    def get_time_zone(self):
        return self.config_dict.get("time_zone")

    def get_service(self) -> Optional[str]:
        # Constructor argument wins over the value in the config dict
        return self.service or self.config_dict.get("service")

    def get_deploy_group(self) -> Optional[str]:
        return self.config_dict.get("deploy_group", None)

    def get_cluster(self):
        return self.cluster

    def get_expected_runtime(self):
        return self.config_dict.get("expected_runtime")

    def _get_action_config(self, action_name, action_dict):
        """Build a TronActionConfig for one action, filling in deployment info."""
        # Actions inherit service/deploy_group from the job unless they set their own
        action_service = action_dict.setdefault("service", self.get_service())
        action_deploy_group = action_dict.setdefault(
            "deploy_group", self.get_deploy_group()
        )
        if action_service and action_deploy_group and self.load_deployments:
            try:
                deployments_json = load_v2_deployments_json(
                    service=action_service, soa_dir=self.soa_dir
                )
                branch_dict = {
                    "docker_image": deployments_json.get_docker_image_for_deploy_group(
                        action_deploy_group
                    ),
                    "git_sha": deployments_json.get_git_sha_for_deploy_group(
                        action_deploy_group
                    ),
                    # TODO: add Tron instances when generating deployments json
                    "desired_state": "start",
                    "force_bounce": None,
                }
            except NoDeploymentsAvailable:
                # Missing image is non-fatal: tronfig may be pushed before deploy
                log.warning(
                    f'Docker image unavailable for {action_service}.{self.get_name()}.{action_dict.get("name")}'
                    " is it deployed yet?"
                )
                branch_dict = None
        else:
            branch_dict = None

        action_dict["monitoring"] = self.get_monitoring()

        return TronActionConfig(
            service=action_service,
            instance=compose_instance(self.get_name(), action_name),
            cluster=self.get_cluster(),
            config_dict=action_dict,
            branch_dict=branch_dict,
            soa_dir=self.soa_dir,
            for_validation=self.for_validation,
        )

    def get_actions(self):
        actions = self.config_dict.get("actions")
        return [
            self._get_action_config(name, action_dict)
            for name, action_dict in actions.items()
        ]

    def get_cleanup_action(self):
        action_dict = self.config_dict.get("cleanup_action")
        if not action_dict:
            return None

        # TODO: we should keep this trickery outside paasta repo
        return self._get_action_config("cleanup", action_dict)

    def check_monitoring(self) -> Tuple[bool, str]:
        """Validate that the monitoring config names a real team; returns (ok, message)."""
        monitoring = self.get_monitoring()
        valid_teams = list_teams()
        if monitoring is not None:
            team_name = monitoring.get("team", None)
            if team_name is None:
                return False, "Team name is required for monitoring"
            elif team_name not in valid_teams:
                # Offer close matches to help catch typos
                suggest_teams = difflib.get_close_matches(
                    word=team_name, possibilities=valid_teams
                )
                return (
                    False,
                    f"Invalid team name: {team_name}. Do you mean one of these: {suggest_teams}",
                )
        return True, ""

    def check_actions(self) -> Tuple[bool, List[str]]:
        """Validate every action (including cleanup); returns (ok, messages)."""
        actions = self.get_actions()
        cleanup_action = self.get_cleanup_action()
        if cleanup_action:
            actions.append(cleanup_action)

        checks_passed = True
        msgs: List[str] = []
        for action in actions:
            action_msgs = action.validate()
            if action_msgs:
                checks_passed = False
                msgs.extend(action_msgs)

        return checks_passed, msgs

    def validate(self) -> List[str]:
        """Run all job-level checks; returns the combined list of error messages."""
        _, error_msgs = self.check_actions()
        checks = ["check_monitoring"]
        for check in checks:
            check_passed, check_msg = getattr(self, check)()
            if not check_passed:
                error_msgs.append(check_msg)
        return error_msgs

    def __eq__(self, other):
        # Jobs compare equal on their raw config dict alone
        if isinstance(other, type(self)):
            return self.config_dict == other.config_dict
        return False
def format_volumes(paasta_volume_list):
    """Translate paasta camelCase volume dicts into Tron's snake_case schema."""
    formatted = []
    for volume in paasta_volume_list:
        formatted.append(
            {
                "container_path": volume["containerPath"],
                "host_path": volume["hostPath"],
                "mode": volume["mode"],
            }
        )
    return formatted
def format_master_config(master_config, default_volumes, dockercfg_location):
    """Inject default volumes and dockercfg location into the MASTER namespace config.

    Mutates and returns *master_config*.
    """
    mesos_options = master_config.get("mesos_options", {})
    mesos_options.update(
        {
            "default_volumes": format_volumes(default_volumes),
            "dockercfg_location": dockercfg_location,
        }
    )
    master_config["mesos_options"] = mesos_options

    k8s_options = master_config.get("k8s_options", {})
    if k8s_options:
        # Only add default volumes if we already have k8s_options
        k8s_options.update(
            {"default_volumes": format_volumes(default_volumes),}
        )
        # NOTE(review): original indentation is ambiguous in this copy; the
        # key is written back only when k8s_options is non-empty — confirm
        # against upstream before relying on this branch.
        master_config["k8s_options"] = k8s_options
    return master_config
def format_tron_action_dict(action_config: TronActionConfig, use_k8s: bool = False):
    """Generate a dict of tronfig for an action, from the TronActionConfig.

    :param action_config: the TronActionConfig to translate
    :param use_k8s: whether k8s is enabled for both the cluster and the job;
        when False, k8s-capable actions fall back to the Mesos executor
    :returns: a tronfig action dict with all None-valued keys removed
    """
    executor = action_config.get_executor()
    # Executor-independent settings, translated 1:1 from soaconfigs.
    result = {
        "command": action_config.get_cmd(),
        "executor": executor,
        "requires": action_config.get_requires(),
        "node": action_config.get_node(),
        "retries": action_config.get_retries(),
        "retries_delay": action_config.get_retries_delay(),
        "expected_runtime": action_config.get_expected_runtime(),
        "trigger_downstreams": action_config.get_trigger_downstreams(),
        "triggered_by": action_config.get_triggered_by(),
        "on_upstream_rerun": action_config.get_on_upstream_rerun(),
        "trigger_timeout": action_config.get_trigger_timeout(),
    }
    # while we're transitioning, we want to be able to cleanly fallback to Mesos
    # so we'll default to Mesos unless k8s usage is enabled for both the cluster
    # and job.
    # there are slight differences between k8s and Mesos configs, so we'll translate
    # whatever is in soaconfigs to the k8s equivalent here as well.
    if executor in KUBERNETES_EXECUTOR_NAMES and use_k8s:
        result["executor"] = "kubernetes"
        result["secret_env"] = action_config.get_secret_env()
        all_env = action_config.get_env()
        # For k8s, we do not want secret envvars to be duplicated in both `env` and `secret_env`
        result["env"] = {k: v for k, v in all_env.items() if not is_secret_ref(v)}
        # for Tron-on-K8s, we want to ship tronjob output through logspout
        # such that this output eventually makes it into our per-instance
        # log streams automatically
        result["env"]["ENABLE_PER_INSTANCE_LOGSPOUT"] = "1"
        result["node_selectors"] = action_config.get_node_selectors()
        result["node_affinities"] = action_config.get_node_affinities()
        # XXX: once we're off mesos we can make get_cap_* return just the cap names as a list
        result["cap_add"] = [cap["value"] for cap in action_config.get_cap_add()]
        result["cap_drop"] = [cap["value"] for cap in action_config.get_cap_drop()]
        # k8s labels are capped at 63 characters, hence the hashing of the instance name.
        result["labels"] = {
            "paasta.yelp.com/cluster": action_config.get_cluster(),
            "paasta.yelp.com/pool": action_config.get_pool(),
            "paasta.yelp.com/service": action_config.get_service(),
            "paasta.yelp.com/instance": limit_size_with_hash(
                action_config.get_instance(), limit=63, suffix=4,
            ),
        }
        # we can hardcode this for now as batches really shouldn't
        # need routable IPs - we can deal with adding an interface
        # to actually control annotations once there's a usecase
        # that requires more dynamicism here
        result["annotations"] = {"paasta.yelp.com/routable_ip": "false"}
        if action_config.get_team() is not None:
            result["labels"]["yelp.com/owner"] = action_config.get_team()
        # create_or_find_service_account_name requires k8s credentials, and we don't
        # have those available for CI to use (nor do we check these for normal PaaSTA
        # services, so we're not doing anything "new" by skipping this)
        if (
            action_config.get_iam_role_provider() == "aws"
            and action_config.get_iam_role()
            and not action_config.for_validation
        ):
            result["service_account_name"] = create_or_find_service_account_name(
                iam_role=action_config.get_iam_role(), namespace="tron"
            )
    elif executor in MESOS_EXECUTOR_NAMES:
        result["executor"] = "mesos"
        # Mesos expresses placement constraints as [attribute, operator, value]
        # triples; Tron wants them as dicts.
        constraint_labels = ["attribute", "operator", "value"]
        result["constraints"] = [
            dict(zip(constraint_labels, constraint))
            for constraint in action_config.get_calculated_constraints()
        ]
        result["docker_parameters"] = [
            {"key": param["key"], "value": param["value"]}
            for param in action_config.format_docker_parameters()
        ]
        result["env"] = action_config.get_env()
    # the following config is only valid for k8s/Mesos since we're not running SSH actions
    # in a containerized fashion
    if executor in (KUBERNETES_EXECUTOR_NAMES + MESOS_EXECUTOR_NAMES):
        result["cpus"] = action_config.get_cpus()
        result["mem"] = action_config.get_mem()
        result["disk"] = action_config.get_disk()
        result["extra_volumes"] = format_volumes(action_config.get_extra_volumes())
        result["docker_image"] = action_config.get_docker_url()
    # Only pass non-None values, so Tron will use defaults for others
    return {key: val for key, val in result.items() if val is not None}
def format_tron_job_dict(job_config: TronJobConfig, k8s_enabled: bool = False):
    """Generate a dict of tronfig for a job, from the TronJobConfig.

    :param job_config: TronJobConfig
    :param k8s_enabled: whether the cluster allows running this job on k8s
    :returns: a tronfig job dict with all None-valued keys removed
    """
    # TODO: this use_k8s flag should be removed once we've fully migrated off of mesos
    use_k8s = job_config.get_use_k8s() and k8s_enabled
    actions = {}
    for action_config in job_config.get_actions():
        actions[action_config.get_action_name()] = format_tron_action_dict(
            action_config=action_config, use_k8s=use_k8s
        )
    result = {
        "node": job_config.get_node(),
        "schedule": job_config.get_schedule(),
        "actions": actions,
        "monitoring": job_config.get_monitoring(),
        "queueing": job_config.get_queueing(),
        "run_limit": job_config.get_run_limit(),
        "all_nodes": job_config.get_all_nodes(),
        "enabled": job_config.get_enabled(),
        "allow_overlap": job_config.get_allow_overlap(),
        "max_runtime": job_config.get_max_runtime(),
        "time_zone": job_config.get_time_zone(),
        "expected_runtime": job_config.get_expected_runtime(),
    }
    # TODO: this should be directly inlined, but we need to update tron everywhere first so it'll
    # be slightly less tedious to just conditionally send this now until we clean things up on the
    # removal of all the Mesos code
    if job_config.get_use_k8s():
        result["use_k8s"] = job_config.get_use_k8s()
    cleanup_config = job_config.get_cleanup_action()
    if cleanup_config:
        result["cleanup_action"] = format_tron_action_dict(
            action_config=cleanup_config, use_k8s=use_k8s
        )
    # Only pass non-None values, so Tron will use defaults for others
    return {key: val for key, val in result.items() if val is not None}
def load_tron_instance_config(
    service: str,
    instance: str,
    cluster: str,
    load_deployments: bool = True,
    soa_dir: str = DEFAULT_SOA_DIR,
) -> TronActionConfig:
    """Find the TronActionConfig for a single "job.action" instance.

    :raises NoConfigurationForServiceError: when no matching job/action exists
    """
    jobs = load_tron_service_config(
        service=service,
        cluster=cluster,
        load_deployments=load_deployments,
        soa_dir=soa_dir,
    )
    # Tron instances are named "<job>.<action>".
    requested_job, requested_action = instance.split(".")
    for job in jobs:
        if job.get_name() != requested_job:
            continue
        for action in job.get_actions():
            if action.get_action_name() == requested_action:
                return action
    raise NoConfigurationForServiceError(
        f"No tron configuration found for {service} {instance}"
    )
@time_cache(ttl=5)
def load_tron_service_config(
    service,
    cluster,
    load_deployments=True,
    soa_dir=DEFAULT_SOA_DIR,
    for_validation=False,
):
    """Short-lived (5s TTL) cached wrapper around load_tron_service_config_no_cache."""
    return load_tron_service_config_no_cache(
        service=service,
        cluster=cluster,
        load_deployments=load_deployments,
        soa_dir=soa_dir,
        for_validation=for_validation,
    )
def load_tron_service_config_no_cache(
    service,
    cluster,
    load_deployments=True,
    soa_dir=DEFAULT_SOA_DIR,
    for_validation=False,
):
    """Load all configured jobs for a service, and any additional config values."""
    config = read_extra_service_information(
        service_name=service, extra_info=f"tron-{cluster}", soa_dir=soa_dir
    )
    # Templates are config scaffolding, not runnable jobs; drop them first.
    jobs = filter_templates_from_config(config)
    job_configs = []
    for name, job in jobs.items():
        job_configs.append(
            TronJobConfig(
                name=name,
                service=service,
                cluster=cluster,
                config_dict=job,
                load_deployments=load_deployments,
                soa_dir=soa_dir,
                for_validation=for_validation,
            )
        )
    return job_configs
def create_complete_master_config(cluster, soa_dir=DEFAULT_SOA_DIR):
    """Render the complete Tron MASTER namespace config for a cluster as YAML.

    Reads the cluster's MASTER.yaml from the tronfig folder and merges in
    system-wide defaults (volumes, dockercfg location).
    """
    system_paasta_config = load_system_paasta_config()
    tronfig_folder = get_tronfig_folder(soa_dir=soa_dir, cluster=cluster)
    # "MASTER.yaml" is a fixed filename (was an f-string with no placeholders).
    config = read_yaml_file(os.path.join(tronfig_folder, "MASTER.yaml"))
    master_config = format_master_config(
        config,
        system_paasta_config.get_volumes(),
        system_paasta_config.get_dockercfg_location(),
    )
    return yaml.dump(master_config, Dumper=Dumper, default_flow_style=False)
def create_complete_config(
    service: str,
    cluster: str,
    soa_dir: str = DEFAULT_SOA_DIR,
    k8s_enabled: bool = False,
):
    """Generate a namespace configuration file for Tron, for a service."""
    job_configs = load_tron_service_config(
        service=service, cluster=cluster, load_deployments=True, soa_dir=soa_dir,
    )
    rendered_jobs = {
        job_config.get_name(): format_tron_job_dict(
            job_config=job_config, k8s_enabled=k8s_enabled
        )
        for job_config in job_configs
    }
    return yaml.dump(
        {"jobs": rendered_jobs}, Dumper=Dumper, default_flow_style=False
    )
def validate_complete_config(
    service: str, cluster: str, soa_dir: str = DEFAULT_SOA_DIR
) -> List[str]:
    """Validate a service's Tron config, returning error messages (empty if valid).

    Runs PaaSTA-side validation first, then shells out to `tronfig` to have
    Tron itself validate the generated namespace config.
    """
    job_configs = load_tron_service_config(
        service=service,
        cluster=cluster,
        load_deployments=False,
        soa_dir=soa_dir,
        for_validation=True,
    )
    # PaaSTA-specific validation
    for job_config in job_configs:
        check_msgs = job_config.validate()
        if check_msgs:
            # Fail fast on the first invalid job.
            return check_msgs
    master_config_path = os.path.join(
        os.path.abspath(soa_dir), "tron", cluster, MASTER_NAMESPACE + ".yaml"
    )
    # TODO: remove creating the master config here once we're fully off of mesos
    # since we only have it here to verify that the generated tronfig will be valid
    # given that the kill-switch will affect PaaSTA's setup_tron_namespace script (we're
    # not reading the kill-switch in Tron since it's not easily accessible at the point
    # at which we'd like to fallback to Mesos if toggled)
    master_config = yaml.safe_load(
        create_complete_master_config(cluster=cluster, soa_dir=soa_dir)
    )
    k8s_enabled_for_cluster = master_config.get("k8s_options", {}).get("enabled", False)
    preproccessed_config = {}
    # Use Tronfig on generated config from PaaSTA to validate the rest
    preproccessed_config["jobs"] = {
        job_config.get_name(): format_tron_job_dict(
            job_config=job_config, k8s_enabled=k8s_enabled_for_cluster
        )
        for job_config in job_configs
    }
    complete_config = yaml.dump(preproccessed_config, Dumper=Dumper)
    # "-" makes tronfig read the candidate config from stdin; -V validates only.
    proc = subprocess.run(
        ["tronfig", "-", "-V", "-n", service, "-m", master_config_path],
        input=complete_config,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding="utf-8",
    )
    if proc.returncode != 0:
        process_errors = proc.stderr.strip()
        if process_errors:  # Error running tronfig
            print(proc.stderr)
        return [proc.stdout.strip()]
    return []
def get_tron_namespaces(cluster, soa_dir):
tron_config_file = f"tron-{cluster}.yaml"
config_dirs = [
_dir[0]
for _dir in os.walk(os.path.abspath(soa_dir))
if tron_config_file in _dir[2]
]
namespaces = [os.path.split(config_dir)[1] for config_dir in config_dirs]
return namespaces
def list_tron_clusters(service: str, soa_dir: str = DEFAULT_SOA_DIR) -> List[str]:
    """Returns the Tron clusters a service is configured to deploy to."""
    # Cluster names are embedded in filenames like tron-<cluster>.yaml.
    cluster_pattern = re.compile(r"/tron-([0-9a-z-_]*)\.yaml$")
    service_dir = os.path.join(soa_dir, service)
    clusters = []
    for filename in glob.glob(f"{service_dir}/*.yaml"):
        match = cluster_pattern.search(filename)
        if match is not None:
            clusters.append(match.group(1))
    return clusters
def get_tron_dashboard_for_cluster(cluster: str):
    """Return the configured Tron dashboard link for a cluster.

    :raises Exception: if the cluster has no "Tron" dashboard link configured
    """
    links = load_system_paasta_config().get_dashboard_links()[cluster]
    if "Tron" in links:
        return links["Tron"]
    raise Exception(f"tron api endpoint is not defined for cluster {cluster}")
def tron_jobs_running_here() -> List[Tuple[str, str, int]]:
    """List tron tasks running on this host.

    Delegates to mesos_services_running_here, keeping only frameworks whose
    name starts with "tron". Each entry is presumably a
    (service, instance, port) tuple per the annotation — TODO confirm against
    mesos_services_running_here.
    """
    return mesos_services_running_here(
        framework_filter=lambda fw: fw["name"].startswith("tron"),
        parse_service_instance_from_executor_id=parse_service_instance_from_executor_id,
    )
def parse_service_instance_from_executor_id(task_id: str) -> Tuple[str, str]:
    """Parse a tron mesos task id into a (service, "job.action") pair.

    Task ids look like:
    schematizer.traffic_generator.28414.turnstyle.46da87d7-6092-4ed4-b926-ffa7b21c7785
    i.e. service.job.job_run_number.action.uuid

    Malformed ids are logged and mapped to placeholder names instead of
    raising, since callers iterate over arbitrary running tasks.
    """
    try:
        service, job, _job_run, action, _uuid = task_id.split(".")
    except ValueError as e:
        # Narrowed from `except Exception`: only an unpack-arity mismatch is
        # expected here; anything else would be a programming error worth raising.
        log.warning(
            f"Couldn't parse the mesos task id into a valid tron job: {task_id}: {e}"
        )
        service, job, action = "unknown_service", "unknown_job", "unknown_action"
    return service, f"{job}.{action}"
| |
import numpy as np
import robosuite.utils.transform_utils as T
from robosuite.environments.manipulation.two_arm_env import TwoArmEnv
from robosuite.models.arenas import EmptyArena
from robosuite.models.objects import CylinderObject, PlateWithHoleObject
from robosuite.models.tasks import ManipulationTask
from robosuite.utils.mjcf_utils import CustomMaterial, array_to_string, find_elements
from robosuite.utils.observables import Observable, sensor
class TwoArmPegInHole(TwoArmEnv):
    """
    This class corresponds to the peg-in-hole task for two robot arms.

    Args:
        robots (str or list of str): Specification for specific robot arm(s) to be instantiated within this env
            (e.g: "Sawyer" would generate one arm; ["Panda", "Panda", "Sawyer"] would generate three robot arms)
            Note: Must be either 2 single single-arm robots or 1 bimanual robot!

        env_configuration (str): Specifies how to position the robots within the environment. Can be either:

            :`'bimanual'`: Only applicable for bimanual robot setups. Sets up the (single) bimanual robot on the -x
                side of the table
            :`'single-arm-parallel'`: Only applicable for multi single arm setups. Sets up the (two) single armed
                robots next to each other on the -x side of the table
            :`'single-arm-opposed'`: Only applicable for multi single arm setups. Sets up the (two) single armed
                robots opposed from each others on the opposite +/-y sides of the table.

        Note that "default" corresponds to either "bimanual" if a bimanual robot is used or "single-arm-opposed" if two
        single-arm robots are used.

        controller_configs (str or list of dict): If set, contains relevant controller parameters for creating a
            custom controller. Else, uses the default controller for this specific task. Should either be single
            dict if same controller is to be used for all robots or else it should be a list of the same length as
            "robots" param

        gripper_types (str or list of str): type of gripper, used to instantiate gripper models from gripper factory.
            For this environment, setting a value other than the default (None) will raise an AssertionError, as
            this environment is not meant to be used with any gripper at all.

        initialization_noise (dict or list of dict): Dict containing the initialization noise parameters.
            The expected keys and corresponding value types are specified below:

            :`'magnitude'`: The scale factor of uni-variate random noise applied to each of a robot's given initial
                joint positions. Setting this value to `None` or 0.0 results in no noise being applied.
                If "gaussian" type of noise is applied then this magnitude scales the standard deviation applied,
                If "uniform" type of noise is applied then this magnitude sets the bounds of the sampling range
            :`'type'`: Type of noise to apply. Can either specify "gaussian" or "uniform"

            Should either be single dict if same noise value is to be used for all robots or else it should be a
            list of the same length as "robots" param

            :Note: Specifying "default" will automatically use the default noise settings.
                Specifying None will automatically create the required dict with "magnitude" set to 0.0.

        use_camera_obs (bool or list of bool): if True, every observation for a specific robot includes a rendered
            image. Should either be single bool if camera obs value is to be used for all
            robots or else it should be a list of the same length as "robots" param

        use_object_obs (bool): if True, include object (cube) information in
            the observation.

        reward_scale (None or float): Scales the normalized reward function by the amount specified.
            If None, environment reward remains unnormalized

        reward_shaping (bool): if True, use dense rewards.

        peg_radius (2-tuple): low and high limits of the (uniformly sampled)
            radius of the peg

        peg_length (float): length of the peg

        has_renderer (bool): If true, render the simulation state in
            a viewer instead of headless mode.

        has_offscreen_renderer (bool): True if using off-screen rendering

        render_camera (str): Name of camera to render if `has_renderer` is True. Setting this value to 'None'
            will result in the default angle being applied, which is useful as it can be dragged / panned by
            the user using the mouse

        render_collision_mesh (bool): True if rendering collision meshes in camera. False otherwise.

        render_visual_mesh (bool): True if rendering visual meshes in camera. False otherwise.

        render_gpu_device_id (int): corresponds to the GPU device id to use for offscreen rendering.
            Defaults to -1, in which case the device will be inferred from environment variables
            (GPUS or CUDA_VISIBLE_DEVICES).

        control_freq (float): how many control signals to receive in every second. This sets the amount of
            simulation time that passes between every action input.

        horizon (int): Every episode lasts for exactly @horizon timesteps.

        ignore_done (bool): True if never terminating the environment (ignore @horizon).

        hard_reset (bool): If True, re-loads model, sim, and render object upon a reset call, else,
            only calls sim.reset and resets all robosuite-internal variables

        camera_names (str or list of str): name of camera to be rendered. Should either be single str if
            same name is to be used for all cameras' rendering or else it should be a list of cameras to render.

            :Note: At least one camera must be specified if @use_camera_obs is True.

            :Note: To render all robots' cameras of a certain type (e.g.: "robotview" or "eye_in_hand"), use the
                convention "all-{name}" (e.g.: "all-robotview") to automatically render all camera images from each
                robot's camera list).

        camera_heights (int or list of int): height of camera frame. Should either be single int if
            same height is to be used for all cameras' frames or else it should be a list of the same length as
            "camera names" param.

        camera_widths (int or list of int): width of camera frame. Should either be single int if
            same width is to be used for all cameras' frames or else it should be a list of the same length as
            "camera names" param.

        camera_depths (bool or list of bool): True if rendering RGB-D, and RGB otherwise. Should either be single
            bool if same depth setting is to be used for all cameras or else it should be a list of the same length as
            "camera names" param.

        camera_segmentations (None or str or list of str or list of list of str): Camera segmentation(s) to use
            for each camera. Valid options are:

                `None`: no segmentation sensor used
                `'instance'`: segmentation at the class-instance level
                `'class'`: segmentation at the class level
                `'element'`: segmentation at the per-geom level

            If not None, multiple types of segmentations can be specified. A [list of str / str or None] specifies
            [multiple / a single] segmentation(s) to use for all cameras. A list of list of str specifies per-camera
            segmentation setting(s) to use.

    Raises:
        AssertionError: [Gripper specified]
        ValueError: [Invalid number of robots specified]
        ValueError: [Invalid env configuration]
        ValueError: [Invalid robots for specified env configuration]
    """

    def __init__(
        self,
        robots,
        env_configuration="default",
        controller_configs=None,
        gripper_types=None,
        initialization_noise="default",
        use_camera_obs=True,
        use_object_obs=True,
        reward_scale=1.0,
        reward_shaping=False,
        peg_radius=(0.015, 0.03),
        peg_length=0.13,
        has_renderer=False,
        has_offscreen_renderer=True,
        render_camera="frontview",
        render_collision_mesh=False,
        render_visual_mesh=True,
        render_gpu_device_id=-1,
        control_freq=20,
        horizon=1000,
        ignore_done=False,
        hard_reset=True,
        camera_names="agentview",
        camera_heights=256,
        camera_widths=256,
        camera_depths=False,
        camera_segmentations=None,  # {None, instance, class, element}
        renderer="mujoco",
        renderer_config=None,
    ):
        # Assert that the gripper type is None
        assert gripper_types is None, "Tried to specify gripper other than None in TwoArmPegInHole environment!"

        # reward configuration
        self.reward_scale = reward_scale
        self.reward_shaping = reward_shaping

        # whether to use ground-truth object states
        self.use_object_obs = use_object_obs

        # Save peg specs
        self.peg_radius = peg_radius
        self.peg_length = peg_length

        super().__init__(
            robots=robots,
            env_configuration=env_configuration,
            controller_configs=controller_configs,
            mount_types="default",
            gripper_types=gripper_types,
            initialization_noise=initialization_noise,
            use_camera_obs=use_camera_obs,
            has_renderer=has_renderer,
            has_offscreen_renderer=has_offscreen_renderer,
            render_camera=render_camera,
            render_collision_mesh=render_collision_mesh,
            render_visual_mesh=render_visual_mesh,
            render_gpu_device_id=render_gpu_device_id,
            control_freq=control_freq,
            horizon=horizon,
            ignore_done=ignore_done,
            hard_reset=hard_reset,
            camera_names=camera_names,
            camera_heights=camera_heights,
            camera_widths=camera_widths,
            camera_depths=camera_depths,
            camera_segmentations=camera_segmentations,
            renderer=renderer,
            renderer_config=renderer_config,
        )

    def reward(self, action=None):
        """
        Reward function for the task.

        Sparse un-normalized reward:

            - a discrete reward of 5.0 is provided if the peg is inside the plate's hole
              - Note that we enforce that it's inside at an appropriate angle (cos(theta) > 0.95).

        Un-normalized summed components if using reward shaping:

            - Reaching: in [0, 1], to encourage the arms to approach each other
            - Perpendicular Distance: in [0,1], to encourage the arms to approach each other
            - Parallel Distance: in [0,1], to encourage the arms to approach each other
            - Alignment: in [0, 1], to encourage having the right orientation between the peg and hole.
            - Placement: in {0, 1}, nonzero if the peg is in the hole with a relatively correct alignment

        Note that the final reward is normalized and scaled by reward_scale / 5.0 as
        well so that the max score is equal to reward_scale
        """
        reward = 0

        # Right location and angle
        if self._check_success():
            reward = 1.0

        # use a shaping reward
        if self.reward_shaping:
            # Grab relevant values
            t, d, cos = self._compute_orientation()
            # reaching reward
            hole_pos = self.sim.data.body_xpos[self.hole_body_id]
            gripper_site_pos = self.sim.data.body_xpos[self.peg_body_id]
            dist = np.linalg.norm(gripper_site_pos - hole_pos)
            reaching_reward = 1 - np.tanh(1.0 * dist)
            reward += reaching_reward

            # Orientation reward
            reward += 1 - np.tanh(d)
            reward += 1 - np.tanh(np.abs(t))
            reward += cos

        # if we're not reward shaping, scale sparse reward so that the max reward is identical to its dense version
        else:
            reward *= 5.0

        if self.reward_scale is not None:
            reward *= self.reward_scale / 5.0

        return reward

    def _load_model(self):
        """
        Loads an xml model, puts it in self.model
        """
        super()._load_model()

        # Adjust base pose(s) accordingly
        if self.env_configuration == "bimanual":
            xpos = self.robots[0].robot_model.base_xpos_offset["empty"]
            self.robots[0].robot_model.set_base_xpos(xpos)
        else:
            if self.env_configuration == "single-arm-opposed":
                # Set up robots facing towards each other by rotating them from their default position
                for robot, rotation in zip(self.robots, (np.pi / 2, -np.pi / 2)):
                    xpos = robot.robot_model.base_xpos_offset["empty"]
                    rot = np.array((0, 0, rotation))
                    # Rotate the default base offset about z to face the other arm.
                    xpos = T.euler2mat(rot) @ np.array(xpos)
                    robot.robot_model.set_base_xpos(xpos)
                    robot.robot_model.set_base_ori(rot)
            else:  # "single-arm-parallel" configuration setting
                # Set up robots parallel to each other but offset from the center
                for robot, offset in zip(self.robots, (-0.25, 0.25)):
                    xpos = robot.robot_model.base_xpos_offset["empty"]
                    xpos = np.array(xpos) + np.array((0, offset, 0))
                    robot.robot_model.set_base_xpos(xpos)

        # Add arena and robot
        mujoco_arena = EmptyArena()

        # Arena always gets set to zero origin
        mujoco_arena.set_origin([0, 0, 0])

        # Modify default agentview camera
        mujoco_arena.set_camera(
            camera_name="agentview",
            pos=[1.0666432116509934, 1.4903257668114777e-08, 2.0563394967349096],
            quat=[0.6530979871749878, 0.27104058861732483, 0.27104055881500244, 0.6530978679656982],
        )

        # initialize objects of interest
        self.hole = PlateWithHoleObject(name="hole")

        tex_attrib = {
            "type": "cube",
        }
        mat_attrib = {
            "texrepeat": "1 1",
            "specular": "0.4",
            "shininess": "0.1",
        }
        greenwood = CustomMaterial(
            texture="WoodGreen",
            tex_name="greenwood",
            mat_name="greenwood_mat",
            tex_attrib=tex_attrib,
            mat_attrib=mat_attrib,
        )
        self.peg = CylinderObject(
            name="peg",
            size_min=(self.peg_radius[0], self.peg_length),
            size_max=(self.peg_radius[1], self.peg_length),
            material=greenwood,
            rgba=[0, 1, 0, 1],
            joints=None,
        )

        # Load hole object
        hole_obj = self.hole.get_obj()
        hole_obj.set("quat", "0 0 0.707 0.707")
        hole_obj.set("pos", "0.11 0 0.17")

        # Load peg object
        peg_obj = self.peg.get_obj()
        peg_obj.set("pos", array_to_string((0, 0, self.peg_length)))

        # Append appropriate objects to arms
        if self.env_configuration == "bimanual":
            r_eef, l_eef = [self.robots[0].robot_model.eef_name[arm] for arm in self.robots[0].arms]
            r_model, l_model = [self.robots[0].robot_model, self.robots[0].robot_model]
        else:
            r_eef, l_eef = [robot.robot_model.eef_name for robot in self.robots]
            r_model, l_model = [self.robots[0].robot_model, self.robots[1].robot_model]
        r_body = find_elements(root=r_model.worldbody, tags="body", attribs={"name": r_eef}, return_first=True)
        l_body = find_elements(root=l_model.worldbody, tags="body", attribs={"name": l_eef}, return_first=True)
        # Peg rides on the right end effector, hole plate on the left.
        r_body.append(peg_obj)
        l_body.append(hole_obj)

        # task includes arena, robot, and objects of interest
        # We don't add peg and hole directly since they were already appended to the robots
        self.model = ManipulationTask(
            mujoco_arena=mujoco_arena,
            mujoco_robots=[robot.robot_model for robot in self.robots],
        )

        # Make sure to add relevant assets from peg and hole objects
        self.model.merge_assets(self.hole)
        self.model.merge_assets(self.peg)

    def _setup_references(self):
        """
        Sets up references to important components. A reference is typically an
        index or a list of indices that point to the corresponding elements
        in a flatten array, which is how MuJoCo stores physical simulation data.
        """
        super()._setup_references()

        # Additional object references from this env
        self.hole_body_id = self.sim.model.body_name2id(self.hole.root_body)
        self.peg_body_id = self.sim.model.body_name2id(self.peg.root_body)

    def _setup_observables(self):
        """
        Sets up observables to be used for this environment. Creates object-based observables if enabled

        Returns:
            OrderedDict: Dictionary mapping observable names to its corresponding Observable object
        """
        observables = super()._setup_observables()

        # low-level object information
        if self.use_object_obs:
            # Get robot prefix and define observables modality
            if self.env_configuration == "bimanual":
                pf0 = self.robots[0].robot_model.naming_prefix + "right_"
                pf1 = self.robots[0].robot_model.naming_prefix + "left_"
            else:
                pf0 = self.robots[0].robot_model.naming_prefix
                pf1 = self.robots[1].robot_model.naming_prefix
            modality = "object"

            # position and rotation of peg and hole
            @sensor(modality=modality)
            def hole_pos(obs_cache):
                return np.array(self.sim.data.body_xpos[self.hole_body_id])

            @sensor(modality=modality)
            def hole_quat(obs_cache):
                return T.convert_quat(self.sim.data.body_xquat[self.hole_body_id], to="xyzw")

            @sensor(modality=modality)
            def peg_to_hole(obs_cache):
                # Depends on hole_pos being computed earlier in the obs cache;
                # falls back to zeros when it is not available yet.
                return (
                    obs_cache["hole_pos"] - np.array(self.sim.data.body_xpos[self.peg_body_id])
                    if "hole_pos" in obs_cache
                    else np.zeros(3)
                )

            @sensor(modality=modality)
            def peg_quat(obs_cache):
                return T.convert_quat(self.sim.data.body_xquat[self.peg_body_id], to="xyzw")

            # Relative orientation parameters
            @sensor(modality=modality)
            def angle(obs_cache):
                # Also stashes t and d in the cache for the t/d sensors below.
                t, d, cos = self._compute_orientation()
                obs_cache["t"] = t
                obs_cache["d"] = d
                return cos

            @sensor(modality=modality)
            def t(obs_cache):
                return obs_cache["t"] if "t" in obs_cache else 0.0

            @sensor(modality=modality)
            def d(obs_cache):
                return obs_cache["d"] if "d" in obs_cache else 0.0

            sensors = [hole_pos, hole_quat, peg_to_hole, peg_quat, angle, t, d]
            names = [s.__name__ for s in sensors]

            # Create observables
            for name, s in zip(names, sensors):
                observables[name] = Observable(
                    name=name,
                    sensor=s,
                    sampling_rate=self.control_freq,
                )

        return observables

    def _reset_internal(self):
        """
        Resets simulation internal configurations.
        """
        super()._reset_internal()

    def _check_success(self):
        """
        Check if peg is successfully aligned and placed within the hole

        Returns:
            bool: True if peg is placed in hole correctly
        """
        t, d, cos = self._compute_orientation()

        return d < 0.06 and -0.12 <= t <= 0.14 and cos > 0.95

    def _compute_orientation(self):
        """
        Helper function to return the relative positions between the hole and the peg.
        In particular, the intersection of the line defined by the peg and the plane
        defined by the hole is computed; the parallel distance, perpendicular distance,
        and angle are returned.

        Returns:
            3-tuple:

                - (float): parallel distance
                - (float): perpendicular distance
                - (float): angle
        """
        peg_mat = self.sim.data.body_xmat[self.peg_body_id]
        peg_mat.shape = (3, 3)
        peg_pos = self.sim.data.body_xpos[self.peg_body_id]

        hole_pos = self.sim.data.body_xpos[self.hole_body_id]
        hole_mat = self.sim.data.body_xmat[self.hole_body_id]
        hole_mat.shape = (3, 3)

        # Peg axis (local z) in world coordinates.
        v = peg_mat @ np.array([0, 0, 1])
        v = v / np.linalg.norm(v)
        # Hole center is offset from the plate body along its local x axis.
        center = hole_pos + hole_mat @ np.array([0.1, 0, 0])
        t = (center - peg_pos) @ v / (np.linalg.norm(v) ** 2)
        d = np.linalg.norm(np.cross(v, peg_pos - center)) / np.linalg.norm(v)

        hole_normal = hole_mat @ np.array([0, 0, 1])
        return (
            t,
            d,
            abs(np.dot(hole_normal, v) / np.linalg.norm(hole_normal) / np.linalg.norm(v)),
        )

    def _peg_pose_in_hole_frame(self):
        """
        A helper function that takes in a named data field and returns the pose of that
        object in the base frame.

        Returns:
            np.array: (4,4) matrix corresponding to the pose of the peg in the hole frame
        """
        # Peg pose in world frame
        peg_pos_in_world = self.sim.data.get_body_xpos(self.peg.root_body)
        peg_rot_in_world = self.sim.data.get_body_xmat(self.peg.root_body).reshape((3, 3))
        peg_pose_in_world = T.make_pose(peg_pos_in_world, peg_rot_in_world)

        # Hole pose in world frame
        hole_pos_in_world = self.sim.data.get_body_xpos(self.hole.root_body)
        hole_rot_in_world = self.sim.data.get_body_xmat(self.hole.root_body).reshape((3, 3))
        hole_pose_in_world = T.make_pose(hole_pos_in_world, hole_rot_in_world)

        world_pose_in_hole = T.pose_inv(hole_pose_in_world)
        peg_pose_in_hole = T.pose_in_A_to_pose_in_B(peg_pose_in_world, world_pose_in_hole)
        return peg_pose_in_hole
| |
from __future__ import unicode_literals
from .abc import ABCIE
from .abc7news import Abc7NewsIE
from .academicearth import AcademicEarthCourseIE
from .addanime import AddAnimeIE
from .adobetv import AdobeTVIE
from .adultswim import AdultSwimIE
from .aftenposten import AftenpostenIE
from .aftonbladet import AftonbladetIE
from .airmozilla import AirMozillaIE
from .aljazeera import AlJazeeraIE
from .alphaporno import AlphaPornoIE
from .anitube import AnitubeIE
from .anysex import AnySexIE
from .aol import AolIE
from .allocine import AllocineIE
from .aparat import AparatIE
from .appletrailers import AppleTrailersIE
from .archiveorg import ArchiveOrgIE
from .ard import ARDIE, ARDMediathekIE
from .arte import (
ArteTvIE,
ArteTVPlus7IE,
ArteTVCreativeIE,
ArteTVConcertIE,
ArteTVFutureIE,
ArteTVDDCIE,
ArteTVEmbedIE,
)
from .atresplayer import AtresPlayerIE
from .atttechchannel import ATTTechChannelIE
from .audiomack import AudiomackIE, AudiomackAlbumIE
from .azubu import AzubuIE
from .bambuser import BambuserIE, BambuserChannelIE
from .bandcamp import BandcampIE, BandcampAlbumIE
from .bbccouk import BBCCoUkIE
from .beeg import BeegIE
from .behindkink import BehindKinkIE
from .beatportpro import BeatportProIE
from .bet import BetIE
from .bild import BildIE
from .bilibili import BiliBiliIE
from .blinkx import BlinkxIE
from .bliptv import BlipTVIE, BlipTVUserIE
from .bloomberg import BloombergIE
from .bpb import BpbIE
from .br import BRIE
from .breakcom import BreakIE
from .brightcove import BrightcoveIE
from .buzzfeed import BuzzFeedIE
from .byutv import BYUtvIE
from .c56 import C56IE
from .camdemy import (
CamdemyIE,
CamdemyFolderIE
)
from .canal13cl import Canal13clIE
from .canalplus import CanalplusIE
from .canalc2 import Canalc2IE
from .cbs import CBSIE
from .cbsnews import CBSNewsIE
from .cbssports import CBSSportsIE
from .ccc import CCCIE
from .ceskatelevize import CeskaTelevizeIE
from .channel9 import Channel9IE
from .chilloutzone import ChilloutzoneIE
from .chirbit import (
ChirbitIE,
ChirbitProfileIE,
)
from .cinchcast import CinchcastIE
from .clipfish import ClipfishIE
from .cliphunter import CliphunterIE
from .clipsyndicate import ClipsyndicateIE
from .cloudy import CloudyIE
from .clubic import ClubicIE
from .cmt import CMTIE
from .cnet import CNETIE
from .cnn import (
CNNIE,
CNNBlogsIE,
CNNArticleIE,
)
from .collegehumor import CollegeHumorIE
from .collegerama import CollegeRamaIE
from .comedycentral import ComedyCentralIE, ComedyCentralShowsIE
from .comcarcoff import ComCarCoffIE
from .commonmistakes import CommonMistakesIE, UnicodeBOMIE
from .condenast import CondeNastIE
from .cracked import CrackedIE
from .criterion import CriterionIE
from .crunchyroll import (
CrunchyrollIE,
CrunchyrollShowPlaylistIE
)
from .cspan import CSpanIE
from .ctsnews import CtsNewsIE
from .dailymotion import (
DailymotionIE,
DailymotionPlaylistIE,
DailymotionUserIE,
)
from .daum import DaumIE
from .dbtv import DBTVIE
from .dctp import DctpTvIE
from .deezer import DeezerPlaylistIE
from .dfb import DFBIE
from .dotsub import DotsubIE
from .douyutv import DouyuTVIE
from .dreisat import DreiSatIE
from .drbonanza import DRBonanzaIE
from .drtuber import DrTuberIE
from .drtv import DRTVIE
from .dvtv import DVTVIE
from .dump import DumpIE
from .defense import DefenseGouvFrIE
from .discovery import DiscoveryIE
from .divxstage import DivxStageIE
from .dropbox import DropboxIE
from .eagleplatform import EaglePlatformIE
from .ebaumsworld import EbaumsWorldIE
from .echomsk import EchoMskIE
from .ehow import EHowIE
from .eighttracks import EightTracksIE
from .einthusan import EinthusanIE
from .eitb import EitbIE
from .ellentv import (
EllenTVIE,
EllenTVClipsIE,
)
from .elpais import ElPaisIE
from .embedly import EmbedlyIE
from .empflix import EMPFlixIE
from .engadget import EngadgetIE
from .eporner import EpornerIE
from .eroprofile import EroProfileIE
from .escapist import EscapistIE
from .everyonesmixtape import EveryonesMixtapeIE
from .exfm import ExfmIE
from .expotv import ExpoTVIE
from .extremetube import ExtremeTubeIE
from .facebook import FacebookIE
from .faz import FazIE
from .fc2 import FC2IE
from .firedrive import FiredriveIE
from .firstpost import FirstpostIE
from .firsttv import FirstTVIE
from .fivemin import FiveMinIE
from .fktv import (
FKTVIE,
FKTVPosteckeIE,
)
from .flickr import FlickrIE
from .folketinget import FolketingetIE
from .footyroom import FootyRoomIE
from .fourtube import FourTubeIE
from .foxgay import FoxgayIE
from .foxnews import FoxNewsIE
from .franceculture import FranceCultureIE
from .franceinter import FranceInterIE
from .francetv import (
PluzzIE,
FranceTvInfoIE,
FranceTVIE,
GenerationQuoiIE,
CultureboxIE,
)
from .freesound import FreesoundIE
from .freespeech import FreespeechIE
from .freevideo import FreeVideoIE
from .funnyordie import FunnyOrDieIE
from .gamekings import GamekingsIE
from .gameone import (
GameOneIE,
GameOnePlaylistIE,
)
from .gamespot import GameSpotIE
from .gamestar import GameStarIE
from .gametrailers import GametrailersIE
from .gazeta import GazetaIE
from .gdcvault import GDCVaultIE
from .generic import GenericIE
from .giantbomb import GiantBombIE
from .giga import GigaIE
from .glide import GlideIE
from .globo import GloboIE
from .godtube import GodTubeIE
from .goldenmoustache import GoldenMoustacheIE
from .golem import GolemIE
from .googleplus import GooglePlusIE
from .googlesearch import GoogleSearchIE
from .gorillavid import GorillaVidIE
from .goshgay import GoshgayIE
from .grooveshark import GroovesharkIE
from .groupon import GrouponIE
from .hark import HarkIE
from .hearthisat import HearThisAtIE
from .heise import HeiseIE
from .hellporno import HellPornoIE
from .helsinki import HelsinkiIE
from .hentaistigma import HentaiStigmaIE
from .historicfilms import HistoricFilmsIE
from .history import HistoryIE
from .hitbox import HitboxIE, HitboxLiveIE
from .hornbunny import HornBunnyIE
from .hostingbulk import HostingBulkIE
from .hotnewhiphop import HotNewHipHopIE
from .howcast import HowcastIE
from .howstuffworks import HowStuffWorksIE
from .huffpost import HuffPostIE
from .hypem import HypemIE
from .iconosquare import IconosquareIE
from .ign import IGNIE, OneUPIE
from .imdb import (
ImdbIE,
ImdbListIE
)
from .imgur import ImgurIE
from .ina import InaIE
from .infoq import InfoQIE
from .instagram import InstagramIE, InstagramUserIE
from .internetvideoarchive import InternetVideoArchiveIE
from .iprima import IPrimaIE
from .ivi import (
IviIE,
IviCompilationIE
)
from .izlesene import IzleseneIE
from .jadorecettepub import JadoreCettePubIE
from .jeuxvideo import JeuxVideoIE
from .jove import JoveIE
from .jukebox import JukeboxIE
from .jpopsukitv import JpopsukiIE
from .kaltura import KalturaIE
from .kanalplay import KanalPlayIE
from .kankan import KankanIE
from .karaoketv import KaraoketvIE
from .keezmovies import KeezMoviesIE
from .khanacademy import KhanAcademyIE
from .kickstarter import KickStarterIE
from .keek import KeekIE
from .kontrtube import KontrTubeIE
from .krasview import KrasViewIE
from .ku6 import Ku6IE
from .la7 import LA7IE
from .laola1tv import Laola1TvIE
from .letv import (
LetvIE,
LetvTvIE,
LetvPlaylistIE
)
from .libsyn import LibsynIE
from .lifenews import LifeNewsIE
from .liveleak import LiveLeakIE
from .livestream import (
LivestreamIE,
LivestreamOriginalIE,
LivestreamShortenerIE,
)
from .lnkgo import LnkGoIE
from .lrt import LRTIE
from .lynda import (
LyndaIE,
LyndaCourseIE
)
from .m6 import M6IE
from .macgamestore import MacGameStoreIE
from .mailru import MailRuIE
from .malemotion import MalemotionIE
from .mdr import MDRIE
from .metacafe import MetacafeIE
from .metacritic import MetacriticIE
from .mgoon import MgoonIE
from .minhateca import MinhatecaIE
from .ministrygrid import MinistryGridIE
from .mit import TechTVMITIE, MITIE, OCWMITIE
from .mitele import MiTeleIE
from .mixcloud import MixcloudIE
from .mlb import MLBIE
from .mpora import MporaIE
from .moevideo import MoeVideoIE
from .mofosex import MofosexIE
from .mojvideo import MojvideoIE
from .moniker import MonikerIE
from .mooshare import MooshareIE
from .morningstar import MorningstarIE
from .motherless import MotherlessIE
from .motorsport import MotorsportIE
from .movieclips import MovieClipsIE
from .moviezine import MoviezineIE
from .movshare import MovShareIE
from .mtv import (
MTVIE,
MTVServicesEmbeddedIE,
MTVIggyIE,
)
from .muenchentv import MuenchenTVIE
from .musicplayon import MusicPlayOnIE
from .musicvault import MusicVaultIE
from .muzu import MuzuTVIE
from .myspace import MySpaceIE, MySpaceAlbumIE
from .myspass import MySpassIE
from .myvideo import MyVideoIE
from .myvidster import MyVidsterIE
from .nationalgeographic import NationalGeographicIE
from .naver import NaverIE
from .nba import NBAIE
from .nbc import (
NBCIE,
NBCNewsIE,
)
from .ndr import NDRIE
from .ndtv import NDTVIE
from .netzkino import NetzkinoIE
from .nerdcubed import NerdCubedFeedIE
from .nerdist import NerdistIE
from .newgrounds import NewgroundsIE
from .newstube import NewstubeIE
from .nextmedia import (
NextMediaIE,
NextMediaActionNewsIE,
AppleDailyRealtimeNewsIE,
AppleDailyAnimationNewsIE
)
from .nfb import NFBIE
from .nfl import NFLIE
from .nhl import (
NHLIE,
NHLNewsIE,
NHLVideocenterIE,
)
from .niconico import NiconicoIE, NiconicoPlaylistIE
from .ninegag import NineGagIE
from .noco import NocoIE
from .normalboots import NormalbootsIE
from .nosvideo import NosVideoIE
from .novamov import NovaMovIE
from .nowness import NownessIE
from .nowvideo import NowVideoIE
from .npo import (
NPOIE,
NPOLiveIE,
NPORadioIE,
NPORadioFragmentIE,
TegenlichtVproIE,
)
from .nrk import (
NRKIE,
NRKPlaylistIE,
NRKTVIE,
)
from .ntvde import NTVDeIE
from .ntvru import NTVRuIE
from .nytimes import NYTimesIE
from .nuvid import NuvidIE
from .odnoklassniki import OdnoklassnikiIE
from .oktoberfesttv import OktoberfestTVIE
from .ooyala import OoyalaIE
from .openfilm import OpenFilmIE
from .orf import (
ORFTVthekIE,
ORFOE1IE,
ORFFM4IE,
ORFIPTVIE,
)
from .parliamentliveuk import ParliamentLiveUKIE
from .patreon import PatreonIE
from .pbs import PBSIE
from .phoenix import PhoenixIE
from .photobucket import PhotobucketIE
from .planetaplay import PlanetaPlayIE
from .pladform import PladformIE
from .played import PlayedIE
from .playfm import PlayFMIE
from .playvid import PlayvidIE
from .playwire import PlaywireIE
from .podomatic import PodomaticIE
from .pornhd import PornHdIE
from .pornhub import (
PornHubIE,
PornHubPlaylistIE,
)
from .pornotube import PornotubeIE
from .pornoxo import PornoXOIE
from .primesharetv import PrimeShareTVIE
from .promptfile import PromptFileIE
from .prosiebensat1 import ProSiebenSat1IE
from .puls4 import Puls4IE
from .pyvideo import PyvideoIE
from .quickvid import QuickVidIE
from .r7 import R7IE
from .radiode import RadioDeIE
from .radiobremen import RadioBremenIE
from .radiofrance import RadioFranceIE
from .rai import RaiIE
from .rbmaradio import RBMARadioIE
from .redtube import RedTubeIE
from .restudy import RestudyIE
from .reverbnation import ReverbNationIE
from .ringtv import RingTVIE
from .ro220 import Ro220IE
from .rottentomatoes import RottenTomatoesIE
from .roxwel import RoxwelIE
from .rtbf import RTBFIE
from .rte import RteIE
from .rtlnl import RtlNlIE
from .rtlnow import RTLnowIE
from .rtl2 import RTL2IE
from .rtp import RTPIE
from .rts import RTSIE
from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE
from .ruhd import RUHDIE
from .rutube import (
RutubeIE,
RutubeChannelIE,
RutubeEmbedIE,
RutubeMovieIE,
RutubePersonIE,
)
from .rutv import RUTVIE
from .sandia import SandiaIE
from .sapo import SapoIE
from .savefrom import SaveFromIE
from .sbs import SBSIE
from .scivee import SciVeeIE
from .screencast import ScreencastIE
from .screencastomatic import ScreencastOMaticIE
from .screenwavemedia import CinemassacreIE, ScreenwaveMediaIE, TeamFourIE
from .servingsys import ServingSysIE
from .sexu import SexuIE
from .sexykarma import SexyKarmaIE
from .shared import SharedIE
from .sharesix import ShareSixIE
from .sina import SinaIE
from .slideshare import SlideshareIE
from .slutload import SlutloadIE
from .smotri import (
SmotriIE,
SmotriCommunityIE,
SmotriUserIE,
SmotriBroadcastIE,
)
from .snotr import SnotrIE
from .sockshare import SockshareIE
from .sohu import SohuIE
from .soundcloud import (
SoundcloudIE,
SoundcloudSetIE,
SoundcloudUserIE,
SoundcloudPlaylistIE
)
from .soundgasm import (
SoundgasmIE,
SoundgasmProfileIE
)
from .southpark import (
SouthParkIE,
SouthparkDeIE,
)
from .space import SpaceIE
from .spankwire import SpankwireIE
from .spiegel import SpiegelIE, SpiegelArticleIE
from .spiegeltv import SpiegeltvIE
from .spike import SpikeIE
from .sport5 import Sport5IE
from .sportbox import SportBoxIE
from .sportdeutschland import SportDeutschlandIE
from .srmediathek import SRMediathekIE
from .ssa import SSAIE
from .stanfordoc import StanfordOpenClassroomIE
from .steam import SteamIE
from .streamcloud import StreamcloudIE
from .streamcz import StreamCZIE
from .streetvoice import StreetVoiceIE
from .sunporno import SunPornoIE
from .svtplay import SVTPlayIE
from .swrmediathek import SWRMediathekIE
from .syfy import SyfyIE
from .sztvhu import SztvHuIE
from .tagesschau import TagesschauIE
from .tapely import TapelyIE
from .tass import TassIE
from .teachertube import (
TeacherTubeIE,
TeacherTubeUserIE,
)
from .teachingchannel import TeachingChannelIE
from .teamcoco import TeamcocoIE
from .techtalks import TechTalksIE
from .ted import TEDIE
from .telebruxelles import TeleBruxellesIE
from .telecinco import TelecincoIE
from .telemb import TeleMBIE
from .teletask import TeleTaskIE
from .tenplay import TenPlayIE
from .testurl import TestURLIE
from .testtube import TestTubeIE
from .tf1 import TF1IE
from .theonion import TheOnionIE
from .theplatform import ThePlatformIE
from .thesixtyone import TheSixtyOneIE
from .thisav import ThisAVIE
from .tinypic import TinyPicIE
from .tlc import TlcIE, TlcDeIE
from .tmz import TMZIE
from .tnaflix import TNAFlixIE
from .thvideo import (
THVideoIE,
THVideoPlaylistIE
)
from .toutv import TouTvIE
from .toypics import ToypicsUserIE, ToypicsIE
from .traileraddict import TrailerAddictIE
from .trilulilu import TriluliluIE
from .trutube import TruTubeIE
from .tube8 import Tube8IE
from .tudou import TudouIE
from .tumblr import TumblrIE
from .tunein import TuneInIE
from .turbo import TurboIE
from .tutv import TutvIE
from .tv4 import TV4IE
from .tvigle import TvigleIE
from .tvp import TvpIE, TvpSeriesIE
from .tvplay import TVPlayIE
from .tweakers import TweakersIE
from .twentyfourvideo import TwentyFourVideoIE
from .twitch import (
TwitchVideoIE,
TwitchChapterIE,
TwitchVodIE,
TwitchProfileIE,
TwitchPastBroadcastsIE,
TwitchBookmarksIE,
TwitchStreamIE,
)
from .ubu import UbuIE
from .udemy import (
UdemyIE,
UdemyCourseIE
)
from .ultimedia import UltimediaIE
from .unistra import UnistraIE
from .urort import UrortIE
from .ustream import UstreamIE, UstreamChannelIE
from .vbox7 import Vbox7IE
from .veehd import VeeHDIE
from .veoh import VeohIE
from .vesti import VestiIE
from .vevo import VevoIE
from .vgtv import VGTVIE
from .vh1 import VH1IE
from .vice import ViceIE
from .viddler import ViddlerIE
from .videobam import VideoBamIE
from .videodetective import VideoDetectiveIE
from .videolecturesnet import VideoLecturesNetIE
from .videofyme import VideofyMeIE
from .videomega import VideoMegaIE
from .videopremium import VideoPremiumIE
from .videott import VideoTtIE
from .videoweed import VideoWeedIE
from .vidme import VidmeIE
from .vidzi import VidziIE
from .vier import VierIE, VierVideosIE
from .viewster import ViewsterIE
from .vimeo import (
VimeoIE,
VimeoAlbumIE,
VimeoChannelIE,
VimeoGroupsIE,
VimeoLikesIE,
VimeoReviewIE,
VimeoUserIE,
VimeoWatchLaterIE,
)
from .vimple import VimpleIE
from .vine import (
VineIE,
VineUserIE,
)
from .viki import VikiIE
from .vk import (
VKIE,
VKUserVideosIE,
)
from .vodlocker import VodlockerIE
from .vporn import VpornIE
from .vrt import VRTIE
from .vube import VubeIE
from .vuclip import VuClipIE
from .vulture import VultureIE
from .walla import WallaIE
from .washingtonpost import WashingtonPostIE
from .wat import WatIE
from .wayofthemaster import WayOfTheMasterIE
from .wdr import (
WDRIE,
WDRMobileIE,
WDRMausIE,
)
from .webofstories import WebOfStoriesIE
from .weibo import WeiboIE
from .wimp import WimpIE
from .wistia import WistiaIE
from .worldstarhiphop import WorldStarHipHopIE
from .wrzuta import WrzutaIE
from .wsj import WSJIE
from .xbef import XBefIE
from .xboxclips import XboxClipsIE
from .xhamster import XHamsterIE
from .xminus import XMinusIE
from .xnxx import XNXXIE
from .xvideos import XVideosIE
from .xtube import XTubeUserIE, XTubeIE
from .xuite import XuiteIE
from .xxxymovies import XXXYMoviesIE
from .yahoo import (
YahooIE,
YahooSearchIE,
)
from .yam import YamIE
from .yandexmusic import (
YandexMusicTrackIE,
YandexMusicAlbumIE,
YandexMusicPlaylistIE,
)
from .yesjapan import YesJapanIE
from .ynet import YnetIE
from .youjizz import YouJizzIE
from .youku import YoukuIE
from .youporn import YouPornIE
from .yourupload import YourUploadIE
from .youtube import (
YoutubeIE,
YoutubeChannelIE,
YoutubeFavouritesIE,
YoutubeHistoryIE,
YoutubePlaylistIE,
YoutubeRecommendedIE,
YoutubeSearchDateIE,
YoutubeSearchIE,
YoutubeSearchURLIE,
YoutubeShowIE,
YoutubeSubscriptionsIE,
YoutubeTruncatedIDIE,
YoutubeTruncatedURLIE,
YoutubeUserIE,
YoutubeWatchLaterIE,
)
from .zapiks import ZapiksIE
from .zdf import ZDFIE, ZDFChannelIE
from .zingmp3 import (
ZingMp3SongIE,
ZingMp3AlbumIE,
)
# Collect every extractor class imported above: every module-level name that
# ends in 'IE', except GenericIE, which must be tried last.
_ALL_CLASSES = [
    klass
    for name, klass in globals().items()
    if name.endswith('IE') and name != 'GenericIE'
]
# GenericIE is the catch-all fallback, so append it at the very end; matching
# order matters (see gen_extractors below).
_ALL_CLASSES.append(GenericIE)
def gen_extractors():
    """Return a list with an instance of every supported extractor.

    Order is significant: the first extractor whose pattern matches a URL is
    the one that handles it.
    """
    instances = []
    for extractor_class in _ALL_CLASSES:
        instances.append(extractor_class())
    return instances
def list_extractors(age_limit):
    """Return the extractors suitable for the given age, sorted by IE name."""
    suitable = [ie for ie in gen_extractors() if ie.is_suitable(age_limit)]
    suitable.sort(key=lambda ie: ie.IE_NAME.lower())
    return suitable
def get_info_extractor(ie_name):
    """Return the info extractor class whose name is ie_name + 'IE'."""
    class_name = ie_name + 'IE'
    return globals()[class_name]
| |
#!/usr/bin/env python3
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import itertools
import json
import os
import glob
import platform
import re
import shutil
import sys
import tempfile
# Directory containing this script.
_HERE_PATH = os.path.dirname(__file__)
# Repository root, four directory levels above this script.
_SRC_PATH = os.path.normpath(os.path.join(_HERE_PATH, '..', '..', '..', '..'))
_CWD = os.getcwd()  # NOTE(dbeam): this is typically out/<gn_name>/.
# Make the bundled node toolchain wrappers importable before importing them.
sys.path.append(os.path.join(_SRC_PATH, 'third_party', 'node'))
import node
import node_modules
# These files are already combined and minified.
_BASE_EXCLUDES = []
for excluded_file in [
'resources/polymer/v1_0/web-animations-js/web-animations-next-lite.min.js',
'resources/css/roboto.css',
'resources/css/text_defaults.css',
'resources/css/text_defaults_md.css',
'resources/mojo/mojo/public/js/mojo_bindings_lite.html',
'resources/mojo/mojo/public/mojom/base/time.mojom.html',
'resources/mojo/mojo/public/mojom/base/time.mojom-lite.js',
'resources/mojo/chromeos/services/network_config/public/mojom/network_types.mojom.html',
'resources/mojo/services/network/public/mojom/ip_address.mojom.html',
'resources/polymer/v3_0/polymer/polymer_bundled.min.js',
'resources/js/load_time_data.m.js',
]:
# Exclude both the chrome://resources form and the scheme-relative form for
# files used in Polymer 3.
_BASE_EXCLUDES.append("chrome://" + excluded_file)
_BASE_EXCLUDES.append("//" + excluded_file)
def _request_list_path(out_path, target_name):
# Using |target_name| as a prefix which is guaranteed to be unique within the
# same folder, to avoid problems when multiple optimize_webui() targets in the
# same BUILD.gn file exist.
return os.path.join(out_path, target_name + '_requestlist.txt')
def _get_dep_path(dep, host_url, in_path):
  """Map a bundled dependency to a path relative to the build working dir.

  Host-URL deps are rewritten onto the input folder; other absolute
  (chrome:// or scheme-relative //) deps are returned unchanged; plain
  relative deps are joined onto the input folder.
  """
  rel_in_path = os.path.relpath(in_path, _CWD)
  if dep.startswith(host_url):
    return dep.replace(host_url, rel_in_path)
  if dep.startswith(('chrome://', '//')):
    return dep
  return rel_in_path + '/' + dep
# Get a list of all files that were bundled with rollup and update the
# depfile accordingly such that Ninja knows when to re-trigger.
def _update_dep_file(in_folder, args, manifest):
  """Write a Ninja depfile listing every source that went into the bundles."""
  in_path = os.path.join(_CWD, in_folder)

  # Gather the dependencies of all bundled root files.
  all_deps = []
  for bundle_deps in manifest.values():
    all_deps.extend(bundle_deps)

  # Rewrite each dependency (host URL, chrome:// / scheme-relative URL, or
  # relative path) into a source path Ninja can stat.
  normalized_deps = [
      os.path.normpath(_get_dep_path(dep, args.host_url, in_path))
      for dep in all_deps
  ]

  out_file_name = args.js_out_files[0]
  with open(os.path.join(_CWD, args.depfile), 'w') as f:
    depfile_target = os.path.join(args.out_folder, out_file_name)
    f.write(depfile_target + ': ' + ' '.join(normalized_deps))
# Autogenerate a rollup config file so that we can import the plugin and
# pass it information about the location of the directories and files to exclude
# from the bundle.
def _generate_rollup_config(tmp_out_dir, path_to_plugin, in_path, host_url,
excludes, external_paths):
rollup_config_file = os.path.join(tmp_out_dir, 'rollup.config.js')
config_content = r'''
import plugin from '{plugin_path}';
export default ({{
plugins: [
plugin('{in_path}', '{host_url}', {exclude_list},
{external_path_list}, /* allowEmptyExtension= */ false) ]
}});
'''.format(plugin_path=path_to_plugin.replace('\\', '/'),
in_path=in_path.replace('\\', '/'),
host_url=host_url,
exclude_list=json.dumps(excludes),
external_path_list=json.dumps(external_paths))
with open(rollup_config_file, 'w') as f:
f.write(config_content);
return rollup_config_file;
# Create the manifest file from the sourcemap generated by rollup and return the
# list of bundles.
def _generate_manifest_file(tmp_out_dir, in_path, manifest_out_path):
generated_sourcemaps = glob.glob('%s/*.map' % tmp_out_dir)
manifest = {}
output_filenames = []
for sourcemap_file in generated_sourcemaps:
with open(sourcemap_file, 'r') as f:
sourcemap = json.loads(f.read())
if not 'sources' in sourcemap:
raise Exception('rollup could not construct source map')
sources = sourcemap['sources']
replaced_sources = []
for source in sources:
replaced_sources.append(
source.replace('../' + os.path.basename(in_path) + "/", ""))
filename = sourcemap_file[:-len('.map')]
manifest[os.path.basename(filename)] = replaced_sources
output_filenames.append(filename)
with open(manifest_out_path, 'w') as f:
f.write(json.dumps(manifest))
return output_filenames
def _bundle_v3(tmp_out_dir, in_path, out_path, manifest_out_path, args,
               excludes, external_paths):
  """Bundle JS module entry points with rollup into |tmp_out_dir|.

  Generates a rollup config, runs rollup via the bundled node toolchain, and
  writes a request-list manifest derived from the emitted sourcemaps.
  Returns the list of bundle file paths in |tmp_out_dir|. Raises
  AssertionError when output names/counts mismatch expectations or a bundle
  still contains an unprocessed <if expr> block.
  """
  if not os.path.exists(tmp_out_dir):
    os.makedirs(tmp_out_dir)
  # The plugin lives next to this script.
  path_to_plugin = os.path.join(
      os.path.abspath(_HERE_PATH), 'rollup_plugin.js')
  rollup_config_file = _generate_rollup_config(tmp_out_dir, path_to_plugin,
                                               in_path, args.host_url, excludes,
                                               external_paths)
  rollup_args = [os.path.join(in_path, f) for f in args.js_module_in_files]

  # Confirm names are as expected. This is necessary to avoid having to replace
  # import statements in the generated output files.
  # TODO(rbpotter): Is it worth adding import statement replacement to support
  # arbitrary names?
  bundled_paths = []
  for index, js_file in enumerate(args.js_module_in_files):
    base_file_name = os.path.basename(js_file)
    # Each output must be named '<input base>.rollup.js'.
    expected_name = '%s.rollup.js' % base_file_name[:-len('.js')]
    assert args.js_out_files[index] == expected_name, \
        'Output file corresponding to %s should be named %s' % \
        (js_file, expected_name)
    bundled_paths.append(os.path.join(tmp_out_dir, expected_name))

  # This indicates that rollup is expected to generate a shared chunk file as
  # well as one file per module. Set its name using --chunkFileNames. Note:
  # Currently, this only supports 2 entry points, which generate 2 corresponding
  # outputs and 1 shared output.
  if (len(args.js_out_files) == 3):
    assert len(args.js_module_in_files) == 2, \
        'Expect 2 module entry points for generating 3 outputs'
    shared_file_name = args.js_out_files[2]
    rollup_args += [ '--chunkFileNames', shared_file_name ]
    bundled_paths.append(os.path.join(tmp_out_dir, shared_file_name))

  # Run rollup; sourcemaps are emitted (without inlined sources) so that the
  # manifest can be reconstructed from them below.
  node.RunNode(
      [node_modules.PathToRollup()] + rollup_args + [
          '--format', 'esm',
          '--dir', tmp_out_dir,
          '--entryFileNames', '[name].rollup.js',
          '--sourcemap', '--sourcemapExcludeSources',
          '--config', rollup_config_file,
          '--silent',
      ])

  # Create the manifest file from the sourcemaps generated by rollup.
  generated_paths = _generate_manifest_file(tmp_out_dir, in_path,
                                            manifest_out_path)
  assert len(generated_paths) == len(bundled_paths), \
      'unexpected number of bundles - %s - generated by rollup' % \
      (len(generated_paths))

  # Sanity-check that no grit <if expr> preprocessor blocks leaked into the
  # bundles (they would be emitted verbatim into shipped JS).
  for bundled_file in bundled_paths:
    with open(bundled_file, 'r', encoding='utf-8') as f:
      output = f.read()
      assert "<if expr" not in output, \
          'Unexpected <if expr> found in bundled output. Check that all ' + \
          'input files using such expressions are preprocessed.'

  return bundled_paths
def _optimize(in_folder, args):
  """Bundle, CSS-build and minify the WebUI files under |in_folder|.

  Pipeline: rollup bundling (_bundle_v3) -> polymer-css-build -> Terser,
  all staged in a temporary directory that is removed even on failure.
  Returns the path of the request-list manifest file written by bundling.
  """
  in_path = os.path.normpath(os.path.join(_CWD, in_folder)).replace('\\', '/')
  out_path = os.path.join(_CWD, args.out_folder).replace('\\', '/')
  manifest_out_path = _request_list_path(out_path, args.target_name)
  tmp_out_dir = tempfile.mkdtemp(dir=out_path).replace('\\', '/')
  excludes = _BASE_EXCLUDES + [
      # This file is dynamically created by C++. Should always be imported with
      # a relative path.
      'strings.m.js',
  ]
  excludes.extend(args.exclude or [])
  external_paths = args.external_paths or []
  try:
    pcb_out_paths = [os.path.join(tmp_out_dir, f) for f in args.js_out_files]
    bundled_paths = _bundle_v3(tmp_out_dir, in_path, out_path,
                               manifest_out_path, args, excludes,
                               external_paths)

    # Run polymer-css-build.
    node.RunNode([node_modules.PathToPolymerCssBuild()] +
                 ['--polymer-version', '2'] +
                 ['--no-inline-includes', '-f'] +
                 bundled_paths + ['-o'] + pcb_out_paths)

    # Pass the JS files through Terser and write the output to its final
    # destination.
    for index, js_out_file in enumerate(args.js_out_files):
      # Raw string: the backslashes belong to Terser's comment-preserving
      # regex, not to Python (previously '\<' / '\/' were invalid escapes).
      node.RunNode([node_modules.PathToTerser(),
                    os.path.join(tmp_out_dir, js_out_file),
                    '--comments', r'/Copyright|license|LICENSE|\<\/?if/',
                    '--output', os.path.join(out_path, js_out_file)])
  finally:
    # Always remove the staging directory, even when a tool above fails.
    shutil.rmtree(tmp_out_dir)
  return manifest_out_path
def main(argv):
  """Parse arguments, optimize the input folder, and emit manifests/depfile."""
  parser = argparse.ArgumentParser()
  parser.add_argument('--depfile', required=True)
  parser.add_argument('--target_name', required=True)
  parser.add_argument('--exclude', nargs='*')
  parser.add_argument('--external_paths', nargs='*')
  parser.add_argument('--host', required=True)
  parser.add_argument('--input', required=True)
  parser.add_argument('--js_out_files', nargs='*', required=True)
  parser.add_argument('--out_folder', required=True)
  parser.add_argument('--js_module_in_files', nargs='*', required=True)
  parser.add_argument('--out-manifest')
  args = parser.parse_args(argv)

  # NOTE(dbeam): on Windows, GN can send dirs/like/this. When joined, you might
  # get dirs/like/this\file.txt. This looks odd to windows. Normalize to right
  # the slashes.
  for path_attr in ('depfile', 'input', 'out_folder'):
    setattr(args, path_attr, os.path.normpath(getattr(args, path_attr)))

  # --host may be a bare name (implies chrome://<name>/) or a full URL.
  if '://' in args.host:
    args.host_url = args.host
  else:
    args.host_url = 'chrome://%s/' % args.host

  manifest_out_path = _optimize(args.input, args)

  # _optimize() generated an output manifest file describing every bundled
  # file; load it back.
  with open(manifest_out_path, 'r') as f:
    manifest = json.load(f)

  # Optionally emit a manifest used later to auto-generate a grd file.
  if args.out_manifest:
    out_manifest_path = os.path.normpath(os.path.join(_CWD, args.out_manifest))
    with open(out_manifest_path, 'w') as manifest_file:
      json.dump({
          'base_dir': args.out_folder,
          'files': list(manifest.keys()),
      }, manifest_file)

  _update_dep_file(args.input, args, manifest)


if __name__ == '__main__':
  main(sys.argv[1:])
| |
#!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Cici,Li<cici.x.li@intel.com>
# Lin, Wanming <wanming.lin@intel.com>
import os
import sys
import commands
import shutil
import glob
import fnmatch
import re
import json
# Python 2: re-expose sys.setdefaultencoding() (hidden after interpreter
# startup) so implicit str<->unicode conversions default to UTF-8.
reload(sys)
sys.setdefaultencoding("utf-8")
# Absolute location of this script and the derived tool/plugin directories.
script_path = os.path.realpath(__file__)
const_path = os.path.dirname(script_path)
tool_path = const_path + "/../tools/"
plugin_tool = const_path + "/../tools/cordova-plugin-crosswalk-webview/"
# Staging directory on the host for built sample apps.
testapp_path = "/tmp/cordova-sampleapp/"
def setUp():
    """Read the test configuration into module-level globals.

    Populates:
      device            -- target device id from the DEVICE_ID env variable.
      ARCH              -- 'arm' or 'x86', from ../arch.txt.
      MODE              -- 'shared' or 'embedded', from ../mode.txt.
      CORDOVA_VERSION   -- '3.6' or '4.x', from ../cordova-version.
      CROSSWALK_VERSION -- crosswalk "main-version" from ../VERSION
                           (only when CORDOVA_VERSION is '4.x').

    Exits the process with status 1 on missing/invalid configuration.
    """
    global ARCH, MODE, CORDOVA_VERSION, device, CROSSWALK_VERSION
    device = os.environ.get('DEVICE_ID')
    if not device:
        print (" get env error\n")
        sys.exit(1)
    # 'with' guarantees the config files are closed even on the error paths
    # (the original leaked open handles when exiting early).
    with open(const_path + "/../arch.txt", 'r') as f_arch:
        arch_tmp = f_arch.read().strip("\n\t")
    if arch_tmp == "arm":
        ARCH = "arm"
    elif arch_tmp == "x86":
        ARCH = "x86"
    else:
        print (
            " get arch error, the content of arch.txt should be 'arm' or 'x86'\n")
        sys.exit(1)
    with open(const_path + "/../mode.txt", 'r') as f_mode:
        mode_tmp = f_mode.read().strip("\n\t")
    if mode_tmp == "shared":
        MODE = "shared"
    elif mode_tmp == "embedded":
        MODE = "embedded"
    else:
        print (
            " get mode error, the content of mode.txt should be 'shared' or 'embedded'\n")
        sys.exit(1)
    with open(const_path + "/../cordova-version", 'r') as f_version:
        if f_version.read().strip("\n\t") != "3.6":
            CORDOVA_VERSION = "4.x"
        else:
            CORDOVA_VERSION = "3.6"
    if CORDOVA_VERSION == "4.x":
        # VERSION is a JSON file whose "main-version" pins the crosswalk
        # release the webview plugin should target.
        with open(const_path + "/../VERSION", "rt") as pkg_version_file:
            pkg_version_raw = pkg_version_file.read()
        pkg_version_json = json.loads(pkg_version_raw)
        CROSSWALK_VERSION = pkg_version_json["main-version"]
def create(appname, pkgname, mode, sourcecodepath, replace_index_list, self):
    """Create a Cordova project and install the crosswalk webview plugin.

    appname/pkgname        -- project name and Android package name.
    mode                   -- 'shared' or 'embedded' crosswalk library mode.
    sourcecodepath         -- optional app source to copy over the generated
                              www/ (or assets/www/ for cordova 3.6) folder.
    replace_index_list     -- optional [key, replacement] applied to
                              index.html via replace_key().
    self                   -- the calling unittest TestCase (used for asserts).

    Returns False on a replace_key failure; otherwise returns None.
    Side effects: changes the process working directory (os.chdir).
    """
    os.chdir(tool_path)
    # Remove a stale project with the same name from a previous run.
    if os.path.exists(os.path.join(tool_path, appname)):
        print "Existing %s project, try to clean up..." % appname
        do_remove(glob.glob(os.path.join(tool_path, appname)))
    print "Create project %s ----------------> START" % appname
    # Cordova 4.x uses the globally-installed CLI; 3.6 uses the bundled
    # cordova/bin/create script (shared mode is a create-time flag there).
    if CORDOVA_VERSION == "4.x":
        cmd = "cordova create %s %s %s" % (appname, pkgname, appname)
    else:
        if mode == "shared":
            cmd = "cordova/bin/create %s %s %s --xwalk-shared-library" % (
                appname, pkgname, appname)
        else:
            cmd = "cordova/bin/create %s %s %s" % (appname, pkgname, appname)
    createstatus = commands.getstatusoutput(cmd)
    self.assertEquals(0, createstatus[0])
    print "\nGenerate project %s ----------------> OK\n" % appname
    result = commands.getstatusoutput("ls")
    self.assertIn(appname, result[1])
    project_root = os.path.join(tool_path, appname)
    if CORDOVA_VERSION == "4.x":
        os.chdir(project_root)
        # Patch config.xml: set the activity name and allow all navigation.
        if not replace_key(os.path.join(project_root, 'config.xml'),
                           '<widget android-activityName="%s"' % appname, '<widget'):
            print "replace key '<widget' failed."
            return False
        if not replace_key(os.path.join(project_root, 'config.xml'),
                           ' <allow-navigation href="*" />\n</widget>', '</widget>'):
            print "replace key '</widget>' failed."
            return False
        print "Add android platforms to this project --------------> START"
        cordova_platform_cmd = "cordova platform add android"
        platformstatus = commands.getstatusoutput(cordova_platform_cmd)
        self.assertEquals(0, platformstatus[0])
        print "Install Crosswalk WebView Plugin --------------> START"
        plugin_install_webview = "cordova plugin add %s --variable CROSSWALK_ANDROID_VERSION=\"%s\"" % (plugin_tool, CROSSWALK_VERSION)
        # NOTE(review): "embedd" looks like a typo for "embedded" — confirm
        # the LIB_MODE values the crosswalk webview plugin actually accepts.
        if mode == "shared":
            plugin_install_cmd = plugin_install_webview + " --variable LIB_MODE=\"shared\""
        else:
            plugin_install_cmd = plugin_install_webview + " --variable LIB_MODE=\"embedd\""
        pluginstatus = commands.getstatusoutput(plugin_install_cmd)
        self.assertEquals(0, pluginstatus[0])
        # Optionally patch index.html, then optionally replace www/ with the
        # provided app sources.
        if replace_index_list is not None and len(replace_index_list) >= 2:
            index_file_path = os.path.join(project_root, "www", "index.html")
            key = replace_index_list[0]
            content = replace_index_list[1]
            if not replace_key(index_file_path, content, key):
                print "replace key: " + key + " failed."
                return False
        if sourcecodepath is not None:
            do_remove(glob.glob(os.path.join(project_root, "www")))
            do_copy(sourcecodepath, os.path.join(tool_path, appname, "www"))
    else:
        # Cordova 3.6 keeps the web app under assets/www instead of www.
        if replace_index_list is not None and len(replace_index_list) >= 2:
            index_file_path = os.path.join(
                project_root,
                "assets",
                "www",
                "index.html")
            key = replace_index_list[0]
            content = replace_index_list[1]
            if not replace_key(index_file_path, content, key):
                print "replace key: " + key + " failed."
                return False
        if sourcecodepath is not None:
            do_remove(glob.glob(os.path.join(project_root, "assets", "www")))
            do_copy(
                sourcecodepath,
                os.path.join(
                    tool_path,
                    appname,
                    "assets",
                    "www"))
def buildGoogleApp(appname, sourcecodepath, self):
    """Create and build a Google cca (Chrome Cordova App) project for Android.

    Args:
        appname: project name; must contain "CIRC" or "EH" (case-insensitive)
            so the matching sample-app sources can be selected.
        sourcecodepath: path to the app sources copied into the tool dir.
        self: the calling TestCase instance (used for assertions).

    Returns False on bad input; command failures fail via assertions.
    """
    os.chdir(tool_path)
    if os.path.exists(os.path.join(tool_path, appname)):
        print("Existing %s project, try to clean up..." % appname)
        do_remove(glob.glob(os.path.join(tool_path, appname)))
    print("Build project %s ----------------> START" % appname)
    if sourcecodepath is None:
        print("sourcecodepath can't be none")
        return False
    if checkContains(appname, "CIRC"):
        cordova_app = os.path.join(tool_path, "circ")
        create_cmd = "cca create " + appname + " --link-to circ/package"
    elif checkContains(appname, "EH"):
        cordova_app = os.path.join(tool_path, "workshop-cca-eh")
        create_cmd = "cca create " + appname + " --link-to workshop-cca-eh/workshop/step4"
    else:
        # BUGFIX: previously fell through with cordova_app/create_cmd
        # undefined, raising NameError for unrecognized app names.
        print("Unknown app name %s, expected it to contain CIRC or EH" % appname)
        return False
    if os.path.exists(cordova_app):
        do_remove(glob.glob(cordova_app))
    if not do_copy(sourcecodepath, cordova_app):
        return False
    print(create_cmd)
    buildstatus = commands.getstatusoutput(create_cmd)
    self.assertEquals(0, buildstatus[0])
    os.chdir(os.path.join(tool_path, appname))
    print("Add android platforms to this project --------------> START")
    add_android_cmd = "cca platform add android"
    addstatus = commands.getstatusoutput(add_android_cmd)
    self.assertEquals(0, addstatus[0])
    print("uninstall webview default plugin from this project --------------> START")
    plugin_uninstall_webview = "cordova plugin remove cordova-plugin-crosswalk-webview"
    uninstallStatus = commands.getstatusoutput(plugin_uninstall_webview)
    self.assertEquals(0, uninstallStatus[0])
    print("Install Crosswalk WebView Plugin --------------> START")
    plugin_install_webview = "cordova plugin add %s --variable CROSSWALK_ANDROID_VERSION=\"%s\"" % (plugin_tool, CROSSWALK_VERSION)
    if MODE == "shared":
        plugin_install_cmd = plugin_install_webview + " --variable LIB_MODE=\"shared\""
    else:
        plugin_install_cmd = plugin_install_webview + " --variable LIB_MODE=\"embedd\""
    pluginstatus = commands.getstatusoutput(plugin_install_cmd)
    self.assertEquals(0, pluginstatus[0])
    build_cmd = "cca build android"
    buildstatus = commands.getstatusoutput(build_cmd)
    self.assertEquals(0, buildstatus[0])
    # The built apk lands under the platform's gradle output directory.
    os.chdir(
        os.path.join(
            tool_path,
            appname,
            "platforms",
            "android",
            "build",
            "outputs",
            "apk"))
    result = commands.getstatusoutput("ls")
    self.assertIn(".apk", result[1])
    print(result[1])
    # The apk file name contains either "android" or the app name itself.
    if "android" in result[1]:
        self.assertIn("android", result[1])
    else:
        self.assertIn(appname, result[1])
def build(appname, isDebug, self):
    """Build the cordova Android project `appname` and sanity-check the APK.

    With CORDOVA_VERSION "4.x" the global `cordova` CLI is used and the apk
    ends up under platforms/android/build/outputs/apk; older versions use the
    per-project ./cordova/build script and the bin/ directory.  `isDebug`
    selects a --debug build.  `self` is the calling TestCase (assertions).
    """
    os.chdir(os.path.join(tool_path, appname))
    print "Build project %s ----------------> START" % appname
    if CORDOVA_VERSION == "4.x":
        cmd = "cordova build android"
        if isDebug == True:
            print "build debug app"
            cmd = "cordova build android --debug"
    else:
        cmd = "./cordova/build"
        if isDebug == True:
            print "build debug app"
            cmd = "./cordova/build --debug"
    print cmd
    buildstatus = commands.getstatusoutput(cmd)
    self.assertEquals(0, buildstatus[0])
    print "\nBuild project %s ----------------> OK\n" % appname
    # Locate the output directory of the built apk (layout depends on the
    # cordova version used above).
    if CORDOVA_VERSION == "4.x":
        os.chdir(
            os.path.join(
                tool_path,
                appname,
                "platforms",
                "android",
                "build",
                "outputs",
                "apk"))
    else:
        os.chdir(os.path.join(tool_path, appname, "bin"))
    result = commands.getstatusoutput("ls")
    self.assertIn(".apk", result[1])
    print result[1]
    # The apk file name contains either "android" or the app name itself.
    if "android" in result[1]:
        self.assertIn("android", result[1])
    else:
        self.assertIn(appname, result[1])
def run(appname, self):
    """Deploy and launch project `appname` on the device via cordova."""
    os.chdir(os.path.join(tool_path, appname))
    print("Run project %s ----------------> START" % appname)
    # Cordova 4.x uses the global CLI; older versions ship a per-project script.
    run_cmd = "cordova run android" if CORDOVA_VERSION == "4.x" else "./cordova/run"
    print(run_cmd)
    runstatus = commands.getstatusoutput(run_cmd)
    self.assertEquals(0, runstatus[0])
    self.assertIn("LAUNCH SUCCESS", runstatus[1])
    print("\nRun project %s ----------------> OK\n" % appname)
def app_install(appname, pkgname, self):
    """Install the APK whose file name contains `appname` onto `device`.

    Looks the apk up in `testapp_path`, installs it with `adb install -r`,
    and asserts afterwards that `pkgname` is actually installed.
    Returns False when no matching apk file exists.
    """
    print("Install APK ----------------> START")
    os.chdir(testapp_path)
    apk_file = commands.getstatusoutput("ls | grep %s" % appname)[1]
    if apk_file == "":
        print("Error: No app: %s found in directory: %s" % (appname, testapp_path))
        # BUGFIX: previously fell through and ran "adb install -r " with an
        # empty file name; bail out instead.
        return False
    cmd_inst = "adb -s " + device + " install -r " + apk_file
    inststatus = commands.getstatusoutput(cmd_inst)
    self.assertEquals(0, inststatus[0])
    print("Install APK ----------------> OK")
    self.assertTrue(check_app_installed(pkgname, self))
def checkContains(origin_str=None, key_str=None):
    """Case-insensitively report whether `key_str` occurs in `origin_str`."""
    return key_str.upper() in origin_str.upper()
def check_app_installed(pkgname, self):
    """Return True when a package matching `pkgname` is installed on `device`."""
    print("Check if app is installed ----------------> START")
    query_cmd = "adb -s " + device + \
        " shell pm list packages |grep %s" % pkgname
    # grep exits non-zero when nothing matched, i.e. not installed.
    if commands.getstatusoutput(query_cmd)[0] != 0:
        print("App is uninstalled.")
        return False
    print("App is installed.")
    return True
def app_launch(appname, pkgname, self):
    """Start activity pkgname/.appname via adb and assert no launch error."""
    print("Launch APK ----------------> START")
    launch_cmd = "adb -s " + device + " shell am start -n %s/.%s" % (pkgname, appname)
    result = commands.getstatusoutput(launch_cmd)
    # `am start` reports failures in its output rather than its exit code.
    self.assertNotIn("error", result[1].lower())
    print("Launch APK ----------------> OK")
# Find whether the app has launched
def check_app_launched(pkgname, self):
    """Return True when a process for `pkgname` is running on `device`."""
    cmd_acti = "adb -s " + device + " shell ps | grep %s" % pkgname
    launched = commands.getstatusoutput(cmd_acti)
    # grep exits non-zero when no matching process line was found.
    if launched[0] != 0:
        print("App haven't launched.")
        return False
    # Fixed wording of the success message (was "App is have launched.").
    print("App has launched.")
    return True
def app_stop(pkgname, self):
    """Force-stop `pkgname` on `device` and assert adb exited cleanly."""
    print("Stop APK ----------------> START")
    stop_cmd = "adb -s " + device + " shell am force-stop %s" % pkgname
    result = commands.getstatusoutput(stop_cmd)
    self.assertEquals(0, result[0])
    print("Stop APK ----------------> OK")
def app_uninstall(pkgname, self):
    """Uninstall `pkgname` from `device` and assert adb exited cleanly."""
    print("Uninstall APK ----------------> START")
    uninstall_cmd = "adb -s " + device + " uninstall %s" % (pkgname)
    result = commands.getstatusoutput(uninstall_cmd)
    self.assertEquals(0, result[0])
    print("Uninstall APK ----------------> OK")
def replace_key(file_path, content, key):
    """Replace every occurrence of `key` with `content` in `file_path`.

    Returns True when `key` was found and replaced, False when the file
    does not contain `key` (the file is then left untouched).
    """
    print("Replace value ----------------> START")
    # Use context managers so the handles are closed even on errors.
    with open(file_path, "r") as f:
        f_content = f.read()
    if key not in f_content:
        # BUGFIX: the failure message previously printed the arguments in
        # the wrong order ("replace <content> with <key>"); it is `key`
        # that gets replaced by `content`.
        print("Fail to replace: %s with: %s in file: %s" % (key, content, file_path))
        return False
    with open(file_path, "w") as f:
        f.write(f_content.replace(key, content))
    print("Replace value ----------------> OK")
    return True
def do_remove(target_file_list=None):
    """Delete every path in `target_file_list` (directories recursively).

    Returns False on the first failure, True when all removals succeed.
    """
    for path in target_file_list:
        print("Removing %s" % path)
        try:
            remover = shutil.rmtree if os.path.isdir(path) else os.remove
            remover(path)
        except Exception as e:
            print("Fail to remove file %s: %s" % (path, e))
            return False
    return True
def do_copy(src_item=None, dest_item=None):
    """Copy `src_item` (file or directory tree) to `dest_item`.

    Directories are merged in place via overwriteCopy; for plain files any
    missing parent directories of `dest_item` are created first.
    Returns True on success, False when any step raised.
    """
    print("Copying %s to %s" % (src_item, dest_item))
    try:
        if os.path.isdir(src_item):
            overwriteCopy(src_item, dest_item, symlinks=True)
            return True
        parent = os.path.dirname(dest_item)
        if not os.path.exists(parent):
            print("Create non-existent dir: %s" % parent)
            os.makedirs(parent)
        shutil.copy2(src_item, dest_item)
        return True
    except Exception as e:
        print("Fail to copy file %s: %s" % (src_item, e))
        return False
def overwriteCopy(src, dest, symlinks=False, ignore=None):
    """Recursively copy directory `src` into `dest`, merging in place.

    Unlike shutil.copytree, `dest` may already exist.  With symlinks=True
    symbolic links are re-created (their mode mirrored where the platform
    supports os.lchmod).  `ignore` follows the shutil.copytree callback
    convention: called with (dir, names), returns names to skip.
    """
    if not os.path.exists(dest):
        os.makedirs(dest)
        shutil.copystat(src, dest)
    entries = os.listdir(src)
    if ignore:
        skipped = ignore(src, entries)
        entries = [name for name in entries if name not in skipped]
    for name in entries:
        src_path = os.path.join(src, name)
        dest_path = os.path.join(dest, name)
        if symlinks and os.path.islink(src_path):
            if os.path.lexists(dest_path):
                os.remove(dest_path)
            os.symlink(os.readlink(src_path), dest_path)
            try:
                link_mode = stat.S_IMODE(os.lstat(src_path).st_mode)
                # os.lchmod is unavailable on e.g. Linux; best effort only.
                os.lchmod(dest_path, link_mode)
            except Exception:
                pass
        elif os.path.isdir(src_path):
            overwriteCopy(src_path, dest_path, symlinks, ignore)
        else:
            shutil.copy2(src_path, dest_path)
| |
"""
This module provides convenient functions to transform sympy expressions to
lambda functions which can be used to calculate numerical values very fast.
"""
from __future__ import print_function, division
from sympy.external import import_module
from sympy.core.compatibility import exec_, is_sequence, iterable, string_types
from sympy.utilities.decorator import doctest_depends_on
import inspect
# These are the namespaces the lambda functions will use.
# They start empty and are populated lazily by _import().
MATH = {}
MPMATH = {}
NUMPY = {}
SYMPY = {}
# Default namespaces, letting us define translations that can't be defined
# by simple variable maps, like I => 1j
# These are separate from the names above because the above names are modified
# throughout this file, whereas these should remain unmodified.
MATH_DEFAULT = {}
MPMATH_DEFAULT = {}
NUMPY_DEFAULT = {"I": 1j}
SYMPY_DEFAULT = {}
# Mappings between sympy and other modules function names.
MATH_TRANSLATIONS = {
    "Abs": "fabs",
    "ceiling": "ceil",
    "E": "e",
    "ln": "log",
}
MPMATH_TRANSLATIONS = {
    "Abs": "fabs",
    "elliptic_k": "ellipk",
    "elliptic_f": "ellipf",
    "elliptic_e": "ellipe",
    "elliptic_pi": "ellippi",
    "ceiling": "ceil",
    "chebyshevt": "chebyt",
    "chebyshevu": "chebyu",
    "E": "e",
    "I": "j",
    "ln": "log",
    #"lowergamma":"lower_gamma",
    "oo": "inf",
    #"uppergamma":"upper_gamma",
    "LambertW": "lambertw",
    "Matrix": "matrix",
    "MutableDenseMatrix": "matrix",
    "ImmutableMatrix": "matrix",
    "conjugate": "conj",
    "dirichlet_eta": "altzeta",
    "Ei": "ei",
    "Shi": "shi",
    "Chi": "chi",
    "Si": "si",
    "Ci": "ci"
}
# NOTE: lambdify() rewrites the three Matrix entries below to "array" when
# called with use_array=True, so this dict is intentionally mutable state.
NUMPY_TRANSLATIONS = {
    "Abs": "abs",
    "acos": "arccos",
    "acosh": "arccosh",
    "arg": "angle",
    "asin": "arcsin",
    "asinh": "arcsinh",
    "atan": "arctan",
    "atan2": "arctan2",
    "atanh": "arctanh",
    "ceiling": "ceil",
    "E": "e",
    "im": "imag",
    "ln": "log",
    "Matrix": "matrix",
    "MutableDenseMatrix": "matrix",
    "ImmutableMatrix": "matrix",
    "Max": "amax",
    "Min": "amin",
    "oo": "inf",
    "re": "real",
}
# Available modules: maps a backend name to (namespace dict, default
# namespace, translation table, import commands used to fill the namespace).
MODULES = {
    "math": (MATH, MATH_DEFAULT, MATH_TRANSLATIONS, ("from math import *",)),
    "mpmath": (MPMATH, MPMATH_DEFAULT, MPMATH_TRANSLATIONS, ("from sympy.mpmath import *",)),
    "numpy": (NUMPY, NUMPY_DEFAULT, NUMPY_TRANSLATIONS, ("import_module('numpy')",)),
    "sympy": (SYMPY, SYMPY_DEFAULT, {}, (
        "from sympy.functions import *",
        "from sympy.matrices import *",
        "from sympy import Integral, pi, oo, nan, zoo, E, I",)),
}
def _import(module, reload="False"):
    """
    Creates a global translation dictionary for module.

    The argument module has to be one of the following strings: "math",
    "mpmath", "numpy", "sympy".
    These dictionaries map names of python functions to their equivalent in
    other modules.

    NOTE(review): ``reload`` defaults to the *string* ``"False"``, which is
    truthy, so by default an already-populated namespace is cleared and
    rebuilt on every call.  ``lambdify`` appears to rely on this (it mutates
    NUMPY_TRANSLATIONS for ``use_array`` and then calls ``_import("numpy")``
    again), so confirm all callers before changing the default to a boolean.
    """
    try:
        namespace, namespace_default, translations, import_commands = MODULES[
            module]
    except KeyError:
        raise NameError(
            "'%s' module can't be used for lambdification" % module)
    # Clear namespace or exit
    if namespace != namespace_default:
        # The namespace was already generated, don't do it again if not forced.
        if reload:
            namespace.clear()
            namespace.update(namespace_default)
        else:
            return
    for import_command in import_commands:
        if import_command.startswith('import_module'):
            # e.g. "import_module('numpy')": evaluated so optional backends
            # can be probed without raising a hard ImportError.
            module = eval(import_command)
            if module is not None:
                namespace.update(module.__dict__)
            continue
        else:
            try:
                exec_(import_command, {}, namespace)
                continue
            except ImportError:
                pass
        raise ImportError(
            "can't import '%s' with '%s' command" % (module, import_command))
    # Add translated names to namespace
    for sympyname, translation in translations.items():
        namespace[sympyname] = namespace[translation]
@doctest_depends_on(modules=('numpy',))
def lambdify(args, expr, modules=None, printer=None, use_imps=True,
             dummify=True, use_array=False):
    """
    Returns a lambda function for fast calculation of numerical values.

    If not specified differently by the user, SymPy functions are replaced as
    far as possible by either python-math, numpy (if available) or mpmath
    functions - exactly in this order. To change this behavior, the "modules"
    argument can be used. It accepts:

    - the strings "math", "mpmath", "numpy", "sympy"
    - any modules (e.g. math)
    - dictionaries that map names of sympy functions to arbitrary functions
    - lists that contain a mix of the arguments above, with higher priority
      given to entries appearing first.

    The default behavior is to substitute all arguments in the provided
    expression with dummy symbols. This allows for applied functions (e.g.
    f(t)) to be supplied as arguments. Call the function with dummify=False if
    dummy substitution is unwanted (and `args` is not a string). If you want
    to view the lambdified function or provide "sympy" as the module, you
    should probably set dummify=False.

    If numpy is installed, the default behavior is to substitute Sympy Matrices
    with numpy.matrix. If you would rather have a numpy.array returned,
    set use_array=True.

    Usage
    =====

    (1) Use one of the provided modules:

        >>> from sympy import lambdify, sin, tan, gamma
        >>> from sympy.utilities.lambdify import lambdastr
        >>> from sympy.abc import x, y
        >>> f = lambdify(x, sin(x), "math")

        Attention: Functions that are not in the math module will throw a name
                   error when the lambda function is evaluated! So this would
                   be better:

        >>> f = lambdify(x, sin(x)*gamma(x), ("math", "mpmath", "sympy"))

    (2) Use some other module:

        >>> import numpy
        >>> f = lambdify((x,y), tan(x*y), numpy)

        Attention: There are naming differences between numpy and sympy. So if
                   you simply take the numpy module, e.g. sympy.atan will not be
                   translated to numpy.arctan. Use the modified module instead
                   by passing the string "numpy":

        >>> f = lambdify((x,y), tan(x*y), "numpy")
        >>> f(1, 2)
        -2.18503986326
        >>> from numpy import array
        >>> f(array([1, 2, 3]), array([2, 3, 5]))
        [-2.18503986 -0.29100619 -0.8559934 ]

    (3) Use a dictionary defining custom functions:

        >>> def my_cool_function(x): return 'sin(%s) is cool' % x
        >>> myfuncs = {"sin" : my_cool_function}
        >>> f = lambdify(x, sin(x), myfuncs); f(1)
        'sin(1) is cool'

    Examples
    ========

    >>> from sympy.utilities.lambdify import implemented_function, lambdify
    >>> from sympy import sqrt, sin, Matrix
    >>> from sympy import Function
    >>> from sympy.abc import w, x, y, z

    >>> f = lambdify(x, x**2)
    >>> f(2)
    4
    >>> f = lambdify((x, y, z), [z, y, x])
    >>> f(1,2,3)
    [3, 2, 1]
    >>> f = lambdify(x, sqrt(x))
    >>> f(4)
    2.0
    >>> f = lambdify((x, y), sin(x*y)**2)
    >>> f(0, 5)
    0.0
    >>> row = lambdify((x, y), Matrix((x, x + y)).T, modules='sympy')
    >>> row(1, 2)
    Matrix([[1, 3]])
    >>> col = lambdify((x, y), Matrix((x, x + y)), use_array=True)
    >>> col(1, 2)
    array([[1],
           [3]])

    Tuple arguments are handled and the lambdified function should
    be called with the same type of arguments as were used to create
    the function.:

    >>> f = lambdify((x, (y, z)), x + y)
    >>> f(1, (2, 4))
    3

    A more robust way of handling this is to always work with flattened
    arguments:

    >>> from sympy.utilities.iterables import flatten
    >>> args = w, (x, (y, z))
    >>> vals = 1, (2, (3, 4))
    >>> f = lambdify(flatten(args), w + x + y + z)
    >>> f(*flatten(vals))
    10

    Functions present in `expr` can also carry their own numerical
    implementations, in a callable attached to the ``_imp_``
    attribute. Usually you attach this using the
    ``implemented_function`` factory:

    >>> f = implemented_function(Function('f'), lambda x: x+1)
    >>> func = lambdify(x, f(x))
    >>> func(4)
    5

    ``lambdify`` always prefers ``_imp_`` implementations to implementations
    in other namespaces, unless the ``use_imps`` input parameter is False.
    """
    from sympy.core.symbol import Symbol
    from sympy.utilities.iterables import flatten

    # If the user hasn't specified any modules, use what is available.
    # (Previously an unused local `module_provided` tracked this; removed.)
    if modules is None:
        # Use either numpy (if available) or python.math where possible.
        # XXX: This leads to different behaviour on different systems and
        #      might be the reason for irreproducible errors.
        modules = ["math", "mpmath", "sympy"]
        # If numpy.array should be used instead of numpy.matrix, rewrite the
        # (module-global, deliberately mutable) translation table.
        if use_array:
            NUMPY_TRANSLATIONS.update({"Matrix": "array",
                                       "MutableDenseMatrix": "array",
                                       "ImmutableMatrix": "array"})
        else:
            # Ensures that the translation dict is set back
            # to matrix if lambdify was already called
            NUMPY_TRANSLATIONS.update({"Matrix": "matrix",
                                       "MutableDenseMatrix": "matrix",
                                       "ImmutableMatrix": "matrix"})
        # Attempt to import numpy; prefer it right after math when available.
        try:
            _import("numpy")
        except ImportError:
            pass
        else:
            modules.insert(1, "numpy")

    # Get the needed namespaces.
    namespaces = []
    # First find any function implementations
    if use_imps:
        namespaces.append(_imp_namespace(expr))
    # Check for dict before iterating
    if isinstance(modules, (dict, str)) or not hasattr(modules, '__iter__'):
        namespaces.append(modules)
    else:
        namespaces += list(modules)
    # fill namespace with first having highest priority
    namespace = {}
    for m in namespaces[::-1]:
        buf = _get_namespace(m)
        namespace.update(buf)

    if hasattr(expr, "atoms"):
        # Try if you can extract symbols from the expression.
        # Move on if expr.atoms in not implemented.
        syms = expr.atoms(Symbol)
        for term in syms:
            namespace.update({str(term): term})

    # Create lambda function.
    lstr = lambdastr(args, expr, printer=printer, dummify=dummify)
    flat = '__flatten_args__'
    if flat in lstr:
        # (Removed an unused `import itertools` here; flatten comes from
        # sympy.utilities.iterables above.)
        namespace.update({flat: flatten})
    return eval(lstr, namespace)
def _get_namespace(m):
    """Resolve one entry of lambdify's ``modules`` argument to a namespace.

    Strings name a known backend (imported on demand via ``_import``),
    dicts are used verbatim, and anything with a ``__dict__`` (typically a
    module object) contributes its attributes.  Raises TypeError otherwise.
    """
    if isinstance(m, str):
        _import(m)
        return MODULES[m][0]
    if isinstance(m, dict):
        return m
    if hasattr(m, "__dict__"):
        return m.__dict__
    raise TypeError("Argument must be either a string, dict or module but it is: %s" % m)
def lambdastr(args, expr, printer=None, dummify=False):
    """
    Returns a string that can be evaluated to a lambda function.

    Examples
    ========

    >>> from sympy.abc import x, y, z
    >>> from sympy.utilities.lambdify import lambdastr
    >>> lambdastr(x, x**2)
    'lambda x: (x**2)'
    >>> lambdastr((x,y,z), [z,y,x])
    'lambda x,y,z: ([z, y, x])'

    Although tuples may not appear as arguments to lambda in Python 3,
    lambdastr will create a lambda function that will unpack the original
    arguments so that nested arguments can be handled:

    >>> lambdastr((x, (y, z)), x + y)
    'lambda _0,_1: (lambda x,y,z: (x + y))(*list(__flatten_args__([_0,_1])))'
    """
    # Transforming everything to strings.
    from sympy.matrices import DeferredVector
    from sympy import Dummy, sympify, Symbol, Function, flatten

    if printer is not None:
        if inspect.isfunction(printer):
            lambdarepr = printer
        else:
            if inspect.isclass(printer):
                lambdarepr = lambda expr: printer().doprint(expr)
            else:
                lambdarepr = lambda expr: printer.doprint(expr)
    else:
        #XXX: This has to be done here because of circular imports
        from sympy.printing.lambdarepr import lambdarepr

    def sub_args(args, dummies_dict):
        # Replace symbols/applied functions in `args` with fresh Dummies,
        # recording the mapping, and return the argument string.
        if isinstance(args, str):
            return args
        elif isinstance(args, DeferredVector):
            return str(args)
        elif iterable(args):
            dummies = flatten([sub_args(a, dummies_dict) for a in args])
            return ",".join(str(a) for a in dummies)
        else:
            # Sub in dummy variables for functions or symbols
            if isinstance(args, (Function, Symbol)):
                dummies = Dummy()
                dummies_dict.update({args: dummies})
                return str(dummies)
            else:
                return str(args)

    def sub_expr(expr, dummies_dict):
        # Apply the dummy substitution to `expr`, recursing into containers.
        try:
            expr = sympify(expr).xreplace(dummies_dict)
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only genuine sympify/xreplace
            # failures should fall through to the container handling below.
            if isinstance(expr, DeferredVector):
                pass
            elif isinstance(expr, dict):
                k = [sub_expr(sympify(a), dummies_dict) for a in expr.keys()]
                v = [sub_expr(sympify(a), dummies_dict) for a in expr.values()]
                expr = dict(zip(k, v))
            elif isinstance(expr, tuple):
                expr = tuple(sub_expr(sympify(a), dummies_dict) for a in expr)
            elif isinstance(expr, list):
                expr = [sub_expr(sympify(a), dummies_dict) for a in expr]
        return expr

    # Transform args
    def isiter(l):
        return iterable(l, exclude=(str, DeferredVector))

    if isiter(args) and any(isiter(i) for i in args):
        # Nested argument structure: emit an outer lambda taking flat dummy
        # names and an inner lambda over the flattened original args.
        from sympy.utilities.iterables import flatten
        import re
        dum_args = [str(Dummy(str(i))) for i in range(len(args))]
        # NOTE(review): both branches of this conditional are identical, so
        # it is equivalent to ','.join(dum_args) — looks like a leftover.
        iter_args = ','.join([i if isiter(a) else i
                              for i, a in zip(dum_args, args)])
        lstr = lambdastr(flatten(args), expr, printer=printer, dummify=dummify)
        flat = '__flatten_args__'
        rv = 'lambda %s: (%s)(*list(%s([%s])))' % (
            ','.join(dum_args), lstr, flat, iter_args)
        if len(re.findall(r'\b%s\b' % flat, rv)) > 1:
            raise ValueError('the name %s is reserved by lambdastr' % flat)
        return rv

    dummies_dict = {}
    if dummify:
        args = sub_args(args, dummies_dict)
    else:
        if isinstance(args, str):
            pass
        elif iterable(args, exclude=DeferredVector):
            args = ",".join(str(a) for a in args)

    # Transform expr
    if dummify:
        if isinstance(expr, str):
            pass
        else:
            expr = sub_expr(expr, dummies_dict)
    expr = lambdarepr(expr)
    return "lambda %s: (%s)" % (args, expr)
def _imp_namespace(expr, namespace=None):
    """ Return namespace dict with function implementations

    Recursively search anything that can be passed as `expr` — sympy
    expressions as well as tuples, lists and dicts containing them — for
    applied functions carrying a numerical ``_imp_`` implementation, and
    collect those implementations keyed by function name.

    Parameters
    ----------
    expr : object
        Something passed to lambdify, that will generate valid code from
        ``str(expr)``.
    namespace : None or mapping
        Namespace to fill. None results in new empty dict

    Returns
    -------
    namespace : dict
        dict with keys of implemented function names within `expr` and
        corresponding values being the numerical implementation of
        function

    Examples
    --------
    >>> from sympy.abc import x
    >>> from sympy.utilities.lambdify import implemented_function, _imp_namespace
    >>> from sympy import Function
    >>> f = implemented_function(Function('f'), lambda x: x+1)
    >>> g = implemented_function(Function('g'), lambda x: x*10)
    >>> namespace = _imp_namespace(f(g(x)))
    >>> sorted(namespace.keys())
    ['f', 'g']
    """
    # Delayed import to avoid circular imports
    from sympy.core.function import FunctionClass
    if namespace is None:
        namespace = {}
    # Containers are valid expressions: recurse into every element,
    # including dictionary keys (functions may appear there too).
    if is_sequence(expr):
        for item in expr:
            _imp_namespace(item, namespace)
        return namespace
    if isinstance(expr, dict):
        for key, value in expr.items():
            _imp_namespace(key, namespace)
            _imp_namespace(value, namespace)
        return namespace
    # sympy expressions may be Functions themselves
    func = getattr(expr, 'func', None)
    if isinstance(func, FunctionClass):
        imp = getattr(func, '_imp_', None)
        if imp is not None:
            name = expr.func.__name__
            if name in namespace and namespace[name] != imp:
                raise ValueError('We found more than one implementation with name "%s"' % name)
            namespace[name] = imp
    # and / or they may take Functions as arguments
    for arg in getattr(expr, 'args', ()):
        _imp_namespace(arg, namespace)
    return namespace
def implemented_function(symfunc, implementation):
    """ Attach the numerical ``implementation`` to function ``symfunc``.

    ``symfunc`` may be an ``UndefinedFunction`` instance or a name string,
    in which case a fresh ``UndefinedFunction`` with that name is created.
    The callable is stored on the function class as the ``_imp_``
    staticmethod, where ``evalf()`` and ``lambdify`` look for it.

    This is a quick workaround, not a general way to build special symbolic
    functions; subclass ``Function`` for anything that must cooperate with
    the rest of sympy's machinery.

    Parameters
    ----------
    symfunc : ``str`` or ``UndefinedFunction`` instance
        Name for a new function, or an existing function class.
    implementation : callable
        numerical implementation to be called by ``evalf()`` or ``lambdify``

    Returns
    -------
    afunc : sympy.FunctionClass instance
        function with attached implementation

    Examples
    --------
    >>> from sympy.abc import x
    >>> from sympy.utilities.lambdify import lambdify, implemented_function
    >>> from sympy import Function
    >>> f = implemented_function(Function('f'), lambda x: x+1)
    >>> lam_f = lambdify(x, f(x))
    >>> lam_f(4)
    5
    """
    # Delayed import to avoid circular imports
    from sympy.core.function import UndefinedFunction
    # Given a bare name, create a function class to hold the implementation.
    if isinstance(symfunc, string_types):
        symfunc = UndefinedFunction(symfunc)
    elif not isinstance(symfunc, UndefinedFunction):
        raise ValueError('symfunc should be either a string or an UndefinedFunction instance.')
    # symfunc is a class, so wrap the callable in staticmethod to keep
    # Python from turning it into a bound method.
    symfunc._imp_ = staticmethod(implementation)
    return symfunc
| |
import requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class OutOfInventoryTest(OpenBazaarTestFramework):
    """Regtest scenario: purchasing a listing whose SKU has zero inventory.

    Flow: Alice (vendor, node 1) posts a profile and a listing with one SKU
    quantity forced to 0, then shuts down.  Bob (buyer, node 2) purchases
    and funds the order while she is offline.  Alice restarts, detects the
    payment, confirms the order, and both sides must end up funded and in
    state AWAITING_FULFILLMENT.
    """

    def __init__(self):
        super().__init__()
        # nodes[1] acts as Alice (vendor), nodes[2] as Bob (buyer).
        self.num_nodes = 3

    def run_test(self):
        """Drive the whole offline-order flow, raising TestFailure on any
        unexpected HTTP status or order state."""
        alice = self.nodes[1]
        bob = self.nodes[2]

        # post profile for alice
        with open('testdata/'+ self.vendor_version +'/profile.json') as profile_file:
            profile_json = json.load(profile_file, object_pairs_hook=OrderedDict)
        api_url = alice["gateway_url"] + "ob/profile"
        requests.post(api_url, data=json.dumps(profile_json, indent=4))

        # post listing to alice
        with open('testdata/'+ self.vendor_version +'/listing.json') as listing_file:
            listing_json = json.load(listing_file, object_pairs_hook=OrderedDict)
        # NOTE(review): vendor_version is concatenated into paths as a string
        # above but compared to the int 4 here — confirm the attribute type.
        if self.vendor_version == 4:
            listing_json["metadata"]["priceCurrency"] = "t" + self.cointype
        else:
            listing_json["item"]["priceCurrency"]["code"] = "t" + self.cointype
        listing_json["metadata"]["acceptedCurrencies"] = ["t" + self.cointype]
        # The SKU under test: zero quantity, i.e. out of inventory.
        listing_json["item"]["skus"][4]["quantity"] = 0
        api_url = alice["gateway_url"] + "ob/listing"
        r = requests.post(api_url, data=json.dumps(listing_json, indent=4))
        if r.status_code == 404:
            raise TestFailure("OutOfInventoryTest - FAIL: Listing post endpoint not found")
        elif r.status_code != 200:
            resp = json.loads(r.text)
            # NOTE(review): the "%s" is never interpolated — resp["reason"]
            # is passed as a second exception argument, not formatted in.
            raise TestFailure("OutOfInventoryTest - FAIL: Listing POST failed. Reason: %s", resp["reason"])
        time.sleep(20)

        # get listing hash
        api_url = alice["gateway_url"] + "ob/listings/" + alice["peerId"]
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("OutOfInventoryTest - FAIL: Couldn't get listing index")
        resp = json.loads(r.text)
        listingId = resp[0]["hash"]

        # bob fetch listing to cache
        api_url = bob["gateway_url"] + "ipfs/" + listingId
        requests.get(api_url)

        # generate some coins and send them to bob
        api_url = bob["gateway_url"] + "wallet/address/" + self.cointype
        r = requests.get(api_url)
        if r.status_code == 200:
            resp = json.loads(r.text)
            address = resp["address"]
        elif r.status_code == 404:
            raise TestFailure("OutOfInventoryTest - FAIL: Address endpoint not found")
        else:
            raise TestFailure("OutOfInventoryTest - FAIL: Unknown response")
        self.send_bitcoin_cmd("sendtoaddress", address, 10)
        time.sleep(3)

        # shutdown alice
        api_url = alice["gateway_url"] + "ob/shutdown"
        requests.post(api_url, data="")
        time.sleep(10)

        # bob send order
        with open('testdata/'+ self.buyer_version +'/order_direct.json') as order_file:
            order_json = json.load(order_file, object_pairs_hook=OrderedDict)
        order_json["items"][0]["listingHash"] = listingId
        order_json["paymentCoin"] = "t" + self.cointype
        api_url = bob["gateway_url"] + "ob/purchase"
        r = requests.post(api_url, data=json.dumps(order_json, indent=4))
        if r.status_code == 404:
            raise TestFailure("OutOfInventoryTest - FAIL: Purchase post endpoint not found")
        elif r.status_code != 200:
            resp = json.loads(r.text)
            raise TestFailure("OutOfInventoryTest - FAIL: Purchase POST failed. Reason: %s", resp["reason"])
        resp = json.loads(r.text)
        orderId = resp["orderId"]
        payment_address = resp["paymentAddress"]
        payment_amount = resp["amount"]
        # Alice was shut down above, so the purchase must report her offline.
        if resp["vendorOnline"] == True:
            raise TestFailure("OutOfInventoryTest - FAIL: Purchase returned vendor is online")

        # check the purchase saved correctly
        api_url = bob["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("OutOfInventoryTest - FAIL: Couldn't load order from Bob")
        resp = json.loads(r.text)
        if resp["state"] != "AWAITING_PAYMENT":
            raise TestFailure("OutOfInventoryTest - FAIL: Bob purchase saved in incorrect state")
        if resp["funded"] == True:
            raise TestFailure("OutOfInventoryTest - FAIL: Bob incorrectly saved as funded")

        # fund order
        spend = {
            "currencyCode": "T" + self.cointype,
            "address": payment_address,
            "amount": payment_amount["amount"],
            "feeLevel": "NORMAL",
            "requireAssociateOrder": False
        }
        # NOTE(review): same int-vs-string comparison question as
        # vendor_version above; the v4 API takes a flat amount and a
        # "wallet" field instead.
        if self.buyer_version == 4:
            spend["amount"] = payment_amount
            spend["wallet"] = "T" + self.cointype
        api_url = bob["gateway_url"] + "wallet/spend"
        r = requests.post(api_url, data=json.dumps(spend, indent=4))
        if r.status_code == 404:
            raise TestFailure("OutOfInventoryTest - FAIL: Spend post endpoint not found")
        elif r.status_code != 200:
            resp = json.loads(r.text)
            raise TestFailure("OutOfInventoryTest - FAIL: Spend POST failed. Reason: %s", resp["reason"])
        time.sleep(20)

        # check bob detected payment
        api_url = bob["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("OutOfInventoryTest - FAIL: Couldn't load order from Bob")
        resp = json.loads(r.text)
        if len(resp["paymentAddressTransactions"]) <= 0:
            raise TestFailure("OutOfInventoryTest - FAIL: Bob failed to detect his payment")
        if resp["funded"] == False:
            raise TestFailure("OutOfInventoryTest - FAIL: Bob incorrectly saved as unfunded")
        if resp["state"] != "PENDING":
            raise TestFailure("OutOfInventoryTest - FAIL: Bob purchase saved in incorrect state")

        # generate one more block containing this tx
        self.send_bitcoin_cmd("generate", 1)

        # startup alice again
        self.start_node(1, alice)
        time.sleep(45)

        # check alice detected order and payment
        api_url = alice["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("OutOfInventoryTest - FAIL: Couldn't load order from Alice")
        resp = json.loads(r.text)
        if resp["state"] != "PENDING":
            raise TestFailure("OutOfInventoryTest - FAIL: Alice failed to detect payment")
        if resp["funded"] == False:
            raise TestFailure("OutOfInventoryTest - FAIL: Alice incorrectly saved as unfunded")

        # check alice balance is zero (funds still sit in the order's address)
        api_url = alice["gateway_url"] + "wallet/balance/T" + self.cointype
        r = requests.get(api_url)
        if r.status_code == 200:
            resp = json.loads(r.text)
            confirmed = int(resp["confirmed"])
            unconfirmed = int(resp["unconfirmed"])
            if confirmed + unconfirmed > 0:
                raise TestFailure("OutOfInventoryTest - FAIL: Alice should have zero balance at this point")
        else:
            raise TestFailure("OutOfInventoryTest - FAIL: Failed to query Alice's balance")
        time.sleep(1)

        # alice confirm offline order
        api_url = alice["gateway_url"] + "ob/orderconfirmation"
        oc = {
            "orderId": orderId,
            "reject": False
        }
        r = requests.post(api_url, data=json.dumps(oc, indent=4))
        if r.status_code == 404:
            raise TestFailure("OutOfInventoryTest - FAIL: Order confirmation post endpoint not found")
        elif r.status_code != 200:
            resp = json.loads(r.text)
            raise TestFailure("OutOfInventoryTest - FAIL: order confirmation POST failed. Reason: %s", resp["reason"])
        time.sleep(10)
        self.send_bitcoin_cmd("generate", 1)
        time.sleep(2)

        # Check the funds moved into alice's wallet
        api_url = alice["gateway_url"] + "wallet/balance/T" + self.cointype
        r = requests.get(api_url)
        if r.status_code == 200:
            resp = json.loads(r.text)
            confirmed = int(resp["confirmed"])
            #unconfirmed = int(resp["unconfirmed"])
            if confirmed <= 0:
                raise TestFailure("OutOfInventoryTest - FAIL: Alice failed to receive the multisig payout")
        else:
            raise TestFailure("OutOfInventoryTest - FAIL: Failed to query Alice's balance")

        # check bob detected order confirmation and outgoing transaction
        api_url = bob["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("OutOfInventoryTest - FAIL: Couldn't load order from Bob")
        resp = json.loads(r.text)
        if resp["state"] != "AWAITING_FULFILLMENT":
            raise TestFailure("OutOfInventoryTest - FAIL: Bob failed to set state correctly")
        if resp["funded"] == False:
            raise TestFailure("OutOfInventoryTest - FAIL: Bob incorrectly saved as unfunded")

        # check alice set state correctly
        api_url = alice["gateway_url"] + "ob/order/" + orderId
        r = requests.get(api_url)
        if r.status_code != 200:
            raise TestFailure("OutOfInventoryTest - FAIL: Couldn't load order from Alice")
        resp = json.loads(r.text)
        if resp["state"] != "AWAITING_FULFILLMENT":
            raise TestFailure("OutOfInventoryTest - FAIL: Alice failed to detect payment")
        if resp["funded"] == False:
            raise TestFailure("OutOfInventoryTest - FAIL: Alice incorrectly saved as unfunded")
        print("OutOfInventoryTest - PASS")
# Script entry point: run the test on regtest with exchange rates disabled.
if __name__ == '__main__':
    print("Running OutOfInventoryTest")
    OutOfInventoryTest().main(["--regtest", "--disableexchangerates"])
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2009,2012 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""This module manages interaction with version control systems.
To implement support for a new version control system, inherit from
:class:`GenericRevisionControlSystem`.
TODO:
- Add authentication handling
- :func:`commitdirectory` should do a single commit instead of one for
each file
- Maybe implement some caching for :func:`get_versioned_object` - check
profiler
"""
import os
import re
import subprocess
# Names of the supported RCS back-ends, in probing order.  Entries are
# removed at runtime by __get_rcs_class() when a back-end turns out not to
# be available on this system.
DEFAULT_RCS = ["svn", "cvs", "darcs", "git", "bzr", "hg"]
"""the names of all supported revision control systems
modules of the same name containing a class with the same name are expected
to be defined below 'translate.storage.versioncontrol'
"""
# Lazy cache filled by __get_rcs_class(): maps back-end name -> class, or
# None when the back-end could not be imported / is not installed.
__CACHED_RCS_CLASSES = {}
"""The dynamically loaded revision control system implementations (python
modules) are cached here for faster access.
"""
def __get_rcs_class(name):
    """Return the class implementing the RCS back-end *name*, or ``None``.

    The class is looked up in the module
    ``translate.storage.versioncontrol.<name>`` and must carry the same
    name as the module.  Results (including failures, stored as ``None``)
    are cached in ``__CACHED_RCS_CLASSES``; unavailable back-ends are also
    removed from ``DEFAULT_RCS`` so they are not probed again.
    """
    # idiom fix: "name not in" instead of "not name in"
    if name not in __CACHED_RCS_CLASSES:
        try:
            module = __import__("translate.storage.versioncontrol.%s" % name,
                                globals(), {}, name)
            # the module function "is_available" must return "True"
            if (hasattr(module, "is_available") and
                    callable(module.is_available) and
                    module.is_available()):
                # we found an appropriate module
                rcs_class = getattr(module, name)
            else:
                # the RCS client does not seem to be installed
                rcs_class = None
                try:
                    DEFAULT_RCS.remove(name)
                except ValueError:
                    # we might have had a race condition and another thread
                    # already removed it
                    pass
        except (ImportError, AttributeError):
            rcs_class = None
        __CACHED_RCS_CLASSES[name] = rcs_class
    return __CACHED_RCS_CLASSES[name]
def run_command(command, cwd=None):
    """Run a command (array of program name and arguments).

    :param command: list of arguments to be joined for a program call
    :type command: list
    :param cwd: optional directory where the command should be executed
    :type cwd: str
    :return: tuple ``(exitcode, output, error)``; the exitcode is ``-1``
        when the program could not be executed at all (e.g. the executable
        was not found), in which case *error* is the :exc:`OSError`.
    """
    try:
        proc = subprocess.Popen(args=command,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                stdin=subprocess.PIPE,
                                cwd=cwd)
        (output, error) = proc.communicate()
        ret = proc.returncode
        return ret, output, error
    # "except OSError as ..." is valid on Python 2.6+ AND Python 3; the
    # previous "except OSError, err_msg" form is a SyntaxError on Python 3.
    except OSError as err_msg:
        # failed to run the program (e.g. the executable was not found)
        return -1, "", err_msg
def prepare_filelist(files):
    """Normalize *files* into a list of absolute, symlink-resolved paths.

    A single path may be passed instead of a list.
    """
    if isinstance(files, list):
        candidates = files
    else:
        candidates = [files]
    return [os.path.realpath(candidate) for candidate in candidates]
def youngest_ancestor(files):
    """Return the deepest directory common to all given *files*.

    Fix: ``os.path.commonprefix`` compares character by character, so it
    could return a bogus partial component (e.g. ``"/a/b"`` for
    ``"/a/bc/x"`` and ``"/a/bd/y"``).  The prefix is now trimmed back to a
    real directory boundary.
    """
    dirs = [os.path.dirname(f) for f in files]
    prefix = os.path.commonprefix(dirs)
    if prefix and not all(d == prefix or d.startswith(prefix + os.sep)
                          for d in dirs):
        # cut back to the last complete path component
        sep_index = prefix.rfind(os.sep)
        prefix = prefix[:sep_index] if sep_index >= 0 else ""
    return prefix
class GenericRevisionControlSystem(object):
    """The super class for all version control classes.

    Always inherit from this class to implement another RC interface.
    At least the two attributes :attr:`RCS_METADIR` and :attr:`SCAN_PARENTS`
    must be overridden by all implementations that derive from this class.

    By default, all implementations can rely on the following attributes:

    - :attr:`root_dir`: the parent of the metadata directory of the
      working copy
    - :attr:`location_abs`: the absolute path of the RCS object
    - :attr:`location_rel`: the path of the RCS object relative
      to :attr:`root_dir`
    """
    # Name of the per-checkout metadata directory; subclasses must set it.
    RCS_METADIR = None
    """The name of the metadata directory of the RCS
    e.g.: for Subversion -> ".svn"
    """
    # Whether the metadata directory only exists at the root of the working
    # copy (True: bzr/git/darcs) or in every directory (False: svn/cvs).
    SCAN_PARENTS = None
    """Whether to check the parent directories for the metadata directory of
    the RCS working copy
    Some revision control systems store their metadata directory only
    in the base of the working copy (e.g. bzr, GIT and Darcs)
    use ``True`` for these RCS
    Other RCS store a metadata directory in every single directory of
    the working copy (e.g. Subversion and CVS)
    use ``False`` for these RCS
    """
    def __init__(self, location, oldest_parent=None):
        """Find the relevant information about this RCS object.

        The :exc:`IOError` exception indicates that the specified object
        (file or directory) is not controlled by the given version control
        system.

        :param location: path of the file/directory to look up
        :param oldest_parent: optional highest path where a recursive search
            should be stopped
        :type oldest_parent: str
        """
        # check if the implementation looks ok - otherwise raise IOError
        self._self_check()
        # search for the repository information
        location = os.path.normpath(location)
        result = self._find_rcs_directory(location, oldest_parent)
        if result is None:
            raise IOError("Could not find revision control information: %s" \
                    % location)
        self.root_dir, self.location_abs, self.location_rel = result
        # a plain file must additionally be known to the repository itself
        if not os.path.isdir(location):
            if not self._file_exists(location):
                raise IOError("Not present in repository: %s" % location)
    def _find_rcs_directory(self, rcs_obj, oldest_parent=None):
        """Try to find the metadata directory of the RCS.

        :param rcs_obj: path of the object under version control
        :param oldest_parent: optional highest path where a recursive search
            should be stopped
        :type oldest_parent: str
        :rtype: tuple
        :return:
            - the absolute path of the directory, that contains the metadata
              directory
            - the absolute path of the RCS object
            - the relative path of the RCS object based on the directory above
        """
        # work on the containing directory when given a plain file
        if os.path.isdir(os.path.abspath(rcs_obj)):
            rcs_obj_dir = os.path.abspath(rcs_obj)
        else:
            rcs_obj_dir = os.path.dirname(os.path.abspath(rcs_obj))
        if os.path.isdir(os.path.join(rcs_obj_dir, self.RCS_METADIR)):
            # is there a metadir next to the rcs_obj?
            # (for Subversion, CVS, ...)
            location_abs = os.path.abspath(rcs_obj)
            location_rel = os.path.basename(location_abs)
            return (rcs_obj_dir, location_abs, location_rel)
        elif self.SCAN_PARENTS:
            # scan for the metadir in parent directories
            # (for bzr, GIT, Darcs, ...)
            return self._find_rcs_in_parent_directories(rcs_obj, oldest_parent)
        else:
            # no RCS metadata found
            return None
    def _find_rcs_in_parent_directories(self, rcs_obj, oldest_parent=None):
        """Try to find the metadata directory in all parent directories"""
        # first: resolve possible symlinks
        current_dir = os.path.dirname(os.path.realpath(rcs_obj))
        # prevent infite loops
        # (at most 8 directory levels are climbed before giving up)
        max_depth = 8
        if oldest_parent:
            oldest_parent = os.path.normpath(oldest_parent)
        # stop as soon as we find the metadata directory
        while not os.path.isdir(os.path.join(current_dir, self.RCS_METADIR)):
            if current_dir == oldest_parent:
                # we were instructed not to look higher up
                return None
            parent_dir = os.path.dirname(current_dir)
            if parent_dir == current_dir:
                # we reached the root directory - stop
                return None
            if max_depth <= 0:
                # some kind of dead loop or a _very_ deep directory structure
                return None
            # go to the next higher level
            current_dir = parent_dir
            max_depth -= 1
        # the loop was finished successfully
        # i.e.: we found the metadata directory
        rcs_dir = current_dir
        location_abs = os.path.realpath(rcs_obj)
        # strip the base directory from the path of the rcs_obj
        basedir = rcs_dir + os.path.sep
        if location_abs.startswith(basedir):
            # remove the base directory (including the trailing slash)
            location_rel = location_abs.replace(basedir, "", 1)
            # successfully finished
            return (rcs_dir, location_abs, location_rel)
        else:
            # this should never happen
            return None
    def _self_check(self):
        """Check if all necessary attributes are defined.

        Useful to make sure, that a new implementation does not forget
        something like :attr:`RCS_METADIR`
        """
        if self.RCS_METADIR is None:
            raise IOError("Incomplete RCS interface implementation: " \
                    + "self.RCS_METADIR is None")
        if self.SCAN_PARENTS is None:
            raise IOError("Incomplete RCS interface implementation: " \
                    + "self.SCAN_PARENTS is None")
        # we do not check for implemented functions - they raise
        # NotImplementedError exceptions anyway
        return True
    def _file_exists(self, path):
        """Method to check if a file exists ``in the repository``."""
        # If getcleanfile() worked, we assume the file exits. Implementations
        # can provide optimised versions.
        return bool(self.getcleanfile())
    def getcleanfile(self, revision=None):
        """Dummy to be overridden by real implementations"""
        raise NotImplementedError("Incomplete RCS interface implementation:" \
                + " 'getcleanfile' is missing")
    def commit(self, message=None, author=None):
        """Dummy to be overridden by real implementations"""
        raise NotImplementedError("Incomplete RCS interface implementation:" \
                + " 'commit' is missing")
    def add(self, files, message=None, author=None):
        """Dummy to be overridden by real implementations"""
        raise NotImplementedError("Incomplete RCS interface implementation:" \
                + " 'add' is missing")
    def update(self, revision=None, needs_revert=True):
        """Dummy to be overridden by real implementations"""
        raise NotImplementedError("Incomplete RCS interface implementation:" \
                + " 'update' is missing")
def get_versioned_objects_recursive(
        location,
        versioning_systems=None,
        follow_symlinks=True):
    """Return a list of versioned objects, one for every file below
    *location*.

    Files that are not under version control are silently skipped.

    :param location: directory tree to scan
    :param versioning_systems: optional list of back-end names to probe
        (defaults to a copy of ``DEFAULT_RCS``)
    :param follow_symlinks: passed through to :func:`get_versioned_object`
    """
    rcs_objs = []
    if versioning_systems is None:
        versioning_systems = DEFAULT_RCS[:]
    # Fix: os.path.walk was removed in Python 3; os.walk is available on
    # both Python 2 and Python 3 and needs no callback function.
    for dirpath, dirnames, filenames in os.walk(location):
        for fname in filenames:
            full_fname = os.path.join(dirpath, fname)
            if os.path.isfile(full_fname):
                try:
                    rcs_objs.append(get_versioned_object(
                            full_fname, versioning_systems, follow_symlinks))
                except IOError:
                    # not under version control - ignore
                    pass
    return rcs_objs
def get_versioned_object(location, versioning_systems=None,
                         follow_symlinks=True, oldest_parent=None):
    """Return a versioned object for the given file.

    Probes every requested back-end; raises :exc:`IOError` when none of
    them controls *location*.
    """
    if versioning_systems is None:
        versioning_systems = DEFAULT_RCS[:]
    # Probe every known RCS.  A back-end that keeps its metadata directly
    # next to the object (SCAN_PARENTS false) wins immediately.
    candidates = []
    for name in versioning_systems:
        rcs_class = __get_rcs_class(name)
        if rcs_class is None:
            continue
        try:
            candidate = rcs_class(location, oldest_parent)
        except IOError:
            continue
        if not candidate.SCAN_PARENTS:
            return candidate
        candidates.append(candidate)
    # otherwise prefer the repository whose root is closest to the object
    # (shortest relative path)
    if candidates:
        return min(candidates, key=lambda obj: len(obj.location_rel))
    # if 'location' is a symlink, then we should try the original file
    if follow_symlinks and os.path.islink(location):
        return get_versioned_object(os.path.realpath(location),
                                    versioning_systems=versioning_systems,
                                    follow_symlinks=False)
    # if everything fails:
    raise IOError("Could not find version control information: %s" % location)
def get_available_version_control_systems():
    """Return the class objects of all locally available version control
    systems.
    """
    result = []
    # Fix: iterate over a copy - __get_rcs_class() removes unavailable
    # back-ends from DEFAULT_RCS, and removing entries from the list being
    # iterated would silently skip the following entry.
    for rcs in DEFAULT_RCS[:]:
        rcs_class = __get_rcs_class(rcs)
        if rcs_class:
            result.append(rcs_class)
    return result
# stay compatible to the previous version
# (module-level convenience wrappers around get_versioned_object())
def updatefile(filename):
    """Update *filename* in its working copy from the repository."""
    return get_versioned_object(filename).update()
def getcleanfile(filename, revision=None):
    """Return the unmodified repository content of *filename*."""
    return get_versioned_object(filename).getcleanfile(revision)
def commitfile(filename, message=None, author=None):
    """Commit *filename* to its repository."""
    return get_versioned_object(filename).commit(message=message,
            author=author)
def commitdirectory(directory, message=None, author=None):
    """Commit all files below the given directory.

    Files that are just symlinked into the directory are supported, too.

    :param directory: directory tree to commit
    :param message: optional commit message
    :param author: optional author of the commits
    """
    # for now all files are committed separately
    # should we combine them into one commit?
    for rcs_obj in get_versioned_objects_recursive(directory):
        rcs_obj.commit(message=message, author=author)
def updatedirectory(directory):
    """Update all files below the given directory.

    Files that are just symlinked into the directory are supported, too.

    :param directory: directory tree to update
    """
    # for now all files are updated separately
    # should we combine them into one update?
    for rcs_obj in get_versioned_objects_recursive(directory):
        rcs_obj.update()
def hasversioning(item, oldest_parent=None):
    """Return True if *item* is controlled by any available RCS."""
    try:
        # any back-end that accepts the object will do
        get_versioned_object(item, oldest_parent=oldest_parent)
    except IOError:
        return False
    return True
if __name__ == "__main__":
    # Small demo / self-test driver for this module.
    import sys
    filenames = sys.argv[1:]
    if filenames:
        # try to retrieve the given (local) file from a repository
        for filename in filenames:
            contents = getcleanfile(filename)
            sys.stdout.write("\n\n******** %s ********\n\n" % filename)
            sys.stdout.write(contents)
    else:
        # first: make sure, that the translate toolkit is available
        # (useful if "python __init__.py" was called without an appropriate
        # PYTHONPATH)
        import translate.storage.versioncontrol
        # print the names of locally available version control systems;
        # fix: "print rcs" is a SyntaxError on Python 3 - the parenthesised
        # call works on both Python 2 and Python 3
        for rcs in get_available_version_control_systems():
            print(rcs)
| |
# Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume interface (1.1 extension).
"""
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
import six
from cinderclient import base
class Volume(base.Resource):
    """A volume is an extra block level storage to the OpenStack instances.

    Most methods simply delegate to the bound :class:`VolumeManager`
    (``self.manager``).  Several methods historically accept a ``volume``
    argument that is ignored (the action always applies to ``self``); the
    parameters are kept for backwards compatibility.
    """
    def __repr__(self):
        return "<Volume: %s>" % self.id
    def delete(self):
        """Delete this volume."""
        self.manager.delete(self)
    def update(self, **kwargs):
        """Update the display_name or display_description for this volume."""
        self.manager.update(self, **kwargs)
    def attach(self, instance_uuid, mountpoint, mode='rw'):
        """Set attachment metadata.

        :param instance_uuid: uuid of the attaching instance.
        :param mountpoint: mountpoint on the attaching instance.
        :param mode: the access mode
        """
        return self.manager.attach(self, instance_uuid, mountpoint, mode)
    def detach(self):
        """Clear attachment metadata."""
        return self.manager.detach(self)
    def reserve(self, volume):
        """Reserve this volume (*volume* is ignored)."""
        return self.manager.reserve(self)
    def unreserve(self, volume):
        """Unreserve this volume (*volume* is ignored)."""
        return self.manager.unreserve(self)
    def begin_detaching(self, volume):
        """Begin detaching this volume (*volume* is ignored)."""
        return self.manager.begin_detaching(self)
    def roll_detaching(self, volume):
        """Roll detaching of this volume (*volume* is ignored)."""
        return self.manager.roll_detaching(self)
    def initialize_connection(self, volume, connector):
        """Initialize a volume connection (*volume* is ignored).

        :param connector: connector dict from nova.
        """
        return self.manager.initialize_connection(self, connector)
    def terminate_connection(self, volume, connector):
        """Terminate a volume connection (*volume* is ignored).

        :param connector: connector dict from nova.
        """
        return self.manager.terminate_connection(self, connector)
    def set_metadata(self, volume, metadata):
        """Set or Append metadata to this volume (*volume* is ignored).

        :param metadata: A dict of key/value pairs to set
        """
        return self.manager.set_metadata(self, metadata)
    def upload_to_image(self, force, image_name, container_format,
                        disk_format):
        """Upload a volume to image service as an image."""
        return self.manager.upload_to_image(self, force, image_name,
                                            container_format, disk_format)
    def force_delete(self):
        """Delete this volume ignoring its current state."""
        self.manager.force_delete(self)
    def reset_state(self, state):
        """Update the volume with the provided state."""
        self.manager.reset_state(self, state)
    def extend(self, volume, new_size):
        """Extend the size of this volume (*volume* is ignored).

        :param new_size: The desired size to extend volume to.
        """
        # BUG FIX: the ignored *volume* argument used to be forwarded as an
        # extra positional argument, so the call never matched
        # VolumeManager.extend(volume, new_size) and raised TypeError.
        self.manager.extend(self, new_size)
    def migrate_volume(self, host, force_host_copy):
        """Migrate the volume to a new host."""
        self.manager.migrate_volume(self, host, force_host_copy)
    def update_all_metadata(self, metadata):
        """Update all metadata of this volume."""
        return self.manager.update_all_metadata(self, metadata)
    def update_readonly_flag(self, volume, read_only):
        """Update the read-only access mode flag (*volume* is ignored).

        :param read_only: The value to indicate whether to update volume to
            read-only access mode.
        """
        # BUG FIX: as with extend(), the ignored *volume* argument used to
        # be forwarded, breaking the call to
        # VolumeManager.update_readonly_flag(volume, flag).
        self.manager.update_readonly_flag(self, read_only)
class VolumeManager(base.ManagerWithFind):
    """
    Manage :class:`Volume` resources.

    Thin wrappers around the Block Storage REST API; volume "actions" are
    POSTed to ``/volumes/<id>/action`` via :meth:`_action`.
    """
    resource_class = Volume
    def create(self, size, snapshot_id=None, source_volid=None,
               display_name=None, display_description=None,
               volume_type=None, user_id=None,
               project_id=None, availability_zone=None,
               metadata=None, imageRef=None):
        """
        Create a volume.

        :param size: Size of volume in GB
        :param snapshot_id: ID of the snapshot
        :param display_name: Name of the volume
        :param display_description: Description of the volume
        :param volume_type: Type of volume
        :param user_id: User id derived from context
        :param project_id: Project id derived from context
        :param availability_zone: Availability Zone to use
        :param metadata: Optional metadata to set on volume creation
        :param imageRef: reference to an image stored in glance
        :param source_volid: ID of source volume to clone from
        :rtype: :class:`Volume`
        """
        if metadata is None:
            volume_metadata = {}
        else:
            volume_metadata = metadata
        # 'status' and 'attach_status' are only initial client-side values;
        # the server drives the real lifecycle after creation.
        body = {'volume': {'size': size,
                           'snapshot_id': snapshot_id,
                           'display_name': display_name,
                           'display_description': display_description,
                           'volume_type': volume_type,
                           'user_id': user_id,
                           'project_id': project_id,
                           'availability_zone': availability_zone,
                           'status': "creating",
                           'attach_status': "detached",
                           'metadata': volume_metadata,
                           'imageRef': imageRef,
                           'source_volid': source_volid,
                           }}
        return self._create('/volumes', body, 'volume')
    def get(self, volume_id):
        """
        Get a volume.

        :param volume_id: The ID of the volume to get.
        :rtype: :class:`Volume`
        """
        return self._get("/volumes/%s" % volume_id, "volume")
    def list(self, detailed=True, search_opts=None):
        """
        Get a list of all volumes.

        :param detailed: whether to request the detailed listing
        :param search_opts: optional dict of query filters; falsy values
            are dropped from the query string
        :rtype: list of :class:`Volume`
        """
        if search_opts is None:
            search_opts = {}
        qparams = {}
        for opt, val in six.iteritems(search_opts):
            if val:
                qparams[opt] = val
        query_string = "?%s" % urlencode(qparams) if qparams else ""
        detail = ""
        if detailed:
            detail = "/detail"
        return self._list("/volumes%s%s" % (detail, query_string),
                          "volumes")
    def delete(self, volume):
        """
        Delete a volume.

        :param volume: The :class:`Volume` to delete.
        """
        self._delete("/volumes/%s" % base.getid(volume))
    def update(self, volume, **kwargs):
        """
        Update the display_name or display_description for a volume.

        :param volume: The :class:`Volume` to update.
        """
        if not kwargs:
            # nothing to send - avoid an empty PUT
            return
        body = {"volume": kwargs}
        self._update("/volumes/%s" % base.getid(volume), body)
    def _action(self, action, volume, info=None, **kwargs):
        """
        Perform a volume "action."

        POSTs ``{action: info}`` to the volume's action endpoint.
        """
        body = {action: info}
        self.run_hooks('modify_body_for_action', body, **kwargs)
        url = '/volumes/%s/action' % base.getid(volume)
        return self.api.client.post(url, body=body)
    def attach(self, volume, instance_uuid, mountpoint, mode='rw'):
        """
        Set attachment metadata.

        :param volume: The :class:`Volume` (or its ID)
                       you would like to attach.
        :param instance_uuid: uuid of the attaching instance.
        :param mountpoint: mountpoint on the attaching instance.
        :param mode: the access mode.
        """
        return self._action('os-attach',
                            volume,
                            {'instance_uuid': instance_uuid,
                             'mountpoint': mountpoint,
                             'mode': mode})
    def detach(self, volume):
        """
        Clear attachment metadata.

        :param volume: The :class:`Volume` (or its ID)
                       you would like to detach.
        """
        return self._action('os-detach', volume)
    def reserve(self, volume):
        """
        Reserve this volume.

        :param volume: The :class:`Volume` (or its ID)
                       you would like to reserve.
        """
        return self._action('os-reserve', volume)
    def unreserve(self, volume):
        """
        Unreserve this volume.

        :param volume: The :class:`Volume` (or its ID)
                       you would like to unreserve.
        """
        return self._action('os-unreserve', volume)
    def begin_detaching(self, volume):
        """
        Begin detaching this volume.

        :param volume: The :class:`Volume` (or its ID)
                       you would like to detach.
        """
        return self._action('os-begin_detaching', volume)
    def roll_detaching(self, volume):
        """
        Roll detaching this volume.

        :param volume: The :class:`Volume` (or its ID)
                       you would like to roll detaching.
        """
        return self._action('os-roll_detaching', volume)
    def initialize_connection(self, volume, connector):
        """
        Initialize a volume connection.

        :param volume: The :class:`Volume` (or its ID).
        :param connector: connector dict from nova.
        """
        # the response body carries the connection details
        return self._action('os-initialize_connection', volume,
                            {'connector': connector})[1]['connection_info']
    def terminate_connection(self, volume, connector):
        """
        Terminate a volume connection.

        :param volume: The :class:`Volume` (or its ID).
        :param connector: connector dict from nova.
        """
        self._action('os-terminate_connection', volume,
                     {'connector': connector})
    def set_metadata(self, volume, metadata):
        """
        Update/Set a volumes metadata.

        :param volume: The :class:`Volume`.
        :param metadata: A dict of key/value pairs to set.
        """
        body = {'metadata': metadata}
        return self._create("/volumes/%s/metadata" % base.getid(volume),
                            body, "metadata")
    def delete_metadata(self, volume, keys):
        """
        Delete specified keys from volumes metadata.

        :param volume: The :class:`Volume`.
        :param keys: A list of keys to be removed.
        """
        # one DELETE request per key
        for k in keys:
            self._delete("/volumes/%s/metadata/%s" % (base.getid(volume), k))
    def upload_to_image(self, volume, force, image_name, container_format,
                        disk_format):
        """
        Upload volume to image service as image.

        :param volume: The :class:`Volume` to upload.
        """
        return self._action('os-volume_upload_image',
                            volume,
                            {'force': force,
                             'image_name': image_name,
                             'container_format': container_format,
                             'disk_format': disk_format})
    def force_delete(self, volume):
        """Delete a volume regardless of its current state."""
        return self._action('os-force_delete', base.getid(volume))
    def reset_state(self, volume, state):
        """Update the provided volume with the provided state."""
        return self._action('os-reset_status', volume, {'status': state})
    def extend(self, volume, new_size):
        """Extend the size of a volume to *new_size* (in GB)."""
        return self._action('os-extend',
                            base.getid(volume),
                            {'new_size': new_size})
    def get_encryption_metadata(self, volume_id):
        """
        Retrieve the encryption metadata from the desired volume.

        :param volume_id: the id of the volume to query
        :return: a dictionary of volume encryption metadata
        """
        return self._get("/volumes/%s/encryption" % volume_id)._info
    def migrate_volume(self, volume, host, force_host_copy):
        """Migrate volume to new host.

        :param volume: The :class:`Volume` to migrate
        :param host: The destination host
        :param force_host_copy: Skip driver optimizations
        """
        return self._action('os-migrate_volume',
                            volume,
                            {'host': host, 'force_host_copy': force_host_copy})
    def migrate_volume_completion(self, old_volume, new_volume, error):
        """Complete the migration from the old volume to the temp new one.

        :param old_volume: The original :class:`Volume` in the migration
        :param new_volume: The new temporary :class:`Volume` in the migration
        :param error: Inform of an error to cause migration cleanup
        """
        new_volume_id = base.getid(new_volume)
        return self._action('os-migrate_volume_completion',
                            old_volume,
                            {'new_volume': new_volume_id, 'error': error})[1]
    def update_all_metadata(self, volume, metadata):
        """Update all metadata of a volume.

        :param volume: The :class:`Volume`.
        :param metadata: A list of keys to be updated.
        """
        body = {'metadata': metadata}
        return self._update("/volumes/%s/metadata" % base.getid(volume),
                            body)
    def update_readonly_flag(self, volume, flag):
        """Set or clear the read-only access mode flag of a volume."""
        return self._action('os-update_readonly_flag',
                            base.getid(volume),
                            {'readonly': flag})
| |
# Authors: Travis Oliphant, Matthew Brett
"""
Base classes for MATLAB file stream reading.
MATLAB is a registered trademark of the Mathworks inc.
"""
from __future__ import division, print_function, absolute_import
import sys
import operator
from scipy._lib.six import reduce
import numpy as np
# On Python 3, indexing a bytes object already yields an int, so byteord
# is effectively the identity; on Python 2, indexing yields a 1-character
# str that must go through ord().
if sys.version_info[0] >= 3:
    byteord = int
else:
    byteord = ord
from scipy.misc import doccer
from . import byteordercodes as boc
class MatReadError(Exception):
    """Raised when a MAT-file cannot be read (e.g. the file is empty)."""
    pass
class MatWriteError(Exception):
    """Error subclass for failures while writing MAT-files
    (not raised in this module)."""
    pass
class MatReadWarning(UserWarning):
    """Warning category used while reading MAT-files."""
    pass
doc_dict = \
{'file_arg':
'''file_name : str
Name of the mat file (do not need .mat extension if
appendmat==True) Can also pass open file-like object.''',
'append_arg':
'''appendmat : bool, optional
True to append the .mat extension to the end of the given
filename, if not already present.''',
'load_args':
'''byte_order : str or None, optional
None by default, implying byte order guessed from mat
file. Otherwise can be one of ('native', '=', 'little', '<',
'BIG', '>').
mat_dtype : bool, optional
If True, return arrays in same dtype as would be loaded into
MATLAB (instead of the dtype with which they are saved).
squeeze_me : bool, optional
Whether to squeeze unit matrix dimensions or not.
chars_as_strings : bool, optional
Whether to convert char arrays to string arrays.
matlab_compatible : bool, optional
Returns matrices as would be loaded by MATLAB (implies
squeeze_me=False, chars_as_strings=False, mat_dtype=True,
struct_as_record=True).''',
'struct_arg':
'''struct_as_record : bool, optional
Whether to load MATLAB structs as numpy record arrays, or as
old-style numpy arrays with dtype=object. Setting this flag to
False replicates the behavior of scipy version 0.7.x (returning
numpy object arrays). The default setting is True, because it
allows easier round-trip load and save of MATLAB files.''',
'matstream_arg':
'''mat_stream : file-like
Object with file API, open for reading.''',
'long_fields':
'''long_field_names : bool, optional
* False - maximum field name length in a structure is 31 characters
which is the documented maximum length. This is the default.
* True - maximum field name length in a structure is 63 characters
which works for MATLAB 7.6''',
'do_compression':
'''do_compression : bool, optional
Whether to compress matrices on write. Default is False.''',
'oned_as':
'''oned_as : {'row', 'column'}, optional
If 'column', write 1-D numpy arrays as column vectors.
If 'row', write 1D numpy arrays as row vectors.''',
'unicode_strings':
'''unicode_strings : bool, optional
If True, write strings as Unicode, else MATLAB usual encoding.'''}
docfiller = doccer.filldoc(doc_dict)
'''
Note on architecture
======================
There are three sets of parameters relevant for reading files. The
first are *file read parameters* - containing options that are common
for reading the whole file, and therefore every variable within that
file. At the moment these are:
* mat_stream
* dtypes (derived from byte code)
* byte_order
* chars_as_strings
* squeeze_me
* struct_as_record (MATLAB 5 files)
* class_dtypes (derived from order code, MATLAB 5 files)
* codecs (MATLAB 5 files)
* uint16_codec (MATLAB 5 files)
Another set of parameters are those that apply only to the current
variable being read - the *header*:
* header related variables (different for v4 and v5 mat files)
* is_complex
* mclass
* var_stream
With the header, we need ``next_position`` to tell us where the next
variable in the stream is.
Then, for each element in a matrix, there can be *element read
parameters*. An element is, for example, one element in a MATLAB cell
array. At the moment these are:
* mat_dtype
The file-reading object contains the *file read parameters*. The
*header* is passed around as a data object, or may be read and discarded
in a single function. The *element read parameters* - the mat_dtype in
this instance, is passed into a general post-processing function - see
``mio_utils`` for details.
'''
def convert_dtypes(dtype_template, order_code):
    ''' Convert dtypes in mapping to given byte order

    Parameters
    ----------
    dtype_template : mapping
        mapping whose values are accepted by ``np.dtype``
    order_code : str
        an order code suitable for using in ``dtype.newbyteorder()``

    Returns
    -------
    dtypes : mapping
        copy of `dtype_template` where every value has been replaced by
        ``np.dtype(val).newbyteorder(order_code)``
    '''
    converted = dtype_template.copy()
    for key in list(converted):
        converted[key] = np.dtype(converted[key]).newbyteorder(order_code)
    return converted
def read_dtype(mat_stream, a_dtype):
    """
    Read one item of dtype `a_dtype` from a byte stream.

    Parameters
    ----------
    mat_stream : file_like object
        MATLAB (tm) mat file stream
    a_dtype : dtype
        dtype of array to read. `a_dtype` is assumed to be correct
        endianness.

    Returns
    -------
    arr : ndarray
        Zero-dimensional array of dtype `a_dtype` read from stream.
    """
    raw = mat_stream.read(a_dtype.itemsize)
    return np.ndarray(shape=(), dtype=a_dtype, buffer=raw, order='F')
def get_matfile_version(fileobj):
    """
    Return major, minor tuple depending on apparent mat file type

    Where:

    #. 0,x -> version 4 format mat files
    #. 1,x -> version 5 format mat files
    #. 2,x -> version 7.3 format mat files (HDF format)

    Parameters
    ----------
    fileobj : file_like
        object implementing seek() and read()

    Returns
    -------
    major_version : {0, 1, 2}
        major MATLAB File format version
    minor_version : int
        minor MATLAB file format version

    Raises
    ------
    MatReadError
        If the file is empty.
    ValueError
        The matfile version is unknown.

    Notes
    -----
    Has the side effect of setting the file read pointer to 0
    """
    # Mat4 files have a zero somewhere in first 4 bytes
    fileobj.seek(0)
    first_bytes = fileobj.read(4)
    if len(first_bytes) == 0:
        raise MatReadError("Mat file appears to be empty")
    first_ints = np.ndarray(shape=(4,), dtype=np.uint8, buffer=first_bytes)
    if 0 in first_ints:
        fileobj.seek(0)
        return (0, 0)
    # For 5 format or 7.3 format we need to read an integer in the
    # header. Bytes 124 through 128 contain a version integer and an
    # endian test string
    fileobj.seek(124)
    tst_str = fileobj.read(4)
    fileobj.seek(0)
    # the endian indicator tells us which of the two version bytes is major
    maj_ind = int(tst_str[2] == b'I'[0])
    maj_val = byteord(tst_str[maj_ind])
    min_val = byteord(tst_str[1 - maj_ind])
    version = (maj_val, min_val)
    if maj_val not in (1, 2):
        raise ValueError('Unknown mat file type, version %s, %s' % version)
    return version
def matdims(arr, oned_as='column'):
    """
    Determine equivalent MATLAB dimensions for given array

    Parameters
    ----------
    arr : ndarray
        Input array
    oned_as : {'column', 'row'}, optional
        Whether 1-D arrays are returned as MATLAB row or column matrices.
        Default is 'column'.

    Returns
    -------
    dims : tuple
        Shape tuple, in the form MATLAB expects it.

    Notes
    -----
    Scalars become 1x1 matrices; empty arrays keep at least two zero
    dimensions; 1-D arrays are oriented according to `oned_as` (column
    orientation is the historical scipy default).

    Examples
    --------
    >>> matdims(np.array(1))  # numpy scalar
    (1, 1)
    >>> matdims(np.array([1,2]))  # 1d array, 2 elements
    (2, 1)
    >>> matdims(np.array([1,2]), 'row')  # optional argument flips 1-D shape
    (1, 2)
    >>> matdims(np.array([[]]))  # empty 2d
    (0, 0)
    """
    shape = arr.shape
    if shape == ():
        # numpy scalar -> 1x1 matrix
        return (1, 1)
    n_elements = reduce(operator.mul, shape)
    if n_elements == 0:
        # empty: at least two zero dimensions
        return (0,) * np.max([arr.ndim, 2])
    if len(shape) != 1:
        # 2-D and above pass through unchanged
        return shape
    # 1-D: orient according to oned_as
    if oned_as == 'column':
        return shape + (1,)
    if oned_as == 'row':
        return (1,) + shape
    raise ValueError('1D option "%s" is strange'
                     % oned_as)
class MatVarReader(object):
    ''' Abstract class defining required interface for var readers

    Concrete readers are constructed from a file reader and must be able to
    read a variable header and then the array it describes.
    '''
    def __init__(self, file_reader):
        # subclasses take whatever they need from the file reader
        pass
    def read_header(self):
        ''' Returns header '''
        pass
    def array_from_header(self, header):
        ''' Reads array given header '''
        pass
class MatFileReader(object):
    """ Base object for reading mat files

    To make this class functional, you will need to override the
    following methods:

    matrix_getter_factory   - gives object to fetch next matrix from stream
    guess_byte_order        - guesses file byte order from file
    """
    @docfiller
    def __init__(self, mat_stream,
                 byte_order=None,
                 mat_dtype=False,
                 squeeze_me=False,
                 chars_as_strings=True,
                 matlab_compatible=False,
                 struct_as_record=True,
                 verify_compressed_data_integrity=True
                 ):
        '''
        Initializer for mat file reader

        mat_stream : file-like
            object with file API, open for reading
    %(load_args)s
        '''
        # Initialize stream
        self.mat_stream = mat_stream
        self.dtypes = {}
        if not byte_order:
            byte_order = self.guess_byte_order()
        else:
            byte_order = boc.to_numpy_code(byte_order)
        self.byte_order = byte_order
        self.struct_as_record = struct_as_record
        # matlab_compatible overrides the individual load options
        if matlab_compatible:
            self.set_matlab_compatible()
        else:
            self.squeeze_me = squeeze_me
            self.chars_as_strings = chars_as_strings
            self.mat_dtype = mat_dtype
        self.verify_compressed_data_integrity = verify_compressed_data_integrity
    def set_matlab_compatible(self):
        ''' Sets options to return arrays as MATLAB loads them '''
        self.mat_dtype = True
        self.squeeze_me = False
        self.chars_as_strings = False
    def guess_byte_order(self):
        ''' As we do not know what file type we have, assume native '''
        return boc.native_code
    def end_of_stream(self):
        '''Return True if the stream has no more data.

        The stream position is left unchanged.  Fix: the previous
        implementation unconditionally rewound the position by one byte,
        which moved the pointer *backwards* when the probe read at EOF had
        consumed nothing; now the probe is only undone after a successful
        one-byte read.
        '''
        b = self.mat_stream.read(1)
        if len(b) == 0:
            # at EOF nothing was consumed, so there is nothing to undo
            return True
        curpos = self.mat_stream.tell()
        self.mat_stream.seek(curpos - 1)
        return False
def arr_dtype_number(arr, num):
    """Return a dtype like ``arr``'s but with ``num`` items per element."""
    # arr.dtype.str looks like '<U4': byte order + kind + width.  Keep the
    # first two characters and substitute the requested width.
    kind_with_order = arr.dtype.str[:2]
    return np.dtype(kind_with_order + str(num))
def arr_to_chars(arr):
    """Convert a string array to a char array.

    Each element of ``arr`` is expanded into a trailing axis of single
    characters; slots corresponding to missing characters (empty strings)
    are filled with spaces.

    Parameters
    ----------
    arr : ndarray
        numpy array of strings.

    Returns
    -------
    ndarray
        char array with one extra trailing dimension whose length is the
        itemsize (in characters) of ``arr``.
    """
    dims = list(arr.shape)
    if not dims:
        # 0-d (scalar) input still yields one row of chars.
        dims = [1]
    # The dtype string is e.g. '<U4'; the trailing digits give the number
    # of characters per element.
    dims.append(int(arr.dtype.str[2:]))
    # Reinterpret the same buffer as single characters (equivalent to
    # arr_dtype_number(arr, 1), inlined here).
    arr = np.ndarray(shape=dims,
                     dtype=np.dtype(arr.dtype.str[:2] + '1'),
                     buffer=arr)
    # Fix: use a plain boolean mask.  The previous code built a one-element
    # list containing the mask and indexed with it; numpy deprecated (and
    # newer versions reject) list-of-boolean-array indices.
    empties = arr == ''
    if not np.any(empties):
        return arr
    arr = arr.copy()
    arr[empties] = ' '
    return arr
| |
import copy
from spec import Spec, eq_, skip, ok_, raises
from invoke.parser import Argument, Context
from invoke.tasks import task
from invoke.collection import Collection
class Context_(Spec):
    "ParserContext" # meh

    # Spec-style test class: each method name states the behavior being
    # asserted; the `spec` runner collects methods (and nested classes) by
    # name, so names double as the specification text.

    def may_have_a_name(self):
        c = Context(name='taskname')
        eq_(c.name, 'taskname')

    def may_have_aliases(self):
        c = Context(name='realname', aliases=('othername', 'yup'))
        assert 'othername' in c.aliases

    def may_give_arg_list_at_init_time(self):
        a1 = Argument('foo')
        a2 = Argument('bar')
        c = Context(name='name', args=(a1, a2))
        # Identity check: the Context stores the given Argument objects,
        # not copies.
        assert c.args['foo'] is a1

    # TODO: reconcile this sort of test organization with the .flags oriented
    # tests within 'add_arg'. Some of this behavior is technically driven by
    # add_arg.
    class args:
        def setup(self):
            self.c = Context(args=(
                Argument('foo'),
                Argument(names=('bar', 'biz')),
                Argument('baz', attr_name='wat'),
            ))

        def exposed_as_dict(self):
            assert 'foo' in self.c.args.keys()

        def exposed_as_Lexicon(self):
            # Lexicon allows both attribute and item access to the same entry.
            eq_(self.c.args.bar, self.c.args['bar'])

        def args_dict_includes_all_arg_names(self):
            for x in ('foo', 'bar', 'biz'):
                assert x in self.c.args

        def argument_attr_names_appear_in_args_but_not_flags(self):
            # Both appear as "Python-facing" args
            for x in ('baz', 'wat'):
                assert x in self.c.args
            # But attr_name is for Python access only and isn't shown to the
            # parser.
            assert 'wat' not in self.c.flags

    class add_arg:
        def setup(self):
            self.c = Context()

        def can_take_Argument_instance(self):
            a = Argument(names=('foo',))
            self.c.add_arg(a)
            assert self.c.args['foo'] is a

        def can_take_name_arg(self):
            self.c.add_arg('foo')
            assert 'foo' in self.c.args

        def can_take_kwargs_for_single_Argument(self):
            self.c.add_arg(names=('foo', 'bar'))
            assert 'foo' in self.c.args and 'bar' in self.c.args

        @raises(ValueError)
        def raises_ValueError_on_duplicate(self):
            self.c.add_arg(names=('foo', 'bar'))
            self.c.add_arg(name='bar')

        def adds_flaglike_name_to_dot_flags(self):
            "adds flaglike name to .flags"
            self.c.add_arg('foo')
            assert '--foo' in self.c.flags

        def adds_all_names_to_dot_flags(self):
            "adds all names to .flags"
            self.c.add_arg(names=('foo', 'bar'))
            assert '--foo' in self.c.flags
            assert '--bar' in self.c.flags

        def adds_true_bools_to_inverse_flags(self):
            # A bool arg defaulting to True gets a generated --no-<name>
            # inverse flag mapping back to the real flag.
            self.c.add_arg(name='myflag', default=True, kind=bool)
            assert '--myflag' in self.c.flags
            assert '--no-myflag' in self.c.inverse_flags
            eq_(self.c.inverse_flags['--no-myflag'], '--myflag')

        def inverse_flags_works_right_with_task_driven_underscored_names(self):
            # Use a Task here instead of creating a raw argument, we're partly
            # testing Task.get_arguments()' transform of underscored names
            # here. Yes that makes this an integration test, but it's nice to
            # test it here at this level & not just in cli tests.
            @task
            def mytask(underscored_option=True):
                pass
            self.c.add_arg(mytask.get_arguments()[0])
            eq_(
                self.c.inverse_flags['--no-underscored-option'],
                '--underscored-option'
            )

        def turns_single_character_names_into_short_flags(self):
            self.c.add_arg('f')
            assert '-f' in self.c.flags
            assert '--f' not in self.c.flags

        def adds_positional_args_to_positional_args(self):
            self.c.add_arg(name='pos', positional=True)
            eq_(self.c.positional_args[0].name, 'pos')

        def positional_args_empty_when_none_given(self):
            eq_(len(self.c.positional_args), 0)

        def positional_args_filled_in_order(self):
            # Order of addition, not alphabetical order, is preserved.
            self.c.add_arg(name='pos1', positional=True)
            eq_(self.c.positional_args[0].name, 'pos1')
            self.c.add_arg(name='abc', positional=True)
            eq_(self.c.positional_args[1].name, 'abc')

        def positional_arg_modifications_affect_args_copy(self):
            # .args and .positional_args must share the same Argument
            # objects, so a write through one view is seen through the other.
            self.c.add_arg(name='hrm', positional=True)
            eq_(self.c.args['hrm'].value, self.c.positional_args[0].value)
            self.c.positional_args[0].value = 17
            eq_(self.c.args['hrm'].value, self.c.positional_args[0].value)

    class deepcopy:
        "__deepcopy__"
        def setup(self):
            self.arg = Argument('--boolean')
            self.orig = Context(
                name='mytask',
                args=(self.arg,),
                aliases=('othername',)
            )
            self.new = copy.deepcopy(self.orig)

        def returns_correct_copy(self):
            assert self.new is not self.orig
            eq_(self.new.name, 'mytask')
            assert 'othername' in self.new.aliases

        def includes_arguments(self):
            eq_(len(self.new.args), 1)
            # Arguments are deep-copied, not shared.
            assert self.new.args['--boolean'] is not self.arg

        def modifications_to_copied_arguments_do_not_touch_originals(self):
            new_arg = self.new.args['--boolean']
            new_arg.value = True
            assert new_arg.value
            assert not self.arg.value

    class help_for:
        def setup(self):
            # Normal, non-task/collection related Context
            self.vanilla = Context(args=(
                Argument('foo'),
                Argument('bar', help="bar the baz")
            ))
            # Task/Collection generated Context
            # (will expose flags n such)
            @task(help={'otherarg': 'other help'}, optional=['optval'])
            def mytask(myarg, otherarg, optval):
                pass
            col = Collection(mytask)
            self.tasked = col.to_contexts()[0]

        @raises(ValueError)
        def raises_ValueError_for_non_flag_values(self):
            self.vanilla.help_for('foo')

        def vanilla_no_helpstr(self):
            eq_(
                self.vanilla.help_for('--foo'),
                ("--foo=STRING", "")
            )

        def vanilla_with_helpstr(self):
            eq_(
                self.vanilla.help_for('--bar'),
                ("--bar=STRING", "bar the baz")
            )

        def task_driven_with_helpstr(self):
            eq_(
                self.tasked.help_for('--otherarg'),
                ("-o STRING, --otherarg=STRING", "other help")
            )

        # Yes, the next 3 tests are identical in form, but technically they
        # test different behaviors. HERPIN' AN' DERPIN'
        def task_driven_no_helpstr(self):
            eq_(
                self.tasked.help_for('--myarg'),
                ("-m STRING, --myarg=STRING", "")
            )

        def short_form_before_long_form(self):
            eq_(
                self.tasked.help_for('--myarg'),
                ("-m STRING, --myarg=STRING", "")
            )

        def equals_sign_for_long_form_only(self):
            eq_(
                self.tasked.help_for('--myarg'),
                ("-m STRING, --myarg=STRING", "")
            )

        def kind_to_placeholder_map(self):
            # str=STRING, int=INT, etc etc
            skip()

        def shortflag_inputs_work_too(self):
            eq_(self.tasked.help_for('-m'), self.tasked.help_for('--myarg'))

        def optional_values_use_brackets(self):
            eq_(
                self.tasked.help_for('--optval'),
                ("-p [STRING], --optval[=STRING]", "")
            )

        def underscored_args(self):
            # Underscores in Python names are dashed in the CLI flag.
            c = Context(args=(Argument('i_have_underscores', help='yup'),))
            eq_(c.help_for('--i-have-underscores'), ('--i-have-underscores=STRING', 'yup'))

    class help_tuples:
        def returns_list_of_help_tuples(self):
            # Walks own list of flags/args, ensures resulting map to help_for()
            # TODO: consider redoing help_for to be more flexible on input --
            # arg value or flag; or even Argument objects. ?
            @task(help={'otherarg': 'other help'})
            def mytask(myarg, otherarg):
                pass
            c = Collection(mytask).to_contexts()[0]
            eq_(
                c.help_tuples(),
                [c.help_for('--myarg'), c.help_for('--otherarg')]
            )

        def _assert_order(self, name_tuples, expected_flag_order):
            # Helper (leading underscore keeps it out of spec collection):
            # build a Context from the given Argument name tuples and assert
            # help_tuples() comes back in the order implied by
            # expected_flag_order.
            ctx = Context(args=[Argument(names=x) for x in name_tuples])
            return eq_(
                ctx.help_tuples(),
                [ctx.help_for(x) for x in expected_flag_order]
            )

        def sorts_alphabetically_by_shortflag_first(self):
            # Where shortflags exist, they take precedence
            self._assert_order(
                [('zarg', 'a'), ('arg', 'z')],
                ['--zarg', '--arg']
            )

        def case_ignored_during_sorting(self):
            self._assert_order(
                [('a',), ('B',)],
                # In raw cmp() uppercase would come before lowercase,
                # and we'd get ['-B', '-a']
                ['-a', '-B']
            )

        def lowercase_wins_when_values_identical_otherwise(self):
            self._assert_order(
                [('V',), ('v',)],
                ['-v', '-V']
            )

        def sorts_alphabetically_by_longflag_when_no_shortflag(self):
            # Where no shortflag, sorts by longflag
            self._assert_order(
                [('otherarg',), ('longarg',)],
                ['--longarg', '--otherarg']
            )

        def sorts_heterogenous_help_output_with_longflag_only_options_first(self):
            # When both of the above mix, long-flag-only options come first.
            # E.g.:
            #   --alpha
            #   --beta
            #   -a, --aaaagh
            #   -b, --bah
            #   -c
            self._assert_order(
                [('c',), ('a', 'aaagh'), ('b', 'bah'), ('beta',), ('alpha',)],
                ['--alpha', '--beta', '-a', '-b', '-c']
            )

        def mixed_corelike_options(self):
            self._assert_order(
                [('V', 'version'), ('c', 'collection'), ('h', 'help'),
                 ('l', 'list'), ('r', 'root')],
                ['-c', '-h', '-l', '-r', '-V']
            )

    class needs_positional_arg:
        def represents_whether_all_positional_args_have_values(self):
            c = Context(name='foo', args=(
                Argument('arg1', positional=True),
                Argument('arg2', positional=False),
                Argument('arg3', positional=True),
            ))
            # Two positionals unfilled -> still needy; note positional_args
            # excludes 'arg2', so index 1 below is 'arg3'.
            eq_(c.needs_positional_arg, True)
            c.positional_args[0].value = 'wat'
            eq_(c.needs_positional_arg, True)
            c.positional_args[1].value = 'hrm'
            eq_(c.needs_positional_arg, False)

    class str:
        "__str__"
        def with_no_args_output_is_simple(self):
            eq_(str(Context('foo')), "<Context 'foo'>")

        def args_show_as_repr(self):
            eq_(
                str(Context('bar', args=[Argument('arg1')])),
                "<Context 'bar': {'arg1': <Argument: arg1>}>"
            )

        def repr_is_str(self):
            "__repr__ mirrors __str__"
            c = Context('foo')
            eq_(str(c), repr(c))
| |
# Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from opencensus.trace.propagation import google_cloud_format
class TestGoogleCloudFormatPropagator(unittest.TestCase):
    """Tests for the google cloud (X-Cloud-Trace-Context) propagator.

    Header format exercised below: ``TRACE_ID[/SPAN_ID][;o=OPTIONS]``.
    The span id is a decimal integer in the header but is exposed as a
    16-char hex string on the span context
    (67667974448284343 == 0x00f067aa0ba902b7); ``o`` carries the
    trace-enabled bit.
    """

    def test_from_header_no_header(self):
        # Missing header -> a new, valid SpanContext is generated.
        from opencensus.trace.span_context import SpanContext

        propagator = google_cloud_format.GoogleCloudFormatPropagator()
        span_context = propagator.from_header(None)

        assert isinstance(span_context, SpanContext)

    def test_from_headers_none(self):
        from opencensus.trace.span_context import SpanContext

        propagator = google_cloud_format.GoogleCloudFormatPropagator()
        span_context = propagator.from_headers(None)

        assert isinstance(span_context, SpanContext)

    def test_from_headers_empty(self):
        from opencensus.trace.span_context import SpanContext

        propagator = google_cloud_format.GoogleCloudFormatPropagator()
        span_context = propagator.from_headers({})

        assert isinstance(span_context, SpanContext)

    def test_header_type_error(self):
        # Non-string header input must raise, not be silently coerced.
        header = 1234

        propagator = google_cloud_format.GoogleCloudFormatPropagator()

        with self.assertRaises(TypeError):
            propagator.from_header(header)

    def test_header_match(self):
        # Trace option is not enabled.
        header = '6e0c63257de34c92bf9efcd03927272e/67667974448284343;o=0'
        expected_trace_id = '6e0c63257de34c92bf9efcd03927272e'
        expected_span_id = '00f067aa0ba902b7'

        propagator = google_cloud_format.GoogleCloudFormatPropagator()
        span_context = propagator.from_header(header)

        self.assertEqual(span_context.trace_id, expected_trace_id)
        self.assertEqual(span_context.span_id, expected_span_id)
        self.assertFalse(span_context.trace_options.enabled)

        # Trace option is enabled.
        header = '6e0c63257de34c92bf9efcd03927272e/67667974448284343;o=1'
        expected_trace_id = '6e0c63257de34c92bf9efcd03927272e'
        expected_span_id = '00f067aa0ba902b7'

        propagator = google_cloud_format.GoogleCloudFormatPropagator()
        span_context = propagator.from_header(header)

        self.assertEqual(span_context.trace_id, expected_trace_id)
        self.assertEqual(span_context.span_id, expected_span_id)
        self.assertTrue(span_context.trace_options.enabled)

    def test_header_match_no_span_id(self):
        # Trace option is not enabled.
        header = '6e0c63257de34c92bf9efcd03927272e;o=0'
        expected_trace_id = '6e0c63257de34c92bf9efcd03927272e'
        expected_span_id = None

        propagator = google_cloud_format.GoogleCloudFormatPropagator()
        span_context = propagator.from_header(header)

        self.assertEqual(span_context.trace_id, expected_trace_id)
        self.assertEqual(span_context.span_id, expected_span_id)
        self.assertFalse(span_context.trace_options.enabled)

        # Trace option is enabled.
        header = '6e0c63257de34c92bf9efcd03927272e;o=1'
        expected_trace_id = '6e0c63257de34c92bf9efcd03927272e'
        expected_span_id = None

        propagator = google_cloud_format.GoogleCloudFormatPropagator()
        span_context = propagator.from_header(header)

        self.assertEqual(span_context.trace_id, expected_trace_id)
        self.assertEqual(span_context.span_id, expected_span_id)
        self.assertTrue(span_context.trace_options.enabled)

    def test_header_match_empty_span_id(self):
        # A trailing '/' with no span id behaves like a missing span id.
        # Trace option is not enabled.
        header = '6e0c63257de34c92bf9efcd03927272e/;o=0'
        expected_trace_id = '6e0c63257de34c92bf9efcd03927272e'
        expected_span_id = None

        propagator = google_cloud_format.GoogleCloudFormatPropagator()
        span_context = propagator.from_header(header)

        self.assertEqual(span_context.trace_id, expected_trace_id)
        self.assertEqual(span_context.span_id, expected_span_id)
        self.assertFalse(span_context.trace_options.enabled)

        # Trace option is enabled.
        header = '6e0c63257de34c92bf9efcd03927272e/;o=1'
        expected_trace_id = '6e0c63257de34c92bf9efcd03927272e'
        expected_span_id = None

        propagator = google_cloud_format.GoogleCloudFormatPropagator()
        span_context = propagator.from_header(header)

        self.assertEqual(span_context.trace_id, expected_trace_id)
        self.assertEqual(span_context.span_id, expected_span_id)
        self.assertTrue(span_context.trace_options.enabled)

    def test_header_match_no_option(self):
        # Missing ';o=' suffix defaults to trace enabled.
        header = '6e0c63257de34c92bf9efcd03927272e/67667974448284343'
        expected_trace_id = '6e0c63257de34c92bf9efcd03927272e'
        expected_span_id = '00f067aa0ba902b7'

        propagator = google_cloud_format.GoogleCloudFormatPropagator()
        span_context = propagator.from_header(header)

        self.assertEqual(span_context.trace_id, expected_trace_id)
        self.assertEqual(span_context.span_id, expected_span_id)
        self.assertTrue(span_context.trace_options.enabled)

    def test_header_not_match(self):
        # Invalid trace id -> propagator generates a fresh context instead
        # of propagating the bogus id.
        header = 'invalid_trace_id/66666;o=1'
        trace_id = 'invalid_trace_id'

        propagator = google_cloud_format.GoogleCloudFormatPropagator()
        span_context = propagator.from_header(header)

        self.assertNotEqual(span_context.trace_id, trace_id)

    def test_headers_match(self):
        # Trace option is enabled.
        headers = {
            'X-Cloud-Trace-Context':
                '6e0c63257de34c92bf9efcd03927272e/67667974448284343;o=1',
        }
        expected_trace_id = '6e0c63257de34c92bf9efcd03927272e'
        expected_span_id = '00f067aa0ba902b7'

        propagator = google_cloud_format.GoogleCloudFormatPropagator()
        span_context = propagator.from_headers(headers)

        self.assertEqual(span_context.trace_id, expected_trace_id)
        self.assertEqual(span_context.span_id, expected_span_id)
        self.assertTrue(span_context.trace_options.enabled)

    def test_to_header(self):
        from opencensus.trace import span_context
        from opencensus.trace import trace_options

        trace_id = '6e0c63257de34c92bf9efcd03927272e'
        span_id = '00f067aa0ba902b7'
        span_context = span_context.SpanContext(
            trace_id=trace_id,
            span_id=span_id,
            trace_options=trace_options.TraceOptions('1'))

        propagator = google_cloud_format.GoogleCloudFormatPropagator()

        header = propagator.to_header(span_context)
        # Hex span id must be serialized back to decimal in the header.
        expected_header = '{}/{};o={}'.format(
            trace_id, int(span_id, 16), 1)

        self.assertEqual(header, expected_header)

    def test_to_headers(self):
        from opencensus.trace import span_context
        from opencensus.trace import trace_options

        trace_id = '6e0c63257de34c92bf9efcd03927272e'
        span_id = '00f067aa0ba902b7'
        span_context = span_context.SpanContext(
            trace_id=trace_id,
            span_id=span_id,
            trace_options=trace_options.TraceOptions('1'))

        propagator = google_cloud_format.GoogleCloudFormatPropagator()

        headers = propagator.to_headers(span_context)
        expected_headers = {
            'X-Cloud-Trace-Context': '{}/{};o={}'.format(
                trace_id, int(span_id, 16), 1),
        }

        self.assertEqual(headers, expected_headers)
| |
"""The tests for the integration sensor platform."""
from datetime import timedelta
from unittest.mock import patch
from homeassistant.components.sensor import SensorDeviceClass, SensorStateClass
from homeassistant.const import (
ENERGY_KILO_WATT_HOUR,
ENERGY_WATT_HOUR,
POWER_WATT,
STATE_UNKNOWN,
TIME_SECONDS,
)
from homeassistant.core import HomeAssistant, State
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import mock_restore_cache
async def test_state(hass: HomeAssistant) -> None:
    """Test integration sensor state."""
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.power",
            "unit": ENERGY_KILO_WATT_HOUR,
            "round": 2,
        }
    }

    now = dt_util.utcnow()
    # Freeze time so the integration window starts at a known instant.
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        assert await async_setup_component(hass, "sensor", config)

        entity_id = config["sensor"]["source"]
        hass.states.async_set(entity_id, 1, {})
        await hass.async_block_till_done()

    state = hass.states.get("sensor.integration")
    assert state is not None

    assert state.attributes.get("state_class") is SensorStateClass.TOTAL
    # Source state carried no device_class, so none is inherited yet.
    assert "device_class" not in state.attributes

    future_now = dt_util.utcnow() + timedelta(seconds=3600)
    with patch("homeassistant.util.dt.utcnow", return_value=future_now):
        hass.states.async_set(
            entity_id, 1, {"device_class": SensorDeviceClass.POWER}, force_update=True
        )
        await hass.async_block_till_done()

    state = hass.states.get("sensor.integration")
    assert state is not None

    # Testing a power sensor at 1 KiloWatts for 1hour = 1kWh
    assert round(float(state.state), config["sensor"]["round"]) == 1.0

    assert state.attributes.get("unit_of_measurement") == ENERGY_KILO_WATT_HOUR
    assert state.attributes.get("device_class") == SensorDeviceClass.ENERGY
    assert state.attributes.get("state_class") is SensorStateClass.TOTAL
async def test_restore_state(hass: HomeAssistant) -> None:
    """Test integration sensor state is restored correctly."""
    # Seed the restore cache with a previously recorded state.
    mock_restore_cache(
        hass,
        (
            State(
                "sensor.integration",
                "100.0",
                {
                    "device_class": SensorDeviceClass.ENERGY,
                    "unit_of_measurement": ENERGY_KILO_WATT_HOUR,
                },
            ),
        ),
    )

    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.power",
            "round": 2,
        }
    }

    assert await async_setup_component(hass, "sensor", config)
    await hass.async_block_till_done()

    state = hass.states.get("sensor.integration")
    assert state
    # Restored value is re-rendered with the configured rounding (2 digits).
    assert state.state == "100.00"
    assert state.attributes.get("unit_of_measurement") == ENERGY_KILO_WATT_HOUR
    assert state.attributes.get("device_class") == SensorDeviceClass.ENERGY
async def test_restore_state_failed(hass: HomeAssistant) -> None:
    """Test restore is skipped when the saved state is not a valid number."""
    # Non-numeric saved state: restoration should be rejected and the
    # sensor should come up unknown with no inherited attributes.
    mock_restore_cache(
        hass,
        (
            State(
                "sensor.integration",
                "INVALID",
                {
                    "last_reset": "2019-10-06T21:00:00.000000",
                },
            ),
        ),
    )

    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.power",
        }
    }

    assert await async_setup_component(hass, "sensor", config)
    await hass.async_block_till_done()

    state = hass.states.get("sensor.integration")
    assert state
    assert state.state == "unknown"
    assert state.attributes.get("unit_of_measurement") is None
    assert state.attributes.get("state_class") is SensorStateClass.TOTAL

    assert "device_class" not in state.attributes
async def test_trapezoidal(hass: HomeAssistant) -> None:
    """Test integration sensor state."""
    # No "method" configured: exercises the default (trapezoidal) rule.
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.power",
            "unit": ENERGY_KILO_WATT_HOUR,
            "round": 2,
        }
    }

    assert await async_setup_component(hass, "sensor", config)

    entity_id = config["sensor"]["source"]
    hass.states.async_set(entity_id, 0, {})
    await hass.async_block_till_done()

    # Testing a power sensor with non-monotonic intervals and values
    for time, value in [(20, 10), (30, 30), (40, 5), (50, 0)]:
        now = dt_util.utcnow() + timedelta(minutes=time)
        with patch("homeassistant.util.dt.utcnow", return_value=now):
            hass.states.async_set(entity_id, value, {}, force_update=True)
            await hass.async_block_till_done()

    state = hass.states.get("sensor.integration")
    assert state is not None

    assert round(float(state.state), config["sensor"]["round"]) == 8.33

    assert state.attributes.get("unit_of_measurement") == ENERGY_KILO_WATT_HOUR
async def test_left(hass: HomeAssistant) -> None:
    """Test integration sensor state with left Riemann method."""
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "method": "left",
            "source": "sensor.power",
            "unit": ENERGY_KILO_WATT_HOUR,
            "round": 2,
        }
    }

    assert await async_setup_component(hass, "sensor", config)

    entity_id = config["sensor"]["source"]
    hass.states.async_set(entity_id, 0, {})
    await hass.async_block_till_done()

    # Testing a power sensor with non-monotonic intervals and values
    for time, value in [(20, 10), (30, 30), (40, 5), (50, 0)]:
        now = dt_util.utcnow() + timedelta(minutes=time)
        with patch("homeassistant.util.dt.utcnow", return_value=now):
            hass.states.async_set(entity_id, value, {}, force_update=True)
            await hass.async_block_till_done()

    state = hass.states.get("sensor.integration")
    assert state is not None

    # Left rule weights each interval by the value at its start.
    assert round(float(state.state), config["sensor"]["round"]) == 7.5

    assert state.attributes.get("unit_of_measurement") == ENERGY_KILO_WATT_HOUR
async def test_right(hass: HomeAssistant) -> None:
    """Test integration sensor state with right Riemann method."""
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "method": "right",
            "source": "sensor.power",
            "unit": ENERGY_KILO_WATT_HOUR,
            "round": 2,
        }
    }

    assert await async_setup_component(hass, "sensor", config)

    entity_id = config["sensor"]["source"]
    hass.states.async_set(entity_id, 0, {})
    await hass.async_block_till_done()

    # Testing a power sensor with non-monotonic intervals and values
    for time, value in [(20, 10), (30, 30), (40, 5), (50, 0)]:
        now = dt_util.utcnow() + timedelta(minutes=time)
        with patch("homeassistant.util.dt.utcnow", return_value=now):
            hass.states.async_set(entity_id, value, {}, force_update=True)
            await hass.async_block_till_done()

    state = hass.states.get("sensor.integration")
    assert state is not None

    # Right rule weights each interval by the value at its end.
    assert round(float(state.state), config["sensor"]["round"]) == 9.17

    assert state.attributes.get("unit_of_measurement") == ENERGY_KILO_WATT_HOUR
async def test_prefix(hass: HomeAssistant) -> None:
    """Test integration sensor state using a power source."""
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.power",
            "round": 2,
            # 'k' prefix: output in kWh instead of Wh.
            "unit_prefix": "k",
        }
    }

    assert await async_setup_component(hass, "sensor", config)

    entity_id = config["sensor"]["source"]
    hass.states.async_set(entity_id, 1000, {"unit_of_measurement": POWER_WATT})
    await hass.async_block_till_done()

    now = dt_util.utcnow() + timedelta(seconds=3600)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.states.async_set(
            entity_id, 1000, {"unit_of_measurement": POWER_WATT}, force_update=True
        )
        await hass.async_block_till_done()

    state = hass.states.get("sensor.integration")
    assert state is not None

    # Testing a power sensor at 1000 Watts for 1hour = 1kWh
    assert round(float(state.state), config["sensor"]["round"]) == 1.0
    assert state.attributes.get("unit_of_measurement") == ENERGY_KILO_WATT_HOUR
async def test_suffix(hass: HomeAssistant) -> None:
    """Test integration sensor state using a network counter source."""
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.bytes_per_second",
            "round": 2,
            "unit_prefix": "k",
            # Integrate over seconds (source unit is per-second).
            "unit_time": TIME_SECONDS,
        }
    }

    assert await async_setup_component(hass, "sensor", config)

    entity_id = config["sensor"]["source"]
    hass.states.async_set(entity_id, 1000, {})
    await hass.async_block_till_done()

    now = dt_util.utcnow() + timedelta(seconds=10)
    with patch("homeassistant.util.dt.utcnow", return_value=now):
        hass.states.async_set(entity_id, 1000, {}, force_update=True)
        await hass.async_block_till_done()

    state = hass.states.get("sensor.integration")
    assert state is not None

    # Testing a network speed sensor at 1000 bytes/s over 10s = 10kbytes
    assert round(float(state.state)) == 10
async def test_units(hass: HomeAssistant) -> None:
    """Test integration sensor units using a power source."""
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.power",
        }
    }

    assert await async_setup_component(hass, "sensor", config)

    entity_id = config["sensor"]["source"]
    # This replicates the current sequence when HA starts up in a real runtime
    # by updating the base sensor state before the base sensor's units
    # or state have been correctly populated. Those interim updates
    # include states of None and Unknown
    hass.states.async_set(entity_id, 100, {"unit_of_measurement": None})
    await hass.async_block_till_done()

    hass.states.async_set(entity_id, 200, {"unit_of_measurement": None})
    await hass.async_block_till_done()

    hass.states.async_set(entity_id, 300, {"unit_of_measurement": POWER_WATT})
    await hass.async_block_till_done()

    state = hass.states.get("sensor.integration")
    assert state is not None

    # Testing the sensor ignored the source sensor's units until
    # they became valid
    assert state.attributes.get("unit_of_measurement") == ENERGY_WATT_HOUR
async def test_device_class(hass: HomeAssistant) -> None:
    """Test integration sensor device class using a power source."""
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.power",
        }
    }

    assert await async_setup_component(hass, "sensor", config)

    entity_id = config["sensor"]["source"]
    # This replicates the current sequence when HA starts up in a real runtime
    # by updating the base sensor state before the base sensor's units
    # or state have been correctly populated. Those interim updates
    # include states of None and Unknown
    hass.states.async_set(entity_id, STATE_UNKNOWN, {})
    await hass.async_block_till_done()

    hass.states.async_set(entity_id, 100, {"device_class": None})
    await hass.async_block_till_done()

    hass.states.async_set(entity_id, 200, {"device_class": None})
    await hass.async_block_till_done()

    state = hass.states.get("sensor.integration")
    # No valid device_class seen yet -> none set on the integral sensor.
    assert "device_class" not in state.attributes

    hass.states.async_set(
        entity_id, 300, {"device_class": SensorDeviceClass.POWER}, force_update=True
    )
    await hass.async_block_till_done()

    state = hass.states.get("sensor.integration")
    assert state is not None

    # Testing the sensor ignored the source sensor's device class until
    # it became valid
    assert state.attributes.get("device_class") == SensorDeviceClass.ENERGY
async def test_calc_errors(hass: HomeAssistant) -> None:
    """Test integration sensor handling of calculation errors."""
    config = {
        "sensor": {
            "platform": "integration",
            "name": "integration",
            "source": "sensor.power",
        }
    }

    assert await async_setup_component(hass, "sensor", config)

    entity_id = config["sensor"]["source"]

    hass.states.async_set(entity_id, None, {})
    await hass.async_block_till_done()

    state = hass.states.get("sensor.integration")

    # With the source sensor in a None state, the Riemann sensor should be
    # unknown
    assert state is not None
    assert state.state == STATE_UNKNOWN

    # Moving from an unknown state to a value is a calc error and should
    # not change the value of the Riemann sensor.
    hass.states.async_set(entity_id, 0, {"device_class": None})
    await hass.async_block_till_done()

    state = hass.states.get("sensor.integration")
    assert state is not None
    assert state.state == STATE_UNKNOWN

    # With the source sensor updated successfully, the Riemann sensor
    # should have a zero (known) value.
    hass.states.async_set(entity_id, 1, {"device_class": None})
    await hass.async_block_till_done()

    state = hass.states.get("sensor.integration")
    assert state is not None
    assert round(float(state.state)) == 0
| |
from keras.models import Model, model_from_json
from keras.layers import Flatten, Dense, BatchNormalization, Dropout, Reshape, Permute, Activation, Input
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, Adam
from keras.callbacks import CSVLogger, ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TensorBoard
from keras import backend as K
import numpy as np
import h5py
import cv2
from glob import glob
from time import time
from os.path import isfile
from random import shuffle
import matplotlib.pyplot as plt
# Wall-clock timestamp taken at import time; presumably used to report
# total run duration at the end of the script -- TODO confirm (usage not
# visible in this chunk).
start_time = time()

# Operational crib notes for running this script on the training box:
# source activate deepenv1
# nohup python train.py &
# ps -ef | grep train.py
# kill UID
def train(db, keys, avg, batch_size, epochs, nb_tr, nb_val , samples=None, val_samples=None, labels=True, scale_affords= False):
    """Train the AlexNet affordance model via fit_generator and save it.

    db -- affordance table indexed by image number (see our_datagen).
    keys -- image file paths; the first nb_tr are used for training and
        the next nb_val for validation.
    avg -- mean image subtracted from every input.
    samples / val_samples -- batches per epoch; derived from
        nb_tr/batch_size and nb_val/batch_size when not given.

    NOTE(review): the `labels` and `scale_affords` parameters are ignored
    below -- the generator calls hard-code labels=True and pass the module
    global `scale_out` instead.  Confirm intent before relying on them.

    Relies on module globals: pretrained, weights_filename, model_filename,
    scale_out, and the callback objects csvlog, reduce_lr, mdlchkpt,
    tbCallBack (defined elsewhere in this script).
    """
    if samples is None:
        samples = int(nb_tr/batch_size)
    if val_samples is None:
        val_samples = int(nb_val/batch_size)
    # Resume from saved weights when available, otherwise start fresh.
    if pretrained and isfile(weights_filename):
        model = alexnet(weights_path=weights_filename)
    else:
        model = alexnet()
    model.fit_generator( our_datagen(db, keys[0:nb_tr], avg, batch_size, labels=True,scale_affords=scale_out),
                samples_per_epoch = samples, nb_epoch = epochs,
                verbose=2, callbacks=[csvlog, reduce_lr, mdlchkpt,tbCallBack],
                validation_data=our_datagen(db, keys[nb_tr:nb_tr+nb_val], avg, batch_size, labels=True,scale_affords=scale_out),
                nb_val_samples=val_samples)
    model.save(model_filename)
    model.save_weights(weights_filename)
    return model
def alexnet(weights_path=None):
    """
    Returns a keras model for a CNN.

    input data are of the shape (227,227), and the colors in the RGB order (default)

    model: The keras model for this convnet
    output_dict: Dict of feature layers, asked for in output_layers.
    """
    # Uses module globals: `dim` (input shape) and `same_size` (whether to
    # keep the full-size input and add a final pooling stage).
    inputs = Input(shape=dim)
    conv_1 = Convolution2D(96, 11, 11, subsample=(4, 4), activation='relu', name='conv_1')(inputs)
    # initial weights filler? gaussian, std 0.01
    conv_2 = MaxPooling2D((3, 3), strides=(2, 2))(conv_1)
    conv_2 = BatchNormalization()(conv_2)
    # in caffe: Local Response Normalization (LRN)
    # alpha = 1e-4, k=2, beta=0.75, n=5,
    #conv_2 = ZeroPadding2D((2, 2))(conv_2)
    conv_2 = Convolution2D(256, 5, 5, activation="relu", name='conv_2')(conv_2)
    conv_3 = MaxPooling2D((3, 3), strides=(2, 2))(conv_2)
    conv_3 = BatchNormalization()(conv_3)
    #conv_3 = ZeroPadding2D((1, 1))(conv_3)
    conv_3 = Convolution2D(384, 3, 3, activation='relu', name='conv_3')(conv_3)
    #conv_3 = ZeroPadding2D((1, 1))(conv_3)
    conv_4 = Convolution2D(384, 3, 3, activation="relu", name='conv_4')(conv_3)
    #conv_4 = ZeroPadding2D((1, 1))(conv_4)
    conv_5 = Convolution2D(256, 3, 3, activation="relu", name='conv_5')(conv_4)
    if same_size is True:
        dense_1 = MaxPooling2D((3, 3), strides=(2, 2), name="convpool_5")(conv_5)
        dense_1 = Flatten(name="flatten")(dense_1)
    else:
        dense_1 = Flatten(name="flatten")(conv_5)#(dense_1)
    # initial weights filler? gaussian, std 0.005
    dense_1 = Dense(4096, activation='relu', name='dense_1')(dense_1)
    dense_2 = Dropout(0.5)(dense_1)
    dense_2 = Dense(4096, activation='relu', name='dense_2')(dense_2)
    dense_3 = Dropout(0.5)(dense_2)
    # initial weights filler? gaussian, std 0.01
    dense_3 = Dense(256, activation='relu', name='dense_3')(dense_3)
    dense_4 = Dropout(0.5)(dense_3)
    # output: 14 affordances, gaussian std 0.01
    dense_4 = Dense(13, activation='linear', name='dense_4')(dense_4)
    # dense_4 = Dense(14, activation='linear', name='dense_4')(dense_4)
    model = Model(input=inputs, output=dense_4)
    model.summary()
    # NOTE(review): raw_input is Python 2 only, and this interactive pause
    # blocks unattended (nohup) runs -- confirm it is still wanted.
    raw_input("Press Enter to continue...")
    if weights_path:
        model.load_weights(weights_path)
    # sgd = SGD(lr=0.01, decay=0.0005, momentum=0.9) # nesterov=True) # LSTM
    adam = Adam(lr=5e-4)
    model.compile(optimizer=adam, loss='mse',metrics=['mae']) # try cross-entropy
    return model
def our_datagen(db, keys, avg,batch_size,labels=True,scale_affords=False):
    """Yield (X, Y) training batches, or X only when labels is False.

    db -- affordance table; row j-1 holds [image_number, affordance_1..13].
    keys -- image file paths named by 8-digit image number.
    avg -- mean image subtracted from every input.
    scale_affords -- when True, squash ground truth with scale_output().

    Relies on module globals `dim` and `same_size`, and on helper
    scale_output().
    """
    n = int(len(keys) / batch_size)
    affordance_dim = 13
    for index in range(0, n):
        xdim = (batch_size,) + dim
        X_train = np.zeros(xdim)
        Y_train = np.zeros((batch_size, affordance_dim))
        # Fix: step through keys in consecutive, non-overlapping batches.
        # The previous slice keys[index:(index+batch_size)] advanced by a
        # single key per iteration, so successive batches overlapped by
        # batch_size-1 samples and most of `keys` was never used.
        batch_keys = keys[index * batch_size:(index + 1) * batch_size]
        for i, key in enumerate(batch_keys):
            img = cv2.imread(key)
            # img.shape = 210x280x3
            if not same_size:
                img = cv2.resize(img, (64, 64))
            img = img / 255.0
            img = np.subtract(img, avg)
            if K.image_dim_ordering() == 'th':
                # Theano ordering wants channels first: HWC -> CHW.
                img = np.swapaxes(img, 1, 2)
                img = np.swapaxes(img, 0, 1)
            X_train[i] = img
            if labels is True:
                # Image files are named by 8-digit index; affordance row
                # j-1 must correspond to image j.
                j = int(key[-12:-4])
                affordances = db[j - 1]
                if int(affordances[0]) != j:
                    raise ValueError('Image and affordance do not match: ' + str(j))
                affordances = affordances[1:(affordance_dim+1)]
                if scale_affords is True:
                    affordances = scale_output(affordances)
                affordances = affordances.reshape(1, affordance_dim)
                Y_train[i] = affordances
        if labels is True:
            yield X_train, Y_train
        else:
            yield X_train
def predict_affordances(db, keys, avg, model, batch_size, verbose=0, scale_affords=False):
    """Predict affordances one image at a time over len(keys)//batch_size batches.

    Returns (Y_pred, Y_true, err, err_avg) where err is the per-sample absolute
    error and err_avg its mean over all predicted samples.
    """
    affordance_dim = 13
    nb = len(keys) // batch_size
    Y_true = np.zeros((nb * batch_size, affordance_dim))
    Y_pred = np.zeros((nb * batch_size, affordance_dim))
    err = np.zeros((nb * batch_size, affordance_dim))
    for index in range(nb):
        # BUG FIX: slice disjoint batches. The original used
        # keys[index:index + batch_size], which re-predicted overlapping keys
        # while the output rows below are indexed disjointly via
        # i + index * batch_size.
        batch_keys = keys[index * batch_size:(index + 1) * batch_size]
        for i, key in enumerate(batch_keys):
            row = i + index * batch_size
            img = cv2.imread(key)
            # Source frames are 210x280x3; optionally downsample.
            if not same_size:
                img = cv2.resize(img, (64, 64))
            img = img / 255.0
            img = np.subtract(img, avg)
            if K.image_dim_ordering() == 'th':
                # Theano ordering: HWC -> CHW
                img = np.swapaxes(img, 1, 2)
                img = np.swapaxes(img, 0, 1)
            img = np.expand_dims(img, axis=0)  # single-sample batch for predict()
            # File names end in an 8-digit, 1-based frame number.
            j = int(key[-12:-4])
            affordances = db[j - 1]
            if int(affordances[0]) != j:
                raise ValueError('Image and affordance do not match: ' + str(j))
            affordances = affordances[1:(affordance_dim + 1)]
            if scale_affords:
                affordances = scale_output(affordances)
            affordances = affordances.reshape(1, affordance_dim)
            affords_pred = model.predict(img)
            Y_true[row] = affordances
            Y_pred[row] = affords_pred
            err[row] = np.abs(affords_pred - affordances)
        # BUG FIX: `verbose is 1` compared object identity, not value.
        if verbose == 1:
            print('Number of samples predicted so far:' + str((index + 1) * batch_size))
    err_avg = err.mean(axis=0)
    return Y_pred, Y_true, err, err_avg
def scale_output(affordances):
    ''' Scale output between [0.1, 0.9]

    Normalizes each of the 13 affordance channels in place as
    value / divisor + offset, and returns the (mutated) input.
    '''
    # (divisor, offset) per affordance channel, in order.
    channel_params = (
        (1.1, 0.5),          # angle
        (5.6249, 1.34445),   # toMarking_L
        (6.8752, 0.39091),   # toMarking_M
        (5.6249, -0.34445),  # toMarking_R
        (95, 0.12),          # dist_L
        (95, 0.12),          # dist_R
        (6.8752, 1.48181),   # toMarking_LL
        (6.25, 0.98),        # toMarking_ML
        (6.25, 0.02),        # toMarking_MR
        (6.8752, -0.48181),  # toMarking_RR
        (95, 0.12),          # dist_LL
        (95, 0.12),          # dist_MM
        (95, 0.12),          # dist_RR
    )
    for channel, (divisor, offset) in enumerate(channel_params):
        affordances[channel] = affordances[channel] / divisor + offset
    return affordances
def descale_output(affordances):
    """Invert scale_output for a 2-D array of normalized affordances.

    affordances: array of shape (n, 13). Returns a NEW array where each
    column is mapped back via (value - offset) * factor; the input is left
    untouched.
    """
    # (factor, offset) per affordance column, matching scale_output's order.
    column_params = (
        (1.1, 0.5), (5.6249, 1.34445), (6.8752, 0.39091), (5.6249, -0.34445),
        (95, 0.12), (95, 0.12), (6.8752, 1.48181), (6.25, 0.98), (6.25, 0.02),
        (6.8752, -0.48181), (95, 0.12), (95, 0.12), (95, 0.12),
    )
    affordances_unnorm = np.zeros(affordances.shape)
    for col, (factor, offset) in enumerate(column_params):
        affordances_unnorm[:, col] = (affordances[:, col] - offset) * factor
    return affordances_unnorm
def load_average():
    """Load the precomputed dataset mean image from its HDF5 file.

    Returns the full 'average' dataset as an in-memory array.
    """
    # Context manager guarantees the HDF5 handle is closed even if the
    # dataset read raises; the original leaked the handle on error.
    with h5py.File('/home/exx/Avinash/DReD/local/deepdriving_average.h5', 'r') as h5f:
        return h5f['average'][:]
if __name__ == "__main__":
    # --- Data locations and fixtures -------------------------------------
    dbpath = '/data/deepdriving/train_images/'
    keys = glob(dbpath + '*.jpg')
    #keys.sort()
    db = np.load(dbpath + 'affordances.npy')
    # TODO : shuffle and keep aligned
    db = db.astype('float32')
    avg = load_average()
    # --- Experiment switches ----------------------------------------------
    scale_out = False   # normalize affordances with scale_output?
    same_size = True    # keep native 210x280 frames (else resize to 64x64)
    pretrained = False
    model_num = 9       # experiment/run number used in all output file names
    folder = "/home/exx/Avinash/DReD/local/"
    model_filename = folder + 'models/cnnmodel%d.json' % model_num
    weights_filename = folder + 'models/cnnmodel%d_weights.h5' % model_num
    logs_path = folder + "models/run%d/" % model_num
    csvlog_filename = folder + 'models/cnnmodel%d.csv' % model_num
    # tensorboard --logdir /home/exx/Avinash/DReD/local/models/
    tbCallBack = TensorBoard(log_dir=logs_path, histogram_freq=0, write_graph=True, write_images=False)
    csvlog = CSVLogger(csvlog_filename, separator=',', append=False)
    mdlchkpt = ModelCheckpoint(weights_filename, monitor='val_loss', save_best_only=True, save_weights_only=True, period=2, verbose=1)
    erlystp = EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=10, verbose=1)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.8, patience=5, min_lr=1e-5, verbose=1)
    # Input layout depends on the backend's image dimension ordering.
    if K.image_dim_ordering() == 'tf':
        print('Tensorflow')
        if same_size:
            dim = (210, 280, 3)
        else:
            dim = (64, 64, 3)
    else:
        print('Theano')
        if same_size:
            dim = (3, 210, 280)
        else:
            dim = (3, 64, 64)
    # avg.shape = 210x280x3
    if not same_size:
        avg = cv2.resize(avg, (64, 64))
    # --- Train / validation / test split sizes ----------------------------
    batch_size = 32
    epochs = 25
    nb_tr = 350000   # training samples
    nb_val = 50000   # validation samples
    nb_ts = 5056 #84800
    if os.path.exists(model_filename):
        # Reuse a previously saved architecture and its best weights.
        json_file = open(model_filename, 'r')
        model_json = json_file.read()
        json_file.close()
        print('Model found and loading ...')
        model = model_from_json(model_json)
        print("Loading the best weights for evaluation")
        model.load_weights(weights_filename)
        adam = Adam(lr=5e-4)
        model.compile(optimizer=adam, loss='mse',metrics=['mae']) # try cross-entropy
    else:
        # Train from scratch, then persist the architecture as JSON.
        print('New model is built and training...')
        model = train(db, keys, avg, batch_size, epochs, nb_tr, nb_val , samples=None, val_samples=None, labels=True,scale_affords=scale_out)
        print("Loading the best weights for evaluation")
        model.load_weights(weights_filename)
        # saving the model to disk
        model_json = model.to_json()
        with open(model_filename, "w") as json_file:
            json_file.write(model_json)
        print("Saved model to disk")
    # --- Evaluate on the held-out test range ------------------------------
    ts_samples = int(nb_ts/batch_size)
    score = model.evaluate_generator(our_datagen(db, keys[nb_tr+nb_val:nb_tr+nb_val+nb_ts], avg, batch_size), ts_samples)
    print('TestData MSE:', score[0])
    print('TestData MAE', score[1])
    Y_pred, Y_true, err, err_avg = predict_affordances(db, keys[nb_tr+nb_val:nb_tr+nb_val+nb_ts], avg, model, batch_size, verbose=1, scale_affords = scale_out)
    if scale_out is True:
        # Map predictions and errors back to physical units before reporting.
        Y_pred_unnorm = descale_output(Y_pred)
        Y_true_unnorm = descale_output(Y_true)
        err = descale_output(err)
        err_avg = descale_output(err_avg.reshape(1,13))
    # NOTE(review): `start_time` is defined earlier in the file (not visible
    # in this chunk) — presumably time() captured at script start; confirm.
    print("Time taken is %s seconds " % (time() - start_time))
| |
"""
Test the model blocks
"""
import datetime
from django.test import TestCase
from mock import Mock
from django.db.models import Model, IntegerField, DateTimeField, CharField
from django.template import Context, Template, TemplateSyntaxError
from example_project.pepulator_factory.models import Pepulator, Distributor
from model_blocks.templatetags import model_filters
from model_blocks.templatetags import model_nodes
class DetailBlockFilterTest (TestCase):
    """Exercise the `as_detail_block` template filter against fixture data."""
    fixtures = ['pepulator_factory_data.json']

    def setUp(self):
        # Mock Django's get_template so that it doesn't load a real file;
        # instead just return a template that allows us to verify the context
        model_nodes.get_template = Mock(
            return_value=Template(('{{ title|default_if_none:instance|safe }}:{{ model|safe }},'
                                   '{% for name, label, value, is_list, is_link in fields %}'
                                   '{{ name|safe }},'
                                   '{{ label|safe }},'
                                   '{% if not is_list %}'
                                   '{% if is_link %}'
                                   '@{{ value }}'
                                   '{% else %}'
                                   '{{ value|safe }}'
                                   '{% endif %}'
                                   '{% else %}'
                                   '[{% for item in value.all %}{{ item|safe }},{% endfor %}]'
                                   '{% endif %},'
                                   '{% endfor %}')))

    def test_model_format(self):
        """Tests that a given model is formatted as expected."""
        pepulator = Pepulator.objects.get(serial_number=1235)
        # Link fields render with a leading '@' per the probe template above.
        expected_detail = (u"Pepulator #1235:pepulator,"
                           "serial_number,serial number,1235,"
                           "height,height,12,"
                           "width,width,15,"
                           "manufacture_date,manufacture date,2011-06-10 11:12:33,"
                           "color,color,red,"
                           "address,address,@ppr://1235/,"
                           "distributed_by,distributed by,Walmart,"
                           "knuckles,knuckles,[Knuckle of hardness 2.35,Knuckle of hardness 1.10,],"
                           "jambs,jambs,[],"
                           )
        detail = model_filters.as_detail_block(pepulator)
        model_nodes.get_template.assert_called_with('model_blocks/object_detail.html')
        self.assertEqual(detail, expected_detail)

    def test_filter_is_registered(self):
        """Test that the filter can be used from within a template"""
        template = Template(('{% load model_filters %}'
                             '{{ pepulator|as_detail_block }}'))
        pepulator = Pepulator.objects.get(serial_number=1235)
        context = Context({'pepulator':pepulator})
        expected_detail = (u"Pepulator #1235:pepulator,"
                           "serial_number,serial number,1235,"
                           "height,height,12,"
                           "width,width,15,"
                           "manufacture_date,manufacture date,2011-06-10 11:12:33,"
                           "color,color,red,"
                           "address,address,@ppr://1235/,"
                           "distributed_by,distributed by,Walmart,"
                           "knuckles,knuckles,[Knuckle of hardness 2.35,Knuckle of hardness 1.10,],"
                           "jambs,jambs,[],"
                           )
        detail = template.render(context)
        model_nodes.get_template.assert_called_with('model_blocks/object_detail.html')
        self.assertEqual(detail, expected_detail)

    def test_title_is_used(self):
        """Test that a title is used if provided"""
        template = Template(('{% load model_filters %}'
                             '{{ pepulator|as_detail_block:"My Pepulator" }}'))
        pepulator = Pepulator.objects.get(serial_number=1235)
        context = Context({'pepulator':pepulator})
        expected_detail = (u"My Pepulator:pepulator,"
                           "serial_number,serial number,1235,"
                           "height,height,12,"
                           "width,width,15,"
                           "manufacture_date,manufacture date,2011-06-10 11:12:33,"
                           "color,color,red,"
                           "address,address,@ppr://1235/,"
                           "distributed_by,distributed by,Walmart,"
                           "knuckles,knuckles,[Knuckle of hardness 2.35,Knuckle of hardness 1.10,],"
                           "jambs,jambs,[],"
                           )
        detail = template.render(context)
        model_nodes.get_template.assert_called_with('model_blocks/object_detail.html')
        self.assertEqual(detail, expected_detail)

    def test_related_fields(self):
        """Tests that related fields not defined on the model are included."""
        pepulator = Distributor.objects.get(name="Mom & Pop")
        expected_detail = (u"Mom & Pop:distributor,"
                           "name,name,Mom & Pop,"
                           "capacity,capacity,175,"
                           "stock,stock,[Pepulator #1238,],"
                           )
        detail = model_filters.as_detail_block(pepulator)
        model_nodes.get_template.assert_called_with('model_blocks/object_detail.html')
        self.assertEqual(detail, expected_detail)
class TeaserBlockFilterTest (TestCase):
    """Exercise the `as_teaser_block` template filter against fixture data."""
    fixtures = ['pepulator_factory_data.json']

    def setUp(self):
        # Mock Django's get_template so that it doesn't load a real file;
        # instead just return a template that allows us to verify the context
        model_nodes.get_template = Mock(
            return_value=Template('{{ title|default_if_none:instance|safe }}:{{ model|safe }},{% for name, label, value, is_list in fields %}{{ name|safe }},{{ label|safe }},{% if not is_list %}{{ value|safe }}{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]{% endif %},{% endfor %}'))

    def test_model_format(self):
        """Tests that a given model is formatted as expected."""
        pepulator = Pepulator.objects.get(serial_number=1235)
        expected_teaser = (u"Pepulator #1235:pepulator,"
                           "serial_number,serial number,1235,"
                           "height,height,12,"
                           "width,width,15,"
                           "manufacture_date,manufacture date,2011-06-10 11:12:33,"
                           "color,color,red,"
                           "address,address,ppr://1235/,"
                           "distributed_by,distributed by,Walmart,"
                           "knuckles,knuckles,[Knuckle of hardness 2.35,Knuckle of hardness 1.10,],"
                           "jambs,jambs,[],"
                           )
        teaser = model_filters.as_teaser_block(pepulator)
        model_nodes.get_template.assert_called_with('model_blocks/object_teaser.html')
        self.assertEqual(teaser, expected_teaser)
class ListBlockFilterTest (TestCase):
    """Exercise the `as_list_block` template filter against fixture data."""
    fixtures = ['pepulator_factory_data.json']

    def setUp(self):
        # Mock Django's get_template so that it doesn't load a real file;
        # instead just return a template that allows us to verify the context
        model_nodes.get_template = Mock(
            return_value=Template('{{ title|default_if_none:model|capfirst }}{% if not title %}s{% endif %}:{{ instance_list|safe }}'))

    def test_list_format(self):
        """Tests that a given model is formatted as expected."""
        pepulator_list = Pepulator.objects.filter(serial_number__gt=2000)
        expected_rendering = (u"Pepulators:[<Pepulator: Pepulator #2345>, "
                              "<Pepulator: Pepulator #2346>]")
        rendering = model_filters.as_list_block(pepulator_list)
        model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
        self.assertEqual(rendering, expected_rendering)

    def test_filter_is_registered(self):
        """Test that the filter can be used from within a template"""
        template = Template(('{% load model_filters %}'
                             '{{ pepulators|as_list_block }}'))
        pepulator_list = Pepulator.objects.filter(serial_number__gt=2000)
        context = Context({'pepulators':pepulator_list})
        expected_rendering = (u"Pepulators:[<Pepulator: Pepulator #2345>, "
                              "<Pepulator: Pepulator #2346>]")
        rendering = template.render(context)
        model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
        self.assertEqual(rendering, expected_rendering)

    def test_empty_queryset(self):
        """Test that the filter can be used from within a template"""
        template = Template(('{% load model_filters %}'
                             '{{ pepulators|as_list_block }}'))
        # No fixture has a serial number above 5000, so the list is empty.
        pepulator_list = Pepulator.objects.filter(serial_number__gt=5000)
        context = Context({'pepulators':pepulator_list})
        expected_rendering = (u"Pepulators:[]")
        rendering = template.render(context)
        model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
        self.assertEqual(rendering, expected_rendering)

    def test_non_query_set_results_in_no_model(self):
        """Test that when a non queryset is used, the model is None"""
        # Why? Because we try to read the model off of the queryset. If we just
        # have a list of objects, then we don't know the model.
        template = Template(('{% load model_filters %}'
                             '{{ pepulators|as_list_block }}'))
        pepulator_list = [p for p in Pepulator.objects.filter(serial_number__gt=2000)]
        context = Context({'pepulators':pepulator_list})
        expected_rendering = (u"Nones:[<Pepulator: Pepulator #2345>, "
                              "<Pepulator: Pepulator #2346>]")
        rendering = template.render(context)
        model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
        self.assertEqual(rendering, expected_rendering)

    def test_empty_list(self):
        """Test that when a non queryset is used, the model is None"""
        template = Template(('{% load model_filters %}'
                             '{{ pepulators|as_list_block }}'))
        pepulator_list = []
        context = Context({'pepulators':pepulator_list})
        expected_rendering = (u"Nones:[]")
        rendering = template.render(context)
        model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
        self.assertEqual(rendering, expected_rendering)

    def test_alternate_title_is_used(self):
        """Test that a list title is used if provided"""
        template = Template(('{% load model_filters %}'
                             '{{ pepulators|as_list_block:"Some Pepulators" }}'))
        pepulator_list = Pepulator.objects.filter(serial_number__gt=2000)
        context = Context({'pepulators':pepulator_list})
        expected_rendering = (u"Some Pepulators:[<Pepulator: Pepulator #2345>, "
                              "<Pepulator: Pepulator #2346>]")
        rendering = template.render(context)
        model_nodes.get_template.assert_called_with('model_blocks/object_list.html')
        self.assertEqual(rendering, expected_rendering)
class DetailBlockTagTest (TestCase):
    """Exercise the `detail_block` template tag, including per-model template,
    field-inclusion, and field-exclusion context variables."""
    fixtures = ['pepulator_factory_data.json']

    def setUp(self):
        # Mock Django's get_template so that it doesn't load a real file;
        # instead just return a template that allows us to verify the context
        model_nodes.get_template = Mock(
            return_value=Template('{{ title|default_if_none:instance|safe }}:{{ model|safe }},{% for name, label, value, is_list in fields %}{{ name|safe }},{{ label|safe }},{% if not is_list %}{{ value|safe }}{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]{% endif %},{% endfor %}'))

    def test_tag_is_registered(self):
        """Test that the filter can be used from within a template"""
        template = Template(('{% load model_tags %}'
                             '{% with pepulator_factory_pepulator_detail_template="pepulator_factory/pepulator_detail.html" %}'
                             '{% detail_block pepulator %}'
                             '{% endwith %}'))
        pepulator = Pepulator.objects.get(serial_number=1235)
        context = Context({'pepulator':pepulator})
        expected_detail = (u"Pepulator #1235:pepulator,"
                           "serial_number,serial number,1235,"
                           "height,height,12,"
                           "width,width,15,"
                           "manufacture_date,manufacture date,2011-06-10 11:12:33,"
                           "color,color,red,"
                           "address,address,ppr://1235/,"
                           "distributed_by,distributed by,Walmart,"
                           "knuckles,knuckles,[Knuckle of hardness 2.35,Knuckle of hardness 1.10,],"
                           "jambs,jambs,[],"
                           )
        detail = template.render(context)
        # The per-model template override from the {% with %} must be used.
        model_nodes.get_template.assert_called_with('pepulator_factory/pepulator_detail.html')
        self.assertEqual(detail, expected_detail)

    def test_with_specific_fields(self):
        """Test that the included fields spec is respected"""
        template = Template(('{% load model_tags %}'
                             '{% with pepulator_factory_pepulator_detail_template="pepulator_factory/pepulator_detail.html" %}'
                             '{% with pepulator_factory_pepulator_fields="serial_number, color, height, width" %}'
                             '{% detail_block pepulator %}'
                             '{% endwith %}'
                             '{% endwith %}'))
        pepulator = Pepulator.objects.get(serial_number=1235)
        context = Context({'pepulator':pepulator})
        # Fields render in the order given by the `..._fields` variable.
        expected_detail = (u"Pepulator #1235:pepulator,"
                           "serial_number,serial number,1235,"
                           "color,color,red,"
                           "height,height,12,"
                           "width,width,15,"
                           )
        detail = template.render(context)
        self.assertEqual(detail, expected_detail)

    def test_with_excluded_fields(self):
        """Test that the excluded fields spec is respected"""
        template = Template(('{% load model_tags %}'
                             '{% with pepulator_factory_pepulator_detail_template="pepulator_factory/pepulator_detail.html" %}'
                             '{% with pepulator_factory_pepulator_exclude="knuckles, jambs, color, address" %}'
                             '{% detail_block pepulator %}'
                             '{% endwith %}'
                             '{% endwith %}'))
        pepulator = Pepulator.objects.get(serial_number=1235)
        context = Context({'pepulator':pepulator})
        expected_detail = (u"Pepulator #1235:pepulator,"
                           "serial_number,serial number,1235,"
                           "height,height,12,"
                           "width,width,15,"
                           "manufacture_date,manufacture date,2011-06-10 11:12:33,"
                           "distributed_by,distributed by,Walmart,"
                           )
        detail = template.render(context)
        self.assertEqual(detail, expected_detail)

    def test_fail_on_wrong_number_of_arguments(self):
        """The tag must reject zero arguments and more than one argument."""
        self.assertRaises(TemplateSyntaxError, Template,
                          ('{% load model_tags %}'
                           '{% detail_block pepulator "overflow" %}'))
        self.assertRaises(TemplateSyntaxError, Template,
                          ('{% load model_tags %}'
                           '{% detail_block %}'))
class TeaserBlockTagTest (TestCase):
    """Exercise the `teaser_block` template tag."""
    fixtures = ['pepulator_factory_data.json']

    def setUp(self):
        # Mock Django's get_template so that it doesn't load a real file;
        # instead just return a template that allows us to verify the context
        model_nodes.get_template = Mock(
            return_value=Template('{{ title|default_if_none:instance|safe }}:{{ model|safe }},{% for name, label, value, is_list in fields %}{{ name|safe }},{{ label|safe }},{% if not is_list %}{{ value|safe }}{% else %}[{% for item in value.all %}{{ item|safe }},{% endfor %}]{% endif %},{% endfor %}'))

    def test_tag_is_registered(self):
        """Test that the filter can be used from within a template"""
        template = Template(('{% load model_tags %}'
                             '{% with pepulator_factory_pepulator_teaser_template="pepulator_factory/pepulator_teaser.html" %}'
                             '{% teaser_block pepulator %}'
                             '{% endwith %}'))
        pepulator = Pepulator.objects.get(serial_number=1235)
        context = Context({'pepulator':pepulator})
        expected_teaser = (u"Pepulator #1235:pepulator,"
                           "serial_number,serial number,1235,"
                           "height,height,12,"
                           "width,width,15,"
                           "manufacture_date,manufacture date,2011-06-10 11:12:33,"
                           "color,color,red,"
                           "address,address,ppr://1235/,"
                           "distributed_by,distributed by,Walmart,"
                           "knuckles,knuckles,[Knuckle of hardness 2.35,Knuckle of hardness 1.10,],"
                           "jambs,jambs,[],"
                           )
        teaser = template.render(context)
        # The per-model template override from the {% with %} must be used.
        model_nodes.get_template.assert_called_with('pepulator_factory/pepulator_teaser.html')
        self.assertEqual(teaser, expected_teaser)

    def test_fail_on_wrong_number_of_arguments(self):
        """The tag must reject zero arguments and more than one argument."""
        self.assertRaises(TemplateSyntaxError, Template,
                          ('{% load model_tags %}'
                           '{% teaser_block pepulator "overflow" %}'))
        self.assertRaises(TemplateSyntaxError, Template,
                          ('{% load model_tags %}'
                           '{% teaser_block %}'))
class ListBlockTagTest (TestCase):
    """Exercise the `list_block` template tag."""
    fixtures = ['pepulator_factory_data.json']

    def setUp(self):
        # Mock Django's get_template so that it doesn't load a real file;
        # instead just return a template that allows us to verify the context
        model_nodes.get_template = Mock(
            return_value=Template('{{ title|default_if_none:model|capfirst }}{% if not title %}s{% endif %}:{{ instance_list|safe }}'))

    def test_filter_is_registered(self):
        """Test that the filter can be used from within a template"""
        template = Template(('{% load model_tags %}'
                             '{% with pepulator_factory_pepulator_list_template="pepulator_factory/pepulator_list.html" %}'
                             '{% list_block pepulators %}'
                             '{% endwith %}'))
        pepulator_list = Pepulator.objects.filter(serial_number__gt=2000)
        context = Context({'pepulators':pepulator_list})
        expected_rendering = (u"Pepulators:[<Pepulator: Pepulator #2345>, "
                              "<Pepulator: Pepulator #2346>]")
        rendering = template.render(context)
        # The per-model template override from the {% with %} must be used.
        model_nodes.get_template.assert_called_with('pepulator_factory/pepulator_list.html')
        self.assertEqual(rendering, expected_rendering)

    def test_fail_on_wrong_number_of_arguments(self):
        """The tag must reject zero arguments and more than one argument."""
        self.assertRaises(TemplateSyntaxError, Template,
                          ('{% load model_tags %}'
                           '{% list_block pepulators "overflow" %}'))
        self.assertRaises(TemplateSyntaxError, Template,
                          ('{% load model_tags %}'
                           '{% list_block %}'))
class ModelBlockModuleTest (TestCase):
    """Smoke-test that the combined `model_blocks` tag library loads."""

    def test_all_tags_and_filters_loaded(self):
        """Compiling a template using every tag and filter must not raise."""
        template = Template(('{% load model_blocks %}'
                             '{% detail_block pepulator %}'
                             '{% list_block pepulators %}'
                             '{{ pepulator|as_detail_block }}'
                             '{{ pepulators|as_list_block }}'))
        # We just care that everything loaded, and we were able to get here
        # without incidence.
        # FIX: `self.assert_` is a long-deprecated unittest alias; use the
        # canonical assertTrue instead.
        self.assertTrue(True)
class SideEffectsTest (TestCase):
    """Ensure rendering a block does not leak variables into the outer
    template context."""
    fixtures = ['pepulator_factory_data.json']

    def setUp(self):
        # Mock Django's get_template so that it doesn't load a real file;
        # instead just return a template that allows us to verify the context
        model_nodes.get_template = Mock(
            return_value=Template('{{ title|default_if_none:model|capfirst }}{% if not title %}s{% endif %}'))

    def test_model_doesnt_carry_over_into_future_blocks(self):
        """The caller's `model` variable must survive a list_block render."""
        template = Template(('{% load model_tags %}'
                             '{{ model }}'
                             '{% list_block distributors %}'
                             '{{ model }}'))
        distributor_list = Distributor.objects.all()
        context = Context({'model':'My String',
                           'distributors':distributor_list})
        # `model` renders as the caller's value both before AND after the block.
        expected_rendering = (u"My String"
                              "Distributors"
                              "My String")
        rendering = template.render(context)
        self.assertEqual(rendering, expected_rendering)
| |
##
##
import unittest
import urllib2
from HTMLParser import HTMLParser
from xml.etree.ElementTree import parse as parseXml
from json import load as loadjson
from urlparse import urljoin
from base64 import b64encode
#
# Test the localizaiton REST service.
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# --------- -------- --------- --------------------------
# 08/07/17 5731 bsteffen Initial Creation.
# Connection and fixture constants for the localization REST service tests.
baseURL = "http://localhost:9581/services/localization/"
testSite = "OAX"
testDir = "menus"
testFile = "test.xml"
username = "test"
password = username  # test credentials: password equals the username
base64string = b64encode('%s:%s' % (username, password))
authString = "Basic %s" % base64string  # HTTP Basic Authorization header value
class ValidHTMLParser(HTMLParser):
    """Simple HTML parser that performs very minimal validation.

    This ensures that all start and end tags match, and also that there are
    some tags. It also accumulates the text of all links in the html file
    in the link_texts attribute, which can be used for further validation.
    """

    def __init__(self, testcase):
        HTMLParser.__init__(self)
        self._testcase = testcase  # enclosing TestCase, used for assertions
        self._tags = []            # stack of currently-open tag names
        self._any = False          # True once at least one start tag is seen
        self.link_texts = []       # text content of every <a> element

    def handle_starttag(self, tag, attrs):
        self._tags.append(tag)
        self._any = True

    def handle_endtag(self, tag):
        # Every end tag must close the most recently opened tag.
        self._testcase.assertNotEquals([], self._tags, "Unstarted end tag " + tag)
        self._testcase.assertEquals(tag, self._tags.pop())

    def handle_data(self, data):
        # NOTE(review): assumes text only occurs inside some element; data
        # arriving before the first start tag would raise IndexError here.
        if self._tags[-1] == "a":
            self.link_texts.append(data)

    def close(self):
        # At EOF: at least one tag was seen and every tag was closed.
        HTMLParser.close(self)
        self._testcase.assertTrue(self._any)
        self._testcase.assertEquals([], self._tags)
class AbstractListingTestCase():
    """Base test case for testing listings, retrieves data as html, xml, and json.

    Sub classes should implement assertValidHtml, assertValidXml, and
    assertValidJson to ensure that the content returned matches what was
    expected.  Subclasses must also set self.url in setUp.
    """

    def assertRequestGetsHtml(self, request):
        # Fetch the URL and require an HTML response that parses cleanly.
        response = urllib2.urlopen(request)
        self.assertEquals(response.headers["Content-Type"], "text/html")
        body = response.read()
        parser = ValidHTMLParser(self)
        parser.feed(body)
        parser.close()
        self.assertValidHtml(parser)

    def assertValidHtml(self, parser):
        """Intended to be overriden by subclasses to validate HTML content.

        The argument is a populated instance of ValidHTMLParser.
        """
        pass

    def test_default(self):
        # No Accept header: the service defaults to HTML.
        request = urllib2.Request(self.url)
        self.assertRequestGetsHtml(request)

    def test_last_slash(self):
        # The listing works both with and without a trailing slash.
        if self.url.endswith("/"):
            request = urllib2.Request(self.url[:-1])
        else:
            request = urllib2.Request(self.url + "/")
        self.assertRequestGetsHtml(request)

    def test_wild_mime(self):
        # Wildcard Accept headers resolve to HTML.
        request = urllib2.Request(self.url)
        request.add_header("Accept", "*/*")
        self.assertRequestGetsHtml(request)
        request.add_header("Accept", "text/*")
        self.assertRequestGetsHtml(request)

    def test_html(self):
        request = urllib2.Request(self.url)
        request.add_header("Accept", "text/html")
        self.assertRequestGetsHtml(request)

    def test_json(self):
        request = urllib2.Request(self.url)
        request.add_header("Accept", "application/json")
        response = urllib2.urlopen(request)
        self.assertEquals(response.headers["Content-Type"], "application/json")
        jsonData = loadjson(response)
        self.assertValidJson(jsonData)

    def assertValidJson(self, jsonData):
        """Intended to be overriden by subclasses to validate JSON content.

        The argument is a python object as returned from json.load
        """
        pass

    def test_xml(self):
        request = urllib2.Request(self.url)
        request.add_header("Accept", "application/xml")
        response = urllib2.urlopen(request)
        self.assertEquals(response.headers["Content-Type"], "application/xml")
        xmlData = parseXml(response)
        self.assertValidXml(xmlData)

    def assertValidXml(self, xmlData):
        """Intended to be overriden by subclasses to validate XML content.

        The argument is an ElementTree
        """
        pass

    def test_delete(self):
        # Listings are read-only: DELETE must yield 405 Method Not Allowed.
        request = urllib2.Request(self.url)
        request.get_method = lambda: "DELETE"
        with self.assertRaises(urllib2.HTTPError) as cm:
            response = urllib2.urlopen(request)
        self.assertEqual(405, cm.exception.code)

    def test_put(self):
        # Listings are read-only: PUT must yield 405 Method Not Allowed.
        request = urllib2.Request(self.url)
        request.get_method = lambda: "PUT"
        request.add_data("Test Data")
        with self.assertRaises(urllib2.HTTPError) as cm:
            response = urllib2.urlopen(request)
        self.assertEqual(405, cm.exception.code)

    def test_unacceptable(self):
        # Unsupported Accept types must yield 406 Not Acceptable.
        request = urllib2.Request(self.url)
        request.add_header("Accept", "application/fakemimetype")
        with self.assertRaises(urllib2.HTTPError) as cm:
            response = urllib2.urlopen(request)
        self.assertEqual(406, cm.exception.code)
        request.add_header("Accept", "fakemimetype/*")
        with self.assertRaises(urllib2.HTTPError) as cm:
            response = urllib2.urlopen(request)
        self.assertEqual(406, cm.exception.code)

    def test_accept_quality_factor(self):
        # Content negotiation: the type with the higher q-value (or with no
        # q-value, which implies q=1) must win.
        request = urllib2.Request(self.url)
        request.add_header("Accept", "application/xml; q=0.8, application/json; q=0.2")
        response = urllib2.urlopen(request)
        self.assertEquals(response.headers["Content-Type"], "application/xml")
        xmlData = parseXml(response)
        self.assertValidXml(xmlData)
        request.add_header("Accept", "application/xml; q=0.2, application/json; q=0.8")
        response = urllib2.urlopen(request)
        self.assertEquals(response.headers["Content-Type"], "application/json")
        jsonData = loadjson(response)
        self.assertValidJson(jsonData)
        request.add_header("Accept", "application/xml, application/json; q=0.8")
        response = urllib2.urlopen(request)
        self.assertEquals(response.headers["Content-Type"], "application/xml")
        xmlData = parseXml(response)
        self.assertValidXml(xmlData)
        # An unsupported first choice falls back to the supported alternative.
        request.add_header("Accept", "application/fakemimetype, application/json; q=0.8")
        response = urllib2.urlopen(request)
        self.assertEquals(response.headers["Content-Type"], "application/json")
        jsonData = loadjson(response)
        self.assertValidJson(jsonData)
class RootTestCase(AbstractListingTestCase, unittest.TestCase):
    """Test that the root of the localization service returns listing of localization types."""

    def setUp(self):
        self.url = baseURL

    def assertValidHtml(self, parser):
        self.assertIn("common_static/", parser.link_texts)

    def assertValidJson(self, jsonData):
        self.assertIn("common_static/", jsonData)

    def assertValidXml(self, xmlData):
        root = xmlData.getroot()
        self.assertEquals(root.tag, "entries")
        names = [e.text for e in root.findall("entry")]
        self.assertIn("common_static/", names)
class TypeTestCase(AbstractListingTestCase, unittest.TestCase):
    """Test that common_static will list context levels."""

    def setUp(self):
        self.url = urljoin(baseURL, "common_static/")

    def assertValidHtml(self, parser):
        self.assertIn("base/", parser.link_texts)
        self.assertIn("site/", parser.link_texts)

    def assertValidJson(self, jsonData):
        self.assertIn("base/", jsonData)
        self.assertIn("site/", jsonData)

    def assertValidXml(self, xmlData):
        root = xmlData.getroot()
        self.assertEquals(root.tag, "entries")
        names = [e.text for e in root.findall("entry")]
        self.assertIn("base/", names)
        self.assertIn("site/", names)
class LevelTestCase(AbstractListingTestCase, unittest.TestCase):
    """Test that common_static/site will list sites."""

    def setUp(self):
        self.url = urljoin(baseURL, "common_static/site/")

    def assertValidHtml(self, parser):
        self.assertIn(testSite +"/", parser.link_texts)

    def assertValidJson(self, jsonData):
        self.assertIn(testSite +"/", jsonData)

    def assertValidXml(self, xmlData):
        root = xmlData.getroot()
        self.assertEquals(root.tag, "entries")
        names = [e.text for e in root.findall("entry")]
        self.assertIn(testSite +"/", names)
class AbstractFileListingTestCase(AbstractListingTestCase):
    """Base test case for a file listing.

    Also checks that entries come back sorted by name.
    """

    def assertValidHtml(self, parser):
        self.assertIn(testDir +"/", parser.link_texts)
        # Entries must be in sorted order.
        self.assertEquals(parser.link_texts, sorted(parser.link_texts))

    def assertValidJson(self, jsonData):
        self.assertIn(testDir +"/", jsonData)

    def assertValidXml(self, xmlData):
        root = xmlData.getroot()
        self.assertEquals(root.tag, "files")
        names = [e.get("name") for e in root.findall("file")]
        self.assertIn(testDir +"/", names)
        # Entries must be in sorted order.
        self.assertEquals(names, sorted(names))
class BaseFileListingTestCase(AbstractFileListingTestCase, unittest.TestCase):
    """Test that common_static/base lists files"""

    def setUp(self):
        self.url = urljoin(baseURL, "common_static/base/")
class SiteFileListingTestCase(AbstractFileListingTestCase, unittest.TestCase):
    """Test that common_static/site/<testSite>/ lists files"""

    def setUp(self):
        self.url = urljoin(baseURL, "common_static/site/" + testSite + "/")
class FileTestCase(unittest.TestCase):
    """Test retrieval, modification and deletion of an individual."""

    def setUp(self):
        self.url = urljoin(baseURL, "common_static/user/" + username + "/" + testFile)
        # The file should not exist before the test, but if it does then delete it
        # This is some of the same functionality we are testing so if setup fails
        # then the test would probably fail anyway
        try:
            request = urllib2.Request(self.url)
            response = urllib2.urlopen(request)
            request = urllib2.Request(self.url)
            request.get_method = lambda: "DELETE"
            request.add_header("Authorization", authString)
            # DELETE requires the current checksum from the preceding GET.
            request.add_header("If-Match", response.headers["Content-MD5"])
            response = urllib2.urlopen(request)
        except urllib2.HTTPError as e:
            # 404 just means the file did not exist, which is what we want.
            if e.code != 404:
                raise e

    def test_file_operations(self):
        """Run through a typical set of file interactions and verify everything works correctly."""
        # Unauthenticated PUT is rejected with 401.
        request = urllib2.Request(self.url)
        request.get_method = lambda: "PUT"
        request.add_data("Test Data")
        with self.assertRaises(urllib2.HTTPError) as cm:
            response = urllib2.urlopen(request)
        self.assertEqual(401, cm.exception.code)
        # Authenticated PUT without an If-Match checksum is a 409 conflict.
        request.add_header("Authorization", authString)
        with self.assertRaises(urllib2.HTTPError) as cm:
            response = urllib2.urlopen(request)
        self.assertEqual(409, cm.exception.code)
        # NOTE(review): with a (bogus) If-Match present, creating a new file
        # succeeds — presumably any checksum is accepted when the file does
        # not yet exist; confirm against the service implementation.
        request.add_header("If-Match", "NON_EXISTENT_CHECKSUM")
        response = urllib2.urlopen(request)
        # The created file is readable, with or without a trailing slash.
        request = urllib2.Request(self.url)
        response = urllib2.urlopen(request)
        self.assertEquals(response.read(), "Test Data")
        request = urllib2.Request(self.url + "/")
        response = urllib2.urlopen(request)
        self.assertEquals(response.read(), "Test Data")
        # Update the file using the checksum from the last GET response.
        request = urllib2.Request(self.url)
        request.get_method = lambda: "PUT"
        request.add_data("Test Data2")
        request.add_header("If-Match", response.headers["Content-MD5"])
        request.add_header("Authorization", authString)
        response = urllib2.urlopen(request)
        checksum = response.headers["Content-MD5"]
        request = urllib2.Request(self.url)
        response = urllib2.urlopen(request)
        self.assertEquals(response.read(), "Test Data2")
        # DELETE: unauthenticated -> 401, authenticated without checksum -> 409.
        request = urllib2.Request(self.url)
        request.get_method = lambda: "DELETE"
        with self.assertRaises(urllib2.HTTPError) as cm:
            response = urllib2.urlopen(request)
        self.assertEqual(401, cm.exception.code)
        request.add_header("Authorization", authString)
        with self.assertRaises(urllib2.HTTPError) as cm:
            response = urllib2.urlopen(request)
        self.assertEqual(409, cm.exception.code)
        # With the correct checksum the DELETE succeeds.
        request.add_header("If-Match", checksum)
        response = urllib2.urlopen(request)
        # After deletion the file is gone (404).
        request = urllib2.Request(self.url)
        with self.assertRaises(urllib2.HTTPError) as cm:
            response = urllib2.urlopen(request)
        self.assertEqual(404, cm.exception.code)
# Run the full suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| |
# Copyright 2017 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
_FLASK_TRACE_ID = "flask0id"
_FLASK_SPAN_ID = "span0flask"
_FLASK_HTTP_REQUEST = {"requestUrl": "https://flask.palletsprojects.com/en/1.1.x/"}
_DJANGO_TRACE_ID = "django0id"
_DJANGO_SPAN_ID = "span0django"
_DJANGO_HTTP_REQUEST = {"requestUrl": "https://www.djangoproject.com/"}
class Test_get_request_data_from_flask(unittest.TestCase):
    """Tests for _helpers.get_request_data_from_flask within a Flask request context."""

    @staticmethod
    def _call_fut():
        # Deferred import: resolve the function under test at call time so
        # import errors surface per-test rather than at collection time.
        from google.cloud.logging_v2.handlers import _helpers

        http, trace, span, sampled = _helpers.get_request_data_from_flask()
        return http, trace, span, sampled

    @staticmethod
    def create_app():
        """Build a minimal Flask app providing a request context for the tests."""
        import flask

        app = flask.Flask(__name__)

        @app.route("/")
        def index():
            return "test flask trace"  # pragma: NO COVER

        return app

    def test_no_context_header(self):
        """Without trace headers, trace/span are None and sampled is False."""
        app = self.create_app()
        with app.test_request_context(path="/", headers={}):
            http_request, trace_id, span_id, sampled = self._call_fut()
            self.assertIsNone(trace_id)
            self.assertIsNone(span_id)
            self.assertEqual(sampled, False)
            self.assertEqual(http_request["requestMethod"], "GET")

    def test_xcloud_header(self):
        """X-Cloud-Trace-Context header is parsed into trace, span and sampled."""
        flask_trace_header = "X_CLOUD_TRACE_CONTEXT"
        expected_trace_id = _FLASK_TRACE_ID
        expected_span_id = _FLASK_SPAN_ID
        # ";o=1" marks the request as sampled.
        flask_trace_id = f"{expected_trace_id}/{expected_span_id};o=1"
        app = self.create_app()
        context = app.test_request_context(
            path="/", headers={flask_trace_header: flask_trace_id}
        )
        with context:
            http_request, trace_id, span_id, sampled = self._call_fut()
            self.assertEqual(trace_id, expected_trace_id)
            self.assertEqual(span_id, expected_span_id)
            self.assertEqual(sampled, True)
            self.assertEqual(http_request["requestMethod"], "GET")

    def test_traceparent_header(self):
        """W3C traceparent header is parsed into trace, span and sampled."""
        flask_trace_header = "TRACEPARENT"
        expected_trace_id = "4bf92f3577b34da6a3ce929d0e0e4736"
        expected_span_id = "00f067aa0ba902b7"
        # "-01" trailing flags byte marks the request as sampled.
        flask_trace_id = f"00-{expected_trace_id}-{expected_span_id}-01"
        app = self.create_app()
        context = app.test_request_context(
            path="/", headers={flask_trace_header: flask_trace_id}
        )
        with context:
            http_request, trace_id, span_id, sampled = self._call_fut()
            self.assertEqual(trace_id, expected_trace_id)
            self.assertEqual(span_id, expected_span_id)
            self.assertEqual(sampled, True)
            self.assertEqual(http_request["requestMethod"], "GET")

    def test_http_request_populated(self):
        """Request metadata (URL, agent, protocol) is copied into http_request."""
        expected_path = "http://testserver/123"
        expected_agent = "Mozilla/5.0"
        expected_referrer = "self"
        expected_ip = "10.1.2.3"
        headers = {
            "User-Agent": expected_agent,
            "Referer": expected_referrer,
        }
        app = self.create_app()
        with app.test_request_context(
            expected_path, headers=headers, environ_base={"REMOTE_ADDR": expected_ip}
        ):
            http_request, *_ = self._call_fut()
            self.assertEqual(http_request["requestMethod"], "GET")
            self.assertEqual(http_request["requestUrl"], expected_path)
            self.assertEqual(http_request["userAgent"], expected_agent)
            self.assertEqual(http_request["protocol"], "HTTP/1.1")

    def test_http_request_sparse(self):
        """A bare request still yields method, URL and protocol fields."""
        expected_path = "http://testserver/123"
        app = self.create_app()
        with app.test_request_context(expected_path):
            http_request, *_ = self._call_fut()
            self.assertEqual(http_request["requestMethod"], "GET")
            self.assertEqual(http_request["requestUrl"], expected_path)
            self.assertEqual(http_request["protocol"], "HTTP/1.1")
class Test_get_request_data_from_django(unittest.TestCase):
    """Tests for _helpers.get_request_data_from_django using Django's RequestFactory."""

    @staticmethod
    def _call_fut():
        # Deferred import: resolve the function under test at call time.
        from google.cloud.logging_v2.handlers import _helpers

        http, trace, span, sampled = _helpers.get_request_data_from_django()
        return http, trace, span, sampled

    def setUp(self):
        from django.conf import settings
        from django.test.utils import setup_test_environment

        # Django requires settings to be configured before the test
        # environment can be set up; configure lazily and only once.
        if not settings.configured:
            settings.configure()
        setup_test_environment()

    def tearDown(self):
        from django.test.utils import teardown_test_environment
        from google.cloud.logging_v2.handlers.middleware import request

        teardown_test_environment()
        # Clear the middleware's thread-local so state cannot leak between tests.
        request._thread_locals.__dict__.clear()

    def test_no_context_header(self):
        """Without trace headers, trace/span are None and sampled is False."""
        from django.test import RequestFactory
        from google.cloud.logging_v2.handlers.middleware import request

        django_request = RequestFactory().get("/")

        middleware = request.RequestMiddleware(None)
        middleware.process_request(django_request)
        http_request, trace_id, span_id, sampled = self._call_fut()
        self.assertEqual(http_request["requestMethod"], "GET")
        self.assertIsNone(trace_id)
        self.assertIsNone(span_id)
        self.assertEqual(sampled, False)

    def test_xcloud_header(self):
        """X-Cloud-Trace-Context header is parsed into trace, span and sampled."""
        from django.test import RequestFactory
        from google.cloud.logging_v2.handlers.middleware import request

        django_trace_header = "HTTP_X_CLOUD_TRACE_CONTEXT"
        expected_span_id = _DJANGO_SPAN_ID
        expected_trace_id = _DJANGO_TRACE_ID
        # ";o=1" marks the request as sampled.
        django_trace_id = f"{expected_trace_id}/{expected_span_id};o=1"

        django_request = RequestFactory().get(
            "/", **{django_trace_header: django_trace_id}
        )

        middleware = request.RequestMiddleware(None)
        middleware.process_request(django_request)
        http_request, trace_id, span_id, sampled = self._call_fut()
        self.assertEqual(trace_id, expected_trace_id)
        self.assertEqual(span_id, expected_span_id)
        self.assertEqual(sampled, True)
        self.assertEqual(http_request["requestMethod"], "GET")

    def test_traceparent_header(self):
        """W3C traceparent header is parsed into trace, span and sampled."""
        from django.test import RequestFactory
        from google.cloud.logging_v2.handlers.middleware import request

        django_trace_header = "HTTP_TRACEPARENT"
        expected_trace_id = "4bf92f3577b34da6a3ce929d0e0e4736"
        expected_span_id = "00f067aa0ba902b7"
        header = f"00-{expected_trace_id}-{expected_span_id}-01"

        django_request = RequestFactory().get("/", **{django_trace_header: header})

        middleware = request.RequestMiddleware(None)
        middleware.process_request(django_request)
        http_request, trace_id, span_id, sampled = self._call_fut()
        self.assertEqual(trace_id, expected_trace_id)
        self.assertEqual(span_id, expected_span_id)
        self.assertEqual(sampled, True)
        self.assertEqual(http_request["requestMethod"], "GET")

    def test_http_request_populated(self):
        """Request metadata (URL, agent, protocol) is copied into http_request."""
        from django.test import RequestFactory
        from google.cloud.logging_v2.handlers.middleware import request

        expected_path = "http://testserver/123"
        expected_agent = "Mozilla/5.0"
        expected_referrer = "self"
        body_content = "test"
        django_request = RequestFactory().put(
            expected_path,
            data=body_content,
            HTTP_USER_AGENT=expected_agent,
            HTTP_REFERER=expected_referrer,
        )
        # ensure test passes even after request has been read
        # context: https://github.com/googleapis/python-logging/issues/159
        django_request.read()
        middleware = request.RequestMiddleware(None)
        middleware.process_request(django_request)
        http_request, *_ = self._call_fut()
        self.assertEqual(http_request["requestMethod"], "PUT")
        self.assertEqual(http_request["requestUrl"], expected_path)
        self.assertEqual(http_request["userAgent"], expected_agent)
        self.assertEqual(http_request["protocol"], "HTTP/1.1")

    def test_http_request_sparse(self):
        """A bare request still yields method, URL and protocol fields."""
        from django.test import RequestFactory
        from google.cloud.logging_v2.handlers.middleware import request

        expected_path = "http://testserver/123"
        django_request = RequestFactory().put(expected_path)
        middleware = request.RequestMiddleware(None)
        middleware.process_request(django_request)
        http_request, *_ = self._call_fut()
        self.assertEqual(http_request["requestMethod"], "PUT")
        self.assertEqual(http_request["requestUrl"], expected_path)
        self.assertEqual(http_request["protocol"], "HTTP/1.1")
class Test_get_request_data(unittest.TestCase):
    """Tests for _helpers.get_request_data, which tries Django first, then Flask."""

    @staticmethod
    def _call_fut():
        # Deferred import: resolve the function under test at call time.
        from google.cloud.logging_v2.handlers import _helpers

        http, trace, span, sampled = _helpers.get_request_data()
        return http, trace, span, sampled

    def _helper(self, django_return, flask_return):
        """Patch both framework helpers and return (django_mock, flask_mock, result)."""
        django_patch = mock.patch(
            "google.cloud.logging_v2.handlers._helpers.get_request_data_from_django",
            return_value=django_return,
        )
        flask_patch = mock.patch(
            "google.cloud.logging_v2.handlers._helpers.get_request_data_from_flask",
            return_value=flask_return,
        )

        with django_patch as django_mock:
            with flask_patch as flask_mock:
                result = self._call_fut()

        return django_mock, flask_mock, result

    def test_from_django(self):
        """When Django has data, its result is returned and Flask is never asked."""
        django_expected = (
            _DJANGO_HTTP_REQUEST,
            _DJANGO_TRACE_ID,
            _DJANGO_SPAN_ID,
            False,
        )
        flask_expected = (None, None, None, False)
        django_mock, flask_mock, output = self._helper(django_expected, flask_expected)
        self.assertEqual(output, django_expected)

        django_mock.assert_called_once_with()
        flask_mock.assert_not_called()

    def test_from_flask(self):
        """When Django has no data, the Flask result is used."""
        django_expected = (None, None, None, False)
        flask_expected = (_FLASK_HTTP_REQUEST, _FLASK_TRACE_ID, _FLASK_SPAN_ID, False)

        django_mock, flask_mock, output = self._helper(django_expected, flask_expected)
        self.assertEqual(output, flask_expected)

        django_mock.assert_called_once_with()
        flask_mock.assert_called_once_with()

    def test_from_django_and_flask(self):
        """When both frameworks report data, Django takes precedence."""
        django_expected = (
            _DJANGO_HTTP_REQUEST,
            _DJANGO_TRACE_ID,
            _DJANGO_SPAN_ID,
            False,
        )
        flask_expected = (_FLASK_HTTP_REQUEST, _FLASK_TRACE_ID, _FLASK_SPAN_ID, False)

        django_mock, flask_mock, output = self._helper(django_expected, flask_expected)

        # Django wins.
        self.assertEqual(output, django_expected)

        django_mock.assert_called_once_with()
        flask_mock.assert_not_called()

    def test_missing_http_request(self):
        """Trace data without an http_request is discarded entirely."""
        flask_expected = (None, _FLASK_TRACE_ID, _FLASK_SPAN_ID, True)
        django_expected = (None, _DJANGO_TRACE_ID, _DJANGO_TRACE_ID, True)

        django_mock, flask_mock, output = self._helper(django_expected, flask_expected)

        # function only returns trace if http_request data is present
        self.assertEqual(output, (None, None, None, False))

        django_mock.assert_called_once_with()
        flask_mock.assert_called_once_with()

    def test_missing_trace_id(self):
        """An http_request without a trace id is still returned."""
        flask_expected = (_FLASK_HTTP_REQUEST, None, None, False)
        django_expected = (None, _DJANGO_TRACE_ID, _DJANGO_SPAN_ID, True)

        django_mock, flask_mock, output = self._helper(django_expected, flask_expected)

        # trace_id is optional
        self.assertEqual(output, flask_expected)

        django_mock.assert_called_once_with()
        flask_mock.assert_called_once_with()

    def test_missing_both(self):
        """When neither framework has data, all-empty values come back."""
        flask_expected = (None, None, None, False)
        django_expected = (None, None, None, False)

        django_mock, flask_mock, output = self._helper(django_expected, flask_expected)
        self.assertEqual(output, (None, None, None, False))

        django_mock.assert_called_once_with()
        flask_mock.assert_called_once_with()

    def test_wo_libraries(self):
        """With neither Flask nor Django active, the fallback tuple is returned."""
        output = self._call_fut()
        self.assertEqual(output, (None, None, None, False))
class Test__parse_xcloud_trace(unittest.TestCase):
    """Tests for _helpers._parse_xcloud_trace.

    The header format is "TRACE_ID/SPAN_ID;o=SAMPLED" with every part optional.
    """

    @staticmethod
    def _call_fut(header):
        # Deferred import: resolve the function under test at call time.
        from google.cloud.logging_v2.handlers import _helpers

        trace, span, sampled = _helpers._parse_xcloud_trace(header)
        return trace, span, sampled

    def test_empty_header(self):
        header = ""
        trace_id, span_id, sampled = self._call_fut(header)
        self.assertIsNone(trace_id)
        self.assertIsNone(span_id)
        self.assertEqual(sampled, False)

    def test_no_span(self):
        # A bare value is treated as the trace id.
        header = "12345"
        trace_id, span_id, sampled = self._call_fut(header)
        self.assertEqual(trace_id, header)
        self.assertIsNone(span_id)
        self.assertEqual(sampled, False)

    def test_no_trace(self):
        # A leading "/" means only the span id is present.
        header = "/12345"
        trace_id, span_id, sampled = self._call_fut(header)
        self.assertIsNone(trace_id)
        self.assertEqual(span_id, "12345")
        self.assertEqual(sampled, False)

    def test_with_span(self):
        expected_trace = "12345"
        expected_span = "67890"
        header = f"{expected_trace}/{expected_span}"
        trace_id, span_id, sampled = self._call_fut(header)
        self.assertEqual(trace_id, expected_trace)
        self.assertEqual(span_id, expected_span)
        self.assertEqual(sampled, False)

    def test_with_extra_characters(self):
        # Trailing data after ";" that is not "o=..." is ignored.
        expected_trace = "12345"
        expected_span = "67890"
        header = f"{expected_trace}/{expected_span};abc"
        trace_id, span_id, sampled = self._call_fut(header)
        self.assertEqual(trace_id, expected_trace)
        self.assertEqual(span_id, expected_span)
        self.assertEqual(sampled, False)

    def test_with_explicit_no_sampled(self):
        expected_trace = "12345"
        expected_span = "67890"
        header = f"{expected_trace}/{expected_span};o=0"
        trace_id, span_id, sampled = self._call_fut(header)
        self.assertEqual(trace_id, expected_trace)
        self.assertEqual(span_id, expected_span)
        self.assertEqual(sampled, False)

    def test_with__sampled(self):
        # ";o=1" marks the request as sampled.
        expected_trace = "12345"
        expected_span = "67890"
        header = f"{expected_trace}/{expected_span};o=1"
        trace_id, span_id, sampled = self._call_fut(header)
        self.assertEqual(trace_id, expected_trace)
        self.assertEqual(span_id, expected_span)
        self.assertEqual(sampled, True)
class Test__parse_trace_parent(unittest.TestCase):
    """Tests for _helpers._parse_trace_parent (W3C traceparent header).

    Header format: "VERSION-TRACE_ID-SPAN_ID-FLAGS"; bit 0x01 of FLAGS is the
    sampled bit.
    """

    @staticmethod
    def _call_fut(header):
        # Deferred import: resolve the function under test at call time.
        from google.cloud.logging_v2.handlers import _helpers

        trace, span, sampled = _helpers._parse_trace_parent(header)
        return trace, span, sampled

    def test_empty_header(self):
        header = ""
        trace_id, span_id, sampled = self._call_fut(header)
        self.assertIsNone(trace_id)
        self.assertIsNone(span_id)
        self.assertEqual(sampled, False)

    def test_valid_header(self):
        header = "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01"
        trace_id, span_id, sampled = self._call_fut(header)
        self.assertEqual(trace_id, "0af7651916cd43dd8448eb211c80319c")
        self.assertEqual(span_id, "b7ad6b7169203331")
        self.assertEqual(sampled, True)

    def test_not_sampled(self):
        header = "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-00"
        trace_id, span_id, sampled = self._call_fut(header)
        self.assertEqual(trace_id, "0af7651916cd43dd8448eb211c80319c")
        self.assertEqual(span_id, "b7ad6b7169203331")
        self.assertEqual(sampled, False)

    def test_sampled_w_other_flags(self):
        # 0x09 has the sampled bit (0x01) set alongside other flag bits.
        header = "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-09"
        trace_id, span_id, sampled = self._call_fut(header)
        self.assertEqual(trace_id, "0af7651916cd43dd8448eb211c80319c")
        self.assertEqual(span_id, "b7ad6b7169203331")
        self.assertEqual(sampled, True)

    def test_invalid_headers(self):
        """Malformed headers must all be rejected."""
        # BUG FIX: the original list was missing a comma after "test", so
        # Python's implicit string concatenation merged it with the
        # invalid-version header and that case was never actually tested.
        invalid_headers = [
            "",
            "test",
            "ff-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01",  # invalid version
            "00-00000000000000000000000000000000-b7ad6b7169203331-01",  # invalid trace
            "00-0af7651916cd43dd8448eb211c80319c-0000000000000000-01",  # invalid span
            "00-af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-00",  # trace too short
            "00-0af7651916cd43dd8448eb211c80319c-bad6b7169203331-00",  # span too short
            "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-0",  # flags too short
            "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-",  # flags missing
            "00-0af7651916cd43dd8448eb211c80319c-00",  # too few segments
        ]
        for header in invalid_headers:
            trace_id, span_id, sampled = self._call_fut(header)
            self.assertIsNone(trace_id)
            self.assertIsNone(span_id)
            self.assertEqual(sampled, False)
| |
"""Helper methods to handle the time in Home Assistant."""
import datetime as dt
import re
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import ciso8601
import pytz
import pytz.exceptions as pytzexceptions
import pytz.tzinfo as pytzinfo
from homeassistant.const import MATCH_ALL
DATE_STR_FORMAT = "%Y-%m-%d"
UTC = pytz.utc
DEFAULT_TIME_ZONE: dt.tzinfo = pytz.utc
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
DATETIME_RE = re.compile(
r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
r"[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})"
r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?"
r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$"
)
def set_default_time_zone(time_zone: dt.tzinfo) -> None:
    """Set a default time zone to be used when none is specified.

    Async friendly.
    """
    global DEFAULT_TIME_ZONE  # pylint: disable=global-statement

    # NOTE: Remove in the future in favour of typing
    # (assert is stripped under `python -O`, so this is best-effort only).
    assert isinstance(time_zone, dt.tzinfo)

    DEFAULT_TIME_ZONE = time_zone
def get_time_zone(time_zone_str: str) -> Optional[dt.tzinfo]:
    """Look up a time zone by name.

    Returns None when the name is unknown to pytz. Async friendly.
    """
    try:
        zone = pytz.timezone(time_zone_str)
    except pytzexceptions.UnknownTimeZoneError:
        return None
    return zone
def utcnow() -> dt.datetime:
    """Return the current time as a timezone-aware UTC datetime."""
    return dt.datetime.now(tz=UTC)
def now(time_zone: Optional[dt.tzinfo] = None) -> dt.datetime:
    """Return the current time in the given zone (default zone when omitted)."""
    zone = time_zone or DEFAULT_TIME_ZONE
    return dt.datetime.now(zone)
def as_utc(dattim: dt.datetime) -> dt.datetime:
    """Return a datetime as UTC time.

    Assumes datetime without tzinfo to be in the DEFAULT_TIME_ZONE.
    """
    if dattim.tzinfo == UTC:
        return dattim
    if dattim.tzinfo is None:
        # Naive input: attach the default zone before converting to UTC.
        dattim = DEFAULT_TIME_ZONE.localize(dattim)  # type: ignore
    return dattim.astimezone(UTC)
def as_timestamp(dt_value: dt.datetime) -> float:
    """Convert a date/time into a unix time (seconds since 1970).

    Non-datetime inputs are stringified and re-parsed; raises ValueError
    when the string cannot be parsed as a date/time.
    """
    if not hasattr(dt_value, "timestamp"):
        # Not a datetime-like object: round-trip through the string parser.
        parsed: Optional[dt.datetime] = parse_datetime(str(dt_value))
        if parsed is None:
            raise ValueError("not a valid date/time.")
        return parsed.timestamp()
    return dt_value.timestamp()
def as_local(dattim: dt.datetime) -> dt.datetime:
    """Convert a UTC datetime object to local time zone."""
    if dattim.tzinfo == DEFAULT_TIME_ZONE:
        return dattim
    if dattim.tzinfo is None:
        # Naive datetimes are assumed to already be in UTC.
        dattim = UTC.localize(dattim)
    return dattim.astimezone(DEFAULT_TIME_ZONE)
def utc_from_timestamp(timestamp: float) -> dt.datetime:
    """Return an aware UTC datetime for the given unix timestamp."""
    naive = dt.datetime.utcfromtimestamp(timestamp)
    return UTC.localize(naive)
def start_of_local_day(
    dt_or_d: Union[dt.date, dt.datetime, None] = None
) -> dt.datetime:
    """Return local datetime object of start of day from date or datetime.

    Defaults to the start of today when no argument is given.
    """
    if dt_or_d is None:
        date: dt.date = now().date()
    elif isinstance(dt_or_d, dt.datetime):
        date = dt_or_d.date()
    else:
        # BUG FIX: a plain dt.date previously fell through both branches,
        # leaving `date` unbound and raising UnboundLocalError; use it as-is.
        date = dt_or_d
    return DEFAULT_TIME_ZONE.localize(  # type: ignore
        dt.datetime.combine(date, dt.time())
    )
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
def parse_datetime(dt_str: str) -> Optional[dt.datetime]:
"""Parse a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raises ValueError if the input is well formatted but not a valid datetime.
Returns None if the input isn't well formatted.
"""
try:
return ciso8601.parse_datetime(dt_str)
except (ValueError, IndexError):
pass
match = DATETIME_RE.match(dt_str)
if not match:
return None
kws: Dict[str, Any] = match.groupdict()
if kws["microsecond"]:
kws["microsecond"] = kws["microsecond"].ljust(6, "0")
tzinfo_str = kws.pop("tzinfo")
tzinfo: Optional[dt.tzinfo] = None
if tzinfo_str == "Z":
tzinfo = UTC
elif tzinfo_str is not None:
offset_mins = int(tzinfo_str[-2:]) if len(tzinfo_str) > 3 else 0
offset_hours = int(tzinfo_str[1:3])
offset = dt.timedelta(hours=offset_hours, minutes=offset_mins)
if tzinfo_str[0] == "-":
offset = -offset
tzinfo = dt.timezone(offset)
kws = {k: int(v) for k, v in kws.items() if v is not None}
kws["tzinfo"] = tzinfo
return dt.datetime(**kws)
def parse_date(dt_str: str) -> Optional[dt.date]:
    """Convert a date string to a date object, or None when malformed."""
    try:
        parsed = dt.datetime.strptime(dt_str, DATE_STR_FORMAT)
    except ValueError:  # dt_str did not match our format
        return None
    return parsed.date()
def parse_time(time_str: str) -> Optional[dt.time]:
    """Parse a time string (00:20:00) into Time object.

    Return None if invalid.
    """
    fields = str(time_str).split(":")
    if len(fields) < 2:
        # Need at least hours and minutes.
        return None
    try:
        # Seconds are optional and default to 0.
        second = int(fields[2]) if len(fields) > 2 else 0
        return dt.time(int(fields[0]), int(fields[1]), second)
    except ValueError:
        # Non-numeric part or value out of range for dt.time.
        return None
# Found in this gist: https://gist.github.com/zhangsen/1199964
def get_age(date: dt.datetime) -> str:
"""
Take a datetime and return its "age" as a string.
The age can be in second, minute, hour, day, month or year. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
Make sure date is not in the future, or else it won't work.
"""
def formatn(number: int, unit: str) -> str:
"""Add "unit" if it's plural."""
if number == 1:
return f"1 {unit}"
return f"{number:d} {unit}s"
def q_n_r(first: int, second: int) -> Tuple[int, int]:
"""Return quotient and remaining."""
return first // second, first % second
delta = now() - date
day = delta.days
second = delta.seconds
year, day = q_n_r(day, 365)
if year > 0:
return formatn(year, "year")
month, day = q_n_r(day, 30)
if month > 0:
return formatn(month, "month")
if day > 0:
return formatn(day, "day")
hour, second = q_n_r(second, 3600)
if hour > 0:
return formatn(hour, "hour")
minute, second = q_n_r(second, 60)
if minute > 0:
return formatn(minute, "minute")
return formatn(second, "second")
def parse_time_expression(parameter: Any, min_value: int, max_value: int) -> List[int]:
    """Parse the time expression part and return a list of times to match.

    Accepts None/MATCH_ALL (every value), "/N" (every Nth value), a single
    scalar, or an iterable of values. Raises ValueError when a resulting
    value falls outside [min_value, max_value].
    """
    if parameter is None or parameter == MATCH_ALL:
        matches = list(range(min_value, max_value + 1))
    elif isinstance(parameter, str) and parameter.startswith("/"):
        # "/N" means every value divisible by N.
        step = int(parameter[1:])
        matches = [v for v in range(min_value, max_value + 1) if v % step == 0]
    elif not hasattr(parameter, "__iter__"):
        matches = [int(parameter)]
    else:
        matches = sorted(int(v) for v in parameter)

    for val in matches:
        if val < min_value or val > max_value:
            raise ValueError(
                f"Time expression '{parameter}': parameter {val} out of range "
                f"({min_value} to {max_value})"
            )
    return matches
def find_next_time_expression_time(
    now: dt.datetime,  # pylint: disable=redefined-outer-name
    seconds: List[int],
    minutes: List[int],
    hours: List[int],
) -> dt.datetime:
    """Find the next datetime from now for which the time expression matches.

    The algorithm looks at each time unit separately and tries to find the
    next one that matches for each. If any of them would roll over, all
    time units below that are reset to the first matching value.

    Timezones are also handled (the tzinfo of the now object is used),
    including daylight saving time.

    Raises ValueError when any of the match lists is empty.
    """
    if not seconds or not minutes or not hours:
        raise ValueError("Cannot find a next time: Time expression never matches!")

    def _lower_bound(arr: List[int], cmp: int) -> Optional[int]:
        """Return the first value in arr greater or equal to cmp.

        Return None if no such value exists. `arr` must be sorted
        ascending (binary search).
        """
        left = 0
        right = len(arr)
        while left < right:
            mid = (left + right) // 2
            if arr[mid] < cmp:
                left = mid + 1
            else:
                right = mid
        if left == len(arr):
            return None
        return arr[left]

    result = now.replace(microsecond=0)

    # Match next second
    next_second = _lower_bound(seconds, result.second)
    if next_second is None:
        # No second to match in this minute. Roll-over to next minute.
        next_second = seconds[0]
        result += dt.timedelta(minutes=1)

    result = result.replace(second=next_second)

    # Match next minute
    next_minute = _lower_bound(minutes, result.minute)
    if next_minute != result.minute:
        # We're in the next minute. Seconds needs to be reset.
        result = result.replace(second=seconds[0])

    if next_minute is None:
        # No minute to match in this hour. Roll-over to next hour.
        next_minute = minutes[0]
        result += dt.timedelta(hours=1)

    result = result.replace(minute=next_minute)

    # Match next hour
    next_hour = _lower_bound(hours, result.hour)
    if next_hour != result.hour:
        # We're in the next hour. Seconds+minutes needs to be reset.
        result = result.replace(second=seconds[0], minute=minutes[0])

    if next_hour is None:
        # No minute to match in this day. Roll-over to next day.
        next_hour = hours[0]
        result += dt.timedelta(days=1)

    result = result.replace(hour=next_hour)

    if result.tzinfo is None:
        # Naive input: no DST handling needed.
        return result

    # Now we need to handle timezones. We will make this datetime object
    # "naive" first and then re-convert it to the target timezone.
    # This is so that we can call pytz's localize and handle DST changes.
    tzinfo: pytzinfo.DstTzInfo = result.tzinfo
    result = result.replace(tzinfo=None)

    try:
        result = tzinfo.localize(result, is_dst=None)
    except pytzexceptions.AmbiguousTimeError:
        # This happens when we're leaving daylight saving time and local
        # clocks are rolled back. In this case, we want to trigger
        # on both the DST and non-DST time. So when "now" is in the DST
        # use the DST-on time, and if not, use the DST-off time.
        use_dst = bool(now.dst())
        result = tzinfo.localize(result, is_dst=use_dst)
    except pytzexceptions.NonExistentTimeError:
        # This happens when we're entering daylight saving time and local
        # clocks are rolled forward, thus there are local times that do
        # not exist. In this case, we want to trigger on the next time
        # that *does* exist.
        # In the worst case, this will run through all the seconds in the
        # time shift, but that's max 3600 operations for once per year
        result = result.replace(tzinfo=tzinfo) + dt.timedelta(seconds=1)
        return find_next_time_expression_time(result, seconds, minutes, hours)

    result_dst = cast(dt.timedelta, result.dst())
    now_dst = cast(dt.timedelta, now.dst())
    if result_dst >= now_dst:
        return result

    # Another edge-case when leaving DST:
    # When now is in DST and ambiguous *and* the next trigger time we *should*
    # trigger is ambiguous and outside DST, the excepts above won't catch it.
    # For example: if triggering on 2:30 and now is 28.10.2018 2:30 (in DST)
    # we should trigger next on 28.10.2018 2:30 (out of DST), but our
    # algorithm above would produce 29.10.2018 2:30 (out of DST)

    # Step 1: Check if now is ambiguous
    try:
        tzinfo.localize(now.replace(tzinfo=None), is_dst=None)
        return result
    except pytzexceptions.AmbiguousTimeError:
        pass

    # Step 2: Check if result of (now - DST) is ambiguous.
    check = now - now_dst
    check_result = find_next_time_expression_time(check, seconds, minutes, hours)
    try:
        tzinfo.localize(check_result.replace(tzinfo=None), is_dst=None)
        return result
    except pytzexceptions.AmbiguousTimeError:
        pass

    # OK, edge case does apply. We must override the DST to DST-off
    check_result = tzinfo.localize(check_result.replace(tzinfo=None), is_dst=False)
    return check_result
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" P1 tests for Scaling up Vm
"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
from nose.plugins.attrib import attr
_multiprocess_shared_ = True
class Services:
    """Test VM Life Cycle Services

    Static configuration data consumed by the scale-VM tests.
    """

    def __init__(self):
        account = {
            "email": "test@test.com",
            "firstname": "Test",
            "lastname": "User",
            # Random characters are appended in create account to
            # ensure unique username generated each time
            "username": "test",
            "password": "password",
        }
        # Small virtual machine instance settings (VM creds for SSH).
        small_vm = {
            "displayname": "testserver",
            "username": "root",
            "password": "password",
            "ssh_port": 22,
            "hypervisor": 'XenServer',
            "privateport": 22,
            "publicport": 22,
            "protocol": 'TCP',
        }
        offerings = {
            # Small service offering used as the VM's starting size.
            "small": {
                "name": "SmallInstance",
                "displaytext": "SmallInstance",
                "cpunumber": 1,
                "cpuspeed": 100,
                "memory": 256,
            },
            # Big service offering the VM is scaled up to.
            "big": {
                "name": "BigInstance",
                "displaytext": "BigInstance",
                "cpunumber": 1,
                "cpuspeed": 100,
                "memory": 512,
            },
        }
        self.services = {
            "account": account,
            "small": small_vm,
            "service_offerings": offerings,
            # Change this
            "template": {
                "displaytext": "xs",
                "name": "xs",
                "passwordenabled": False,
            },
            "sleep": 60,
            "timeout": 10,
            # Migrate VM to hostid; CentOS 5.3 (64-bit)
            "ostype": 'CentOS 5.3 (64-bit)',
        }
class TestScaleVm(cloudstackTestCase):
    """Verify that a running VM can be scaled up to a bigger service offering."""

    @classmethod
    def setUpClass(cls):
        """Provision the account, two service offerings, and a small VM once per class."""
        cls.api_client = super(TestScaleVm, cls).getClsTestClient().getApiClient()
        cls.services = Services().services

        # Get Zone, Domain and templates
        domain = get_domain(cls.api_client, cls.services)
        zone = get_zone(cls.api_client, cls.services)
        cls.services['mode'] = zone.networktype

        template = get_template(
            cls.api_client,
            zone.id,
            cls.services["ostype"]
        )
        # Set Zones and disk offerings ??
        cls.services["small"]["zoneid"] = zone.id
        cls.services["small"]["template"] = template.id

        # Create account, service offerings, vm.
        cls.account = Account.create(
            cls.api_client,
            cls.services["account"],
            domainid=domain.id
        )

        cls.small_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offerings"]["small"]
        )

        cls.big_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offerings"]["big"]
        )

        # create a virtual machine using the small offering as a starting point
        cls.virtual_machine = VirtualMachine.create(
            cls.api_client,
            cls.services["small"],
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.small_offering.id,
            mode=cls.services["mode"]
        )
        # NOTE(review): big_offering is not in _cleanup — presumably leaked
        # intentionally or cleaned elsewhere; confirm.
        cls._cleanup = [
            cls.small_offering,
            cls.account
        ]

    @classmethod
    def tearDownClass(cls):
        """Remove the class-level resources created in setUpClass."""
        cls.api_client = super(TestScaleVm, cls).getClsTestClient().getApiClient()
        cleanup_resources(cls.api_client, cls._cleanup)
        return

    def setUp(self):
        self.apiclient = self.testClient.getApiClient()
        self.dbclient = self.testClient.getDbConnection()
        self.cleanup = []

    def tearDown(self):
        # Clean up, terminate the created ISOs
        cleanup_resources(self.apiclient, self.cleanup)
        return

    @attr(hypervisor="xenserver")
    @attr(tags=["advanced", "basic"])
    def test_01_scale_vm(self):
        """Test scale virtual machine
        """
        # Validate the following
        # Scale up the vm and see if it scales to the new svc offering and is
        # finally in running state
        self.debug("Scaling VM-ID: %s to service offering: %s and state %s" % (
            self.virtual_machine.id,
            self.big_offering.id,
            self.virtual_machine.state
        ))

        cmd = scaleVirtualMachine.scaleVirtualMachineCmd()
        cmd.serviceofferingid = self.big_offering.id
        cmd.id = self.virtual_machine.id
        self.apiclient.scaleVirtualMachine(cmd)

        list_vm_response = VirtualMachine.list(
            self.apiclient,
            id=self.virtual_machine.id
        )
        self.assertEqual(
            isinstance(list_vm_response, list),
            True,
            "Check list response returns a valid list"
        )

        self.assertNotEqual(
            list_vm_response,
            None,
            "Check virtual machine is listVirtualMachines"
        )

        vm_response = list_vm_response[0]
        self.assertEqual(
            vm_response.id,
            self.virtual_machine.id,
            "Check virtual machine ID of scaled VM"
        )

        # VirtualMachine should be updated to tell cloudstack it has PV tools
        # available and successfully scaled. We will only mock that behaviour
        # here but it is not expected in production since the VM scaling is not
        # guaranteed until tools are installed, vm rebooted
        self.virtual_machine.update(self.apiclient, isdynamicallyscalable='true')

        self.debug("Scaling VM-ID: %s from service offering: %s to new service offering %s and the response says %s" % (
            self.virtual_machine.id,
            self.virtual_machine.serviceofferingid,
            self.big_offering.id,
            vm_response.serviceofferingid
        ))
        self.assertEqual(
            vm_response.serviceofferingid,
            self.big_offering.id,
            "Check service offering of the VM"
        )

        self.assertEqual(
            vm_response.state,
            'Running',
            "Check the state of VM"
        )
        return
| |
import numpy as np
from mir_eval.melody import hz2cents
import scipy.signal
class PeakStreamHelper(object):
    """Helper that tracks pitch contours by streaming salience peaks."""

    def __init__(self, S, times, freqs, amp_thresh, dev_thresh, n_gap,
                 pitch_cont, peak_thresh=None):
        '''Init method.

        Parameters
        ----------
        S : np.array
            Salience matrix
        times : np.array
            Array of times in seconds
        freqs : np.array
            Array of frequencies in Hz
        amp_thresh : float
            Threshold on how big a peak must be relative to the maximum in its
            frame.
        dev_thresh : float
            The maximum number of standard deviations below the mean a peak can
            be to survive.
        n_gap : float
            Number of frames that can be taken from bad_peaks.
        pitch_cont : float
            Pitch continuity threshold in cents.
        peak_thresh : float or None
            threshold on peaks to separate good and bad peaks.
            If set, peaks are split by threshold.
        '''
        self.S = S
        self.S_norm = self._get_normalized_S()
        self.times = times
        self.freqs_hz = freqs
        self.freqs = hz2cents(freqs)  # work in cents for continuity checks
        self.amp_thresh = amp_thresh
        self.dev_thresh = dev_thresh
        self.n_gap = n_gap
        self.pitch_cont = pitch_cont
        self.peak_thresh = peak_thresh
        if self.peak_thresh is None:
            self.split_method = 'stats'
        else:
            self.split_method = 'thresh'
        # peaks along the frequency axis of each frame
        peaks = scipy.signal.argrelmax(S, axis=0)
        self.n_peaks = len(peaks[0])
        if self.n_peaks > 0:
            self.peak_index = np.arange(self.n_peaks)
            self.peak_time_idx = peaks[1]
            self.first_peak_time_idx = np.min(self.peak_time_idx)
            self.last_peak_time_idx = np.max(self.peak_time_idx)
            self.frame_dict = self._get_frame_dict()
            self.peak_freqs = self.freqs[peaks[0]]
            self.peak_freqs_hz = self.freqs_hz[peaks[0]]
            self.peak_amps = self.S[peaks[0], peaks[1]]
            self.peak_amps_norm = self.S_norm[peaks[0], peaks[1]]
            self.good_peaks, self.bad_peaks = self._partition_peaks()
            (self.good_peaks_sorted,
             self.good_peaks_sorted_index,
             self.good_peaks_sorted_avail,
             self.n_good_peaks) = self._create_good_peak_index()
            self.smallest_good_peak_idx = 0
        else:
            # degenerate case: no local maxima in the salience matrix
            self.peak_index = np.array([])
            self.peak_time_idx = np.array([])
            self.first_peak_time_idx = None
            self.last_peak_time_idx = None
            self.frame_dict = {}
            self.peak_freqs = np.array([])
            self.peak_freqs_hz = np.array([])
            self.peak_amps = np.array([])
            self.peak_amps_norm = np.array([])
            self.good_peaks = set()
            self.bad_peaks = set()
            self.good_peaks_sorted = []
            self.good_peaks_sorted_index = {}
            self.good_peaks_sorted_avail = np.array([])
            self.n_good_peaks = 0
            self.smallest_good_peak_idx = 0
        self.gap = 0
        self.n_remaining = len(self.good_peaks)
        self.contour_idx = []
        self.c_len = []

    def _get_normalized_S(self):
        """Compute normalized salience matrix

        Returns
        -------
        S_norm : np.array
            Normalized salience matrix.
        """
        S_min = np.min(self.S, axis=0)
        S_norm = self.S - S_min
        S_max = np.max(S_norm, axis=0)
        # avoid division by zero for all-constant frames
        S_max[S_max == 0] = 1.0
        S_norm = S_norm / S_max
        return S_norm

    def _get_frame_dict(self):
        """Get dictionary of frame index to peak index.

        Returns
        -------
        frame_dict : dict
            Dictionary mapping frame index to lists of peak indices
        """
        frame_dict = {k: [] for k in range(len(self.times))}
        for i, k in enumerate(self.peak_time_idx):
            frame_dict[k].append(i)
        for k, v in frame_dict.items():
            frame_dict[k] = np.array(v)
        return frame_dict

    def _partition_peaks(self):
        """Split peaks into good peaks and bad peaks.

        Returns
        -------
        good_peaks : set
            Set of good peak indices
        bad_peaks : set
            Set of bad peak indices
        """
        good_peaks = set(self.peak_index)
        bad_peaks = set()
        if self.split_method == 'stats':
            # peaks with amplitude below a threshold --> bad peaks
            bad_peak_idx = np.where(self.peak_amps_norm < self.amp_thresh)[0]
            bad_peaks.update(bad_peak_idx)
            # find indices of surviving peaks
            good_peaks.difference_update(bad_peaks)
            # BUG FIX: the mean and standard deviation must be computed over
            # the amplitudes of the *surviving* peaks (as the comment below
            # says), not over the already-rejected bad peaks.
            good_peak_idx = np.array(sorted(good_peaks), dtype=int)
            if len(good_peak_idx) > 0:
                # compute mean and standard deviation of amplitudes of
                # survivors
                mean_peak = np.mean(self.peak_amps[good_peak_idx])
                std_peak = np.std(self.peak_amps[good_peak_idx])
                # peaks with amplitude too far below the mean --> bad peaks
                bad_peaks.update(np.where(
                    self.peak_amps < (
                        mean_peak - (self.dev_thresh * std_peak)))[0])
                good_peaks.difference_update(bad_peaks)
        elif self.split_method == 'thresh':
            # simple absolute threshold on raw amplitude
            bad_peak_idx = np.where(self.peak_amps < self.peak_thresh)[0]
            bad_peaks.update(bad_peak_idx)
            good_peaks.difference_update(bad_peaks)
        else:
            raise ValueError("invalid split method")
        return good_peaks, bad_peaks

    def _create_good_peak_index(self):
        """Create a sorted index of peaks by amplitude.

        Returns
        -------
        good_peaks_sorted : np.ndarray
            Array of peak indices ordered by peak amplitude
        good_peaks_sorted_index : dict
            Dictionary mapping peak index to its position in good_peaks_sorted
        good_peaks_sorted_avail : np.ndarray
            Array of booleans indicating if a good peak has been used
        n_good_peaks : int
            Number of initial good peaks
        """
        good_peak_list = list(self.good_peaks)
        # descending amplitude order
        sort_idx = list(self.peak_amps[good_peak_list].argsort()[::-1])
        good_peaks_sorted = np.array(good_peak_list)[sort_idx]
        good_peaks_sorted_index = {
            j: i for i, j in enumerate(good_peaks_sorted)
        }
        n_good_peaks = len(good_peak_list)
        good_peaks_sorted_avail = np.ones((n_good_peaks, )).astype(bool)
        return (good_peaks_sorted, good_peaks_sorted_index,
                good_peaks_sorted_avail, n_good_peaks)

    def get_largest_peak(self):
        """Get the largest remaining good peak.

        Returns
        -------
        max_peak_idx : int
            Index of the largest remaining good peak
        """
        return self.good_peaks_sorted[self.smallest_good_peak_idx]

    def update_largest_peak_list(self, peak_index):
        """Mark a peak as used and advance the largest-peak cursor.

        Parameters
        ----------
        peak_index : int
            Index of the peak that was just consumed
        """
        this_sorted_idx = self.good_peaks_sorted_index[peak_index]
        self.good_peaks_sorted_avail[this_sorted_idx] = False
        # only advance the cursor if the consumed peak was at or before it
        if this_sorted_idx <= self.smallest_good_peak_idx:
            i = this_sorted_idx
            while i < self.n_good_peaks:
                if self.good_peaks_sorted_avail[i]:
                    self.smallest_good_peak_idx = i
                    break
                else:
                    i += 1

    def get_closest_peak(self, current_f0, candidates):
        """Find the peak in `candidates` closest in frequency to `current_f0`.

        Parameters
        ----------
        current_f0 : float
            Current frequency value
        candidates : list
            List of peak candidates

        Returns
        -------
        closest_peak_idx : int
            Index of the closest peak to `current_f0`
        """
        min_dist = np.argmin(np.abs(self.peak_freqs[candidates] - current_f0))
        return candidates[min_dist]

    def get_peak_candidates(self, frame_idx, current_f0):
        """Get candidates in frame_idx at current_f0

        Parameters
        ----------
        frame_idx : int
            Frame index
        current_f0 : float
            Current frequency value

        Returns
        -------
        candidates : list or None
            List of peak candidates. None if no available peaks.
        from_good : bool or None
            True if candidates are "good", False if they are "bad",
            None if no available peaks.
        """
        # find candidates in time frame
        all_cands = self.frame_dict[frame_idx]
        if len(all_cands) == 0:
            return None, None
        # restrict to frames that satisfy pitch continuity
        all_cands = set(all_cands[
            np.abs(self.peak_freqs[all_cands] - current_f0) < self.pitch_cont
        ])
        if len(all_cands) == 0:
            return None, None
        # prefer good peaks; a good hit resets the gap counter
        cands = list(all_cands & self.good_peaks)
        if len(cands) > 0:
            self.gap = 0
            return cands, True
        # fall back on bad peaks, at the cost of one gap frame
        bad_cands = list(all_cands & self.bad_peaks)
        if len(bad_cands) > 0:
            self.gap += 1
            return bad_cands, False
        return None, None

    def get_contour(self):
        """Get the next contour.
        Appends to `self.contour_idx` and `self.c_len`
        Removes peaks from `self.good_peaks` and `self.bad_peaks`
        as they are selected.
        """
        largest_peak = self.get_largest_peak()
        # time frame and freqency index of largest peak
        frame_idx = self.peak_time_idx[largest_peak]
        f0_val = self.peak_freqs[largest_peak]
        self.good_peaks.remove(largest_peak)
        self.update_largest_peak_list(largest_peak)
        self.n_remaining -= 1
        self.contour_idx.append(largest_peak)
        self.gap = 0
        c_len = 1
        # choose forward peaks for this contour
        while self.gap < self.n_gap:
            # go to next time frame
            frame_idx = frame_idx + 1
            if frame_idx > self.last_peak_time_idx:
                break
            cands, from_good = self.get_peak_candidates(frame_idx, f0_val)
            if cands is None:
                break
            closest_peak = self.get_closest_peak(f0_val, cands)
            # add this peak to the contour, remove it from candidates
            self.contour_idx.append(closest_peak)
            c_len += 1
            if from_good:
                self.good_peaks.remove(closest_peak)
                self.update_largest_peak_list(closest_peak)
                self.n_remaining -= 1
            else:
                self.bad_peaks.remove(closest_peak)
            # update target frequency
            f0_val = self.peak_freqs[closest_peak]
        # choose backward peaks for this contour, restarting from the seed
        frame_idx = self.peak_time_idx[largest_peak]
        f0_val = self.peak_freqs[largest_peak]
        self.gap = 0
        while self.gap < self.n_gap:
            # go to previous time frame
            frame_idx = frame_idx - 1
            if frame_idx < self.first_peak_time_idx:
                break
            cands, from_good = self.get_peak_candidates(frame_idx, f0_val)
            if cands is None:
                break
            closest_peak = self.get_closest_peak(f0_val, cands)
            # add this peak to the contour, remove it from candidates
            self.contour_idx.append(closest_peak)
            c_len += 1
            if from_good:
                self.good_peaks.remove(closest_peak)
                self.update_largest_peak_list(closest_peak)
                self.n_remaining -= 1
            else:
                self.bad_peaks.remove(closest_peak)
            # update target frequency
            f0_val = self.peak_freqs[closest_peak]
        self.c_len.append(c_len)

    def peak_streaming(self):
        """Run peak streaming over salience function

        Returns
        -------
        c_numbers : np.array
            Contour numbers
        c_times : np.array
            Contour times in seconds
        c_freqs : np.array
            Contour frequencies
        c_sal : np.array
            Contour salience
        """
        # loop until there are no remaining good peaks
        while self.n_remaining > 0:
            self.get_contour()
        if len(self.c_len) > 0:
            c_numbers = np.repeat(range(len(self.c_len)), repeats=self.c_len)
            c_times = self.times[self.peak_time_idx[self.contour_idx]]
            c_freqs = self.peak_freqs_hz[self.contour_idx]
            c_sal = self.peak_amps[self.contour_idx]
        else:
            c_numbers = np.array([])
            c_times = np.array([])
            c_freqs = np.array([])
            c_sal = np.array([])
        return c_numbers, c_times, c_freqs, c_sal
| |
import json
from django import forms
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape
class SelectizeSelect(forms.widgets.Select):
    """Single <select> enhanced with selectize.js, options sorted by text."""

    class Media:
        css = {
            'all': (
                'selectize/dist/css/selectize.bootstrap3.css',
            )
        }
        js = (
            'selectize/dist/js/standalone/selectize.min.js',
        )

    def render(self, name, value, attrs=None):
        """Render the select element plus the activation script.

        Fixes: removed the unused `value = value or []` assignment and the
        no-op `''.join(...)` around an already-concatenated string.
        """
        html = super(SelectizeSelect, self).render(name, value, attrs)
        # assumes attrs always carries an 'id' key — true when rendered
        # through a Django form; TODO confirm for direct widget use
        script = '<script type="text/javascript"> \
            $(function() { \
                $("#%s").selectize({ \
                    sortField: "text" \
                }); \
            }); \
            </script>' % attrs['id']
        return mark_safe(html + script)
class SelectizeSelectMultiple(forms.widgets.SelectMultiple):
    """Multi-select enhanced with selectize.js and a remove button per item."""

    class Media:
        css = {
            'all': (
                'selectize/dist/css/selectize.bootstrap3.css',
            )
        }
        js = (
            'selectize/dist/js/standalone/selectize.min.js',
        )

    def render(self, name, value, attrs=None):
        """Render the multi-select plus the activation script.

        Fix: removed the no-op `''.join(...)` around the concatenated string.
        """
        html = super(SelectizeSelectMultiple, self).render(name, value, attrs)
        value = value or []
        script = '<script type="text/javascript"> \
            $(function() { \
                $("#%s").selectize({ \
                    plugins: ["remove_button"], \
                    items: %s \
                }); \
            }); \
            </script>' % (attrs['id'], '[%s]' % ','.join(map(str, value)))
        return mark_safe(html + script)
class SelectizeSelectMultipleCSVInput(forms.widgets.SelectMultiple):
    """
    A selectize.js widget that allows selecting multiple existing values and adding new values.
    New values are prefixed with the value supplied by the prefix construction parameter.
    """

    class Media:
        css = {
            'all': ('selectize/dist/css/selectize.bootstrap3.css',)
        }
        js = ('selectize/dist/js/standalone/selectize.min.js',)

    def __init__(self, prefix, *args, **kwargs):
        """`prefix` is prepended to every value the user creates inline."""
        super(SelectizeSelectMultipleCSVInput, self).__init__(*args, **kwargs)
        self.prefix = prefix

    def render(self, name, value, attrs=None):
        """Render the multi-select plus a script allowing inline creation.

        Fix: removed the no-op `''.join(...)` around the concatenated string.
        NOTE(review): values containing double quotes would break the inline
        JS array — presumably values are ids/slugs; verify against callers.
        """
        html = super(SelectizeSelectMultipleCSVInput, self).render(name, value, attrs)
        value = value or []
        script = '<script type="text/javascript"> \
            $(function() { \
                $("#%s").selectize({ \
                    plugins: ["remove_button"], \
                    items: %s, \
                    create: function(input) { \
                        return { \
                            value: \"%s\" + input, \
                            text: input \
                        } \
                    } \
                }); \
            }); \
            </script>' % (attrs['id'], '["%s"]' % '","'.join(map(str, value)), self.prefix)
        return mark_safe(html + script)
class SelectizeCSVInput(forms.widgets.TextInput):
    """Text input holding comma-separated values, edited via selectize.js."""

    class Media:
        css = {
            'all': (
                'selectize/dist/css/selectize.bootstrap3.css',
            )
        }
        js = (
            'selectize/dist/js/standalone/selectize.min.js',
        )

    def render(self, name, value, attrs=None):
        """Render the text input plus a script that splits the CSV into tags.

        Fix: removed the no-op `''.join(...)` around the concatenated string.
        """
        html = super(SelectizeCSVInput, self).render(name, value, attrs)
        value = value or ''
        script = '<script type="text/javascript"> \
            $(function() { \
                $("#%s").selectize({ \
                    plugins: ["remove_button"], \
                    items: %s, \
                    create: function(input) { \
                        return { \
                            value: input, \
                            text: input \
                        } \
                    } \
                }); \
            }); \
            </script>' % (attrs['id'], '["%s"]' % value.replace(',', '","'))
        return mark_safe(html + script)
class BootstrapPicker(object):
    class Media:
        css = {
            'all': (
                'eonasdan-bootstrap-datetimepicker/build/css/bootstrap-datetimepicker.min.css',
            )
        }
        js = (
            'moment/min/moment-with-locales.min.js',
            'eonasdan-bootstrap-datetimepicker/build/js/bootstrap-datetimepicker.min.js',
        )
    def render(self, name, value, attrs=None):
        attrs = {} if attrs is None else attrs
        attrs.update({'class': 'form-control'})
        html = super(BootstrapPicker, self).render(name, value, attrs)
        html = '<div class="input-group date"> \
            %s \
            <div class="input-group-addon"> \
                <span class="glyphicon glyphicon-calendar"></span> \
            </div> \
            </div>' % html
        html += '<script type="text/javascript"> \
            $(function() { \
                $("#%s").datetimepicker(%s); \
            }); \
            </script>' % (attrs['id'], json.dumps(self.config()))
        return mark_safe(html)
    def config(self):
        return {
            'locale': 'de',
            'showClose': True,
            'useCurrent': False,
        }
class ClearableBootstrapPickerMixin(object):
    """Adds a "clear" button to any BootstrapPicker-derived widget."""

    def config(self):
        """Return the parent's picker options with `showClear` enabled."""
        options = super(ClearableBootstrapPickerMixin, self).config()
        options.update({'showClear': True})
        return options
class BootstrapDatePicker(BootstrapPicker, forms.widgets.DateInput):
    """Date-only picker using ISO format and day-level view."""

    def config(self):
        """Extend the base picker options with date-specific settings."""
        options = super(BootstrapDatePicker, self).config()
        options.update({
            'format': 'YYYY-MM-DD',
            'viewMode': 'days',
        })
        return options
class BootstrapTimePicker(BootstrapPicker, forms.widgets.TimeInput):
    """Time-only picker using 24-hour HH:mm format."""

    def config(self):
        """Extend the base picker options with the time format."""
        options = super(BootstrapTimePicker, self).config()
        options.update({
            'format': 'HH:mm',
        })
        return options
class ClearableBootstrapDatePicker(ClearableBootstrapPickerMixin, BootstrapDatePicker):
    """Date picker that additionally offers a "clear" button."""
    pass
class ClearableBootstrapTimePicker(ClearableBootstrapPickerMixin, BootstrapTimePicker):
    """Time picker that additionally offers a "clear" button."""
    pass
class AttachmentInput(forms.widgets.ClearableFileInput):
    """Clearable file input that links to the existing attachment by name."""
    # Custom layout: current-file link + clear checkbox, then the file input.
    template_with_initial = (
        '<br />%(initial_text)s: <a href="%(initial_url)s">%(initial)s</a> '
        '%(clear_template)s<br />'
        '<div style="float:left; margin-right:10px;">%(input_text)s:</div>'
        '<span style="display: block; overflow: hidden;">%(input)s</span>'
    )
    def get_template_substitution_values(self, value):
        """Show attachment's name instead of its URL."""
        # assumes `value` is a FieldFile whose model instance exposes `name`
        # — TODO confirm against the Attachment model
        return {
            'initial': conditional_escape(value.instance.name),
            'initial_url': conditional_escape(value.url),
        }
| |
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
import random
from tests.ggrc import TestCase
from freezegun import freeze_time
from datetime import datetime
from mock import patch
import os
from ggrc import notification
from ggrc.models import NotificationConfig, Notification, Person
from tests.ggrc_workflows.generator import WorkflowsGenerator
from tests.ggrc.api_helper import Api
from tests.ggrc.generator import GgrcGenerator
# On Travis CI, fix the RNG seed so failing runs can be replayed locally.
if os.environ.get('TRAVIS', False):
  random.seed(1)
class TestEnableAndDisableNotifications(TestCase):
  """ This class contains simple one time workflow tests that are not
  in the gsheet test grid
  """

  def setUp(self):
    """Create generators, a fresh user, and patch Notification timestamps."""
    TestCase.setUp(self)
    self.api = Api()
    self.wf_generator = WorkflowsGenerator()
    self.ggrc_generator = GgrcGenerator()
    Notification.query.delete()
    self.random_objects = self.ggrc_generator.generate_random_objects(2)
    _, self.user = self.ggrc_generator.generate_person(user_role="gGRC Admin")
    self.create_test_cases()

    def init_decorator(init):
      # Force created_at to "now" so freeze_time controls notification dates.
      def new_init(self, *args, **kwargs):
        init(self, *args, **kwargs)
        if hasattr(self, "created_at"):
          self.created_at = datetime.now()
      new_init._created_at_patched = True  # marker to avoid re-wrapping
      return new_init

    # FIX: previously Notification.__init__ was re-wrapped on *every* setUp
    # call, stacking one decorator layer per test; only patch it once.
    if not getattr(Notification.__init__, "_created_at_patched", False):
      Notification.__init__ = init_decorator(Notification.__init__)

  @patch("ggrc.notification.email.send_email")
  def test_default_notificaitons_settings(self, mock_mail):
    """With no explicit setting, digests are sent close to the cycle start."""
    with freeze_time("2015-02-01 13:39:20"):
      _, wf = self.wf_generator.generate_workflow(self.quarterly_wf)
      response, wf = self.wf_generator.activate_workflow(wf)
      self.assert200(response)
      user = Person.query.get(self.user.id)
    with freeze_time("2015-01-01 13:39:20"):
      _, notif_data = notification.get_todays_notifications()
      self.assertNotIn(user.email, notif_data)
    with freeze_time("2015-01-29 13:39:20"):
      _, notif_data = notification.get_todays_notifications()
      self.assertIn(user.email, notif_data)

  @patch("ggrc.notification.email.send_email")
  def test_disabled_notifications(self, mock_mail):
    """Disabling Email_Digest suppresses notifications entirely."""
    with freeze_time("2015-02-01 13:39:20"):
      _, wf = self.wf_generator.generate_workflow(self.quarterly_wf)
      response, wf = self.wf_generator.activate_workflow(wf)
      self.assert200(response)
      self.ggrc_generator.generate_notification_setting(
          self.user.id, "Email_Digest", False)
      user = Person.query.get(self.user.id)
    with freeze_time("2015-01-01 13:39:20"):
      _, notif_data = notification.get_todays_notifications()
      self.assertNotIn(user.email, notif_data)
    with freeze_time("2015-01-29 13:39:20"):
      _, notif_data = notification.get_todays_notifications()
      self.assertNotIn(user.email, notif_data)

  @patch("ggrc.notification.email.send_email")
  def test_enabled_notifications(self, mock_mail):
    """Explicitly enabling Email_Digest keeps notifications flowing."""
    with freeze_time("2015-02-01 13:39:20"):
      _, wf = self.wf_generator.generate_workflow(self.quarterly_wf)
      response, wf = self.wf_generator.activate_workflow(wf)
      self.assert200(response)
    with freeze_time("2015-01-29 13:39:20"):
      user = Person.query.get(self.user.id)
      _, notif_data = notification.get_todays_notifications()
      self.assertIn(user.email, notif_data)
      self.ggrc_generator.generate_notification_setting(
          self.user.id, "Email_Digest", True)
      user = Person.query.get(self.user.id)
      _, notif_data = notification.get_todays_notifications()
      self.assertIn(user.email, notif_data)

  @patch("ggrc.notification.email.send_email")
  def test_forced_notifications(self, mock_mail):
    """Workflows with notify_on_change always notify, regardless of setting."""
    with freeze_time("2015-02-01 13:39:20"):
      _, wf = self.wf_generator.generate_workflow(self.quarterly_wf_forced)
      response, wf = self.wf_generator.activate_workflow(wf)
      self.assert200(response)
      user = Person.query.get(self.user.id)
    with freeze_time("2015-01-29 13:39:20"):
      _, notif_data = notification.get_todays_notifications()
      self.assertIn(user.email, notif_data)
      self.ggrc_generator.generate_notification_setting(
          self.user.id, "Email_Digest", True)
      user = Person.query.get(self.user.id)
      _, notif_data = notification.get_todays_notifications()
      self.assertIn(user.email, notif_data)

  @patch("ggrc.notification.email.send_email")
  def test_force_one_wf_notifications(self, mock_mail):
    """Only the forced workflow survives when digests are disabled."""
    with freeze_time("2015-02-01 13:39:20"):
      _, wf_forced = self.wf_generator.generate_workflow(self.quarterly_wf_forced)
      response, wf_forced = self.wf_generator.activate_workflow(wf_forced)
      _, wf = self.wf_generator.generate_workflow(self.quarterly_wf)
      response, wf = self.wf_generator.activate_workflow(wf)
      self.assert200(response)
      user = Person.query.get(self.user.id)
    with freeze_time("2015-01-29 13:39:20"):
      _, notif_data = notification.get_todays_notifications()
      self.assertIn(user.email, notif_data)
      self.assertIn("cycle_starts_in", notif_data[user.email])
      self.assertIn(wf_forced.id, notif_data[user.email]["cycle_starts_in"])
      self.assertIn(wf.id, notif_data[user.email]["cycle_starts_in"])
      self.ggrc_generator.generate_notification_setting(
          self.user.id, "Email_Digest", False)
      user = Person.query.get(self.user.id)
      _, notif_data = notification.get_todays_notifications()
      self.assertIn(user.email, notif_data)
      self.assertIn("cycle_starts_in", notif_data[user.email])
      self.assertIn(wf_forced.id, notif_data[user.email]["cycle_starts_in"])
      self.assertNotIn(wf.id, notif_data[user.email]["cycle_starts_in"])

  def create_test_cases(self):
    """Build the workflow payloads used by the tests above."""
    def person_dict(person_id):
      return {
          "href": "/api/people/%d" % person_id,
          "id": person_id,
          "type": "Person"
      }

    self.quarterly_wf_forced = {
        "title": "quarterly wf forced notification",
        "notify_on_change": True,
        "description": "",
        "owners": [person_dict(self.user.id)],
        "frequency": "quarterly",
        "task_groups": [{
            "title": "tg_1",
            "contact": person_dict(self.user.id),
            "task_group_tasks": [{
                "contact": person_dict(self.user.id),
                "description": self.wf_generator.random_str(100),
                "relative_start_day": 5,
                "relative_start_month": 2,
                "relative_end_day": 25,
                "relative_end_month": 2,
            },
            ],
        },
        ]
    }
    self.quarterly_wf = {
        "title": "quarterly wf 1",
        "description": "",
        "owners": [person_dict(self.user.id)],
        "frequency": "quarterly",
        "task_groups": [{
            "title": "tg_1",
            "contact": person_dict(self.user.id),
            "task_group_tasks": [{
                "contact": person_dict(self.user.id),
                "description": self.wf_generator.random_str(100),
                "relative_start_day": 5,
                "relative_start_month": 2,
                "relative_end_day": 25,
                "relative_end_month": 2,
            },
            ],
        },
        ]
    }
| |
# coding: utf-8
import time
import random
from jsondb.db import Database
from tinydb import TinyDB, Query, where
from colorama import init, Fore, Back, Style
from thirdworldwar import *
class Bot:
    """Automation bot for a Third World War game account.

    Written for Python 2 (the original used print statements); prints are
    now single-argument print(...) calls, valid on both Python 2 and 3.
    """
    # Constants
    CONST_SERVERS = {1: "TERRA", 2: "ANDHERRA", 3: "DUNDHA"}
    # Variables
    _groupID = -1
    _actionLevel = None
    _pauseTimestamp = 0
    account = None
    db = {}
    isLogged = False

    def __init__(self, u, p, s, gid):
        """Log in account `u` on server `s` and bind it to group `gid`."""
        self._groupID = gid
        self.loadDatabases()
        self.account = ThirdWorldWar(u, p, s)
        self.isLogged = self.account.rLogin()
        self.log('+' if self.isLogged else '-',
                 "Login as \""+ self.CONST_SERVERS[s] +"/"+ u +"\"", False)
        self._actionLevel = self._getGroupLevel()
        # self.setPause()

    def __del__(self):
        self.isLogged = False if self.account.rLogout() else True
        self.log('-' if self.isLogged else '+',
                 "Disconnect from \""+ self.CONST_SERVERS[self.account.account['serveur']] +"/"+ self.account.account['email'] +"\"", False)

    def hello(self):
        """Run one bot iteration: chat functions then building workers."""
        if not self._isPauseFinished():
            return False
        self.log('?', "Selecting \""+ self.CONST_SERVERS[self.account.account['serveur']] +"/"+ self.account.account['email'] +"\"", True)
        self.log('?', "Group \""+self._getGroupData(self._groupID)['name']+"\" [ID:"+str(self._groupID)+"] / Level "+str(self._actionLevel))
        self.functions()
        self.workers()

    def loadDatabases(self):
        """Open the shared group/map/rank databases."""
        self.db['groups'] = Database("cache/db/groups.json")
        self.db['maps'] = TinyDB('cache/db/maps.json')
        self.db['ranks'] = TinyDB('cache/db/ranks.json', sort_keys=True, indent=4)

    def setPause(self):
        # random pause of up to ~50 minutes to look less bot-like
        if time.time() >= self._pauseTimestamp:
            self._pauseTimestamp = time.time() + (random.randint(random.randint(0, 10), random.randint(30, 50))*60)

    def _isPauseFinished(self):
        return True if time.time() >= self._pauseTimestamp else False

    def functions(self):
        """Call every chat function declared in the group's config."""
        if not self._isPauseFinished():
            return False
        try:
            group = self._getGroupData(self._groupID)
            if group != None and "functions" in group:
                self.log('!', "Functions !", True)
                for f in group['functions']:
                    self.log("?", "\tFunction "+f+" is define in groups..")
                    userFunc = getattr(self, f)
                    self.log("!", "\tCalling "+f+"()...")
                    userFunc()
        except AttributeError:
            self.log("-", "\tFunction does not exist !", True)
            pass

    def _fncChatBotAlly(self):
        """Poll alliance chat and dispatch recognized !commands."""
        messages = self.account.getChatAlliance()
        for message in messages:
            m = message['message'].split(' ')
            # ignore messages older than 12 minutes
            if (message['elasped']/60) > 12: break
            if m[0] == "!base":
                self._fncChatBotAlly_cmdBase(m)
            elif m[0] == "!who_plays":
                self._fncChatBotAlly_cmdWhoPlays(m)
            break

    def _fncChatBotAlly_cmdBase(self, m):
        """Answer "!base <pseudo>" with the player's known base positions."""
        if len(m) >= 2:
            self.log("+", "\t\tReceive command !base")
            pseudo = ""
            i = 1
            while i < len(m):
                pseudo = pseudo + " " + m[i]
                pseudo = pseudo.strip()
                i = i + 1
            self.log("?", "\t\tSearching bases for user \"" + pseudo + "\"..")
            table = self.db['maps'].table('server_'+str(self.account.account['serveur']))
            scan_date = table.get(where('id'), len(table))
            scan_date = scan_date['scan_date']
            bases = table.search((where('scan_date') == scan_date) & (where('user') == pseudo))
            rapport = "Extract from " + str(scan_date) + " scan !\n\n"
            rapport = rapport + "Wanted : @"+pseudo+"\n"
            rapport = rapport + "---\n"
            for base in bases:
                if 'x' in base and 'y' in base and 'name' in base:
                    rapport = rapport + "- " + base['name'] + " | " + str(base['x']) + "-" + str(base['y']) + "\n"
            rapport = rapport + "---\n"
            rapport = rapport + "OVER !"
            self.account.rSendMessageToAlliance(rapport)
        else:
            # BUG FIX: `log(...)` was called without `self.`, raising a
            # NameError whenever "!base" arrived without arguments.
            self.log("-", "Receive command !base, but no args..")

    def _fncChatBotAlly_cmdWhoPlays(self, m):
        """Answer "!who_plays" with the players active over the last scan."""
        self.log("+", "\t\tReceive command !who_plays")
        table = self.db['ranks'].table('server_'+str(self.account.account['serveur']))
        ranks_old = table.all()
        ranks_now = self.account.getRanking()
        self.log("?", "\t\tSearching for actives users..")
        rapport = "Active players for last 5 days :\n\n"
        count = 0
        for rn in ranks_now:
            for ro in ranks_old:
                if rn['user'] == ro['user']:
                    if (ro['points'] - rn['points']) != 0:
                        count = count + 1
                        rapport = rapport + "- @" + rn['user'] + " ("+str((rn['points'] - ro['points']))+" pts)\n"
                        # flush every 7 entries to respect chat size limits
                        if count >= 7:
                            count = 0
                            self.account.rSendMessageToAlliance(rapport)
                            rapport = ""
                    break
        rapport = rapport + "OVER !"
        self.account.rSendMessageToAlliance(rapport)

    def workers(self):
        """Advance buildings, technology and troops toward the group plan."""
        if not self._isPauseFinished():
            return False
        self.log('!', "Workers !", True)
        self._actionLevel = self._getGroupLevel()
        self._workersActionsForBuildings()
        self._workersActionsForTechnology()
        self._workersActionsForTroops()

    def _workersActionsForBuildings(self):
        """Queue building upgrades until the buildings feed is full."""
        feeds = self.account.getFeeds()
        b, index = self._isIn("Buildings", feeds, "name")
        if not b: return
        if int(feeds[index]['actives']) >= int(feeds[index]['max']):
            self.log("-", "\tBuildings feed is full !", True)
            return
        actions = self._getGroupActionForLevel(self._actionLevel)
        if actions == None: return
        builds = self.account.getBuildings()
        for action in actions:
            kword = "entity"
            b, bIndex = self._isIn(action[kword], builds, "name")
            if not b:
                continue
            if builds[bIndex]['available'] == True and int(action['level']) > int(builds[bIndex]['level']):
                isOk = self.account.rStartBuilding(builds[bIndex]["name"])
                # confirm the start by checking the feed counter went up
                cFeeds = self.account.getFeeds()
                cb, cIndex = self._isIn("Buildings", cFeeds, "name")
                isOk = True if int(cFeeds[cIndex]['actives']) == int(feeds[index]['actives']) + 1 else False
                self.log(
                    '+' if isOk else '-',
                    "\tStarting building \""+builds[bIndex]["name"]+"\" (lvl:"+builds[bIndex]["level"]+"/"+str(action['level'])+")",
                    True
                )
                feeds[index]['actives'] = int(feeds[index]['actives']) + 1 if isOk else int(feeds[index]['actives'])
                if int(feeds[index]['actives']) >= int(feeds[index]['max']):
                    return
        return

    def _workersActionsForTechnology(self):
        """Activate finished techs and start the next planned research."""
        actions = self._getGroupActionForLevel(self._actionLevel)
        if actions == None: return
        builds = self.account.getTechnology()
        for action in actions:
            b, bIndex = self._isIn(action['entity'], builds, "name")
            if not b:
                continue
            if int(builds[bIndex]['level']) < int(action['level']):
                if builds[bIndex]['activable']:
                    isOk = self.account.rTechActivate(action['entity'])
                    self.log('+' if isOk else "-", "\tTech \""+action['entity']+"\" activated!", True)
                elif builds[bIndex]['available'] == True and not builds[bIndex]['feed']:
                    isOk = self.account.rTechStart(action['entity'])
                    checkBuilds = self.account.getTechnology()
                    cb, cbIndex = self._isIn(action['entity'], checkBuilds, "name")
                    if cb:
                        self.log('+' if isOk and checkBuilds[cbIndex]['feed'] else '-', "\tStarting technology \""+action['entity']+"\"", True)

    def _workersActionsForTroops(self):
        # TODO: troop management not implemented yet
        n = 0

    def _getGroupData(self, gid):
        """Return the group dict with id `gid`, or None."""
        for grp in self.db['groups'].data()['groups']:
            if grp['id'] == gid:
                return grp
        return None

    def _getGroupActions(self):
        """Return all actions of the bound group (following actions_from)."""
        actions = []
        group = self._getGroupData(self._groupID)
        if group == None: return None
        if "actions_from" in group:
            group = self._getGroupData(group['actions_from'])
            if group == None: return None
        for action in group['actions']:
            actions.append(action)
        return actions if len(actions) > 0 else None

    def _isIn(self, value, into, kword):
        """Return (True, index) of the first item whose `kword` equals value."""
        count = 0
        for i in into:
            if i[kword] == value:
                return True, count
            count = count + 1
        return False, None

    def _getGroupLevel(self):
        """Return the first plan level whose requirements are not yet met."""
        userBuilds = self.account.getBuildings()
        userBuilds = userBuilds + self.account.getTechnology()
        userBuilds = userBuilds + self.account.getTroops()
        level = 0
        while level != None:
            actions = self._getGroupActionForLevel(level)
            if actions == None: return None
            for action in actions:
                b, index = self._isIn(action['entity'], userBuilds, 'name')
                if not b:
                    b, index = self._isIn(action['entity'], userBuilds, 'alias')
                    if not b: return level
                if int(userBuilds[index]['level']) < int(action['level']):
                    return level
            level = level + 1

    def _getGroupActionForLevel(self, lvl):
        """Return the actions whose `id` equals `lvl`, or None."""
        actionsForLevel = []
        actionsAll = self._getGroupActions()
        if actionsAll == None: return None
        for action in actionsAll:
            if action['id'] == lvl:
                actionsForLevel.append(action)
        return actionsForLevel if len(actionsForLevel) > 0 else None

    def log(self, prefix, text, line=False):
        """Print a timestamped, colorized log line."""
        now = datetime.now()
        message = ""
        if prefix == '?':
            c = Fore.CYAN
        elif prefix == '+':
            c = Fore.GREEN
        elif prefix == '-':
            c = Fore.RED
        elif prefix == '!':
            c = Fore.YELLOW
        else:
            # FIX: `c` was left unbound for unexpected prefixes, raising
            # UnboundLocalError; default to white.
            c = Fore.WHITE
        c = Style.BRIGHT + c
        e = Style.RESET_ALL + Fore.RESET
        if line:
            print(c+"["+now.strftime("%Y-%m-%d %H:%M:%S")+"]["+prefix+"] "+text+e)
        else:
            print("["+now.strftime("%Y-%m-%d %H:%M:%S")+"]["+c+prefix+e+"] "+text)
| |
import logging
from mopidy import models
import spotify
from mopidy_spotify import countries, playlists, translator
from mopidy_spotify.utils import flatten
logger = logging.getLogger(__name__)
# Static virtual directory tree exposed through Mopidy's browse API.
ROOT_DIR = models.Ref.directory(uri="spotify:directory", name="Spotify")
_TOP_LIST_DIR = models.Ref.directory(uri="spotify:top", name="Top lists")
_YOUR_MUSIC_DIR = models.Ref.directory(uri="spotify:your", name="Your music")
_PLAYLISTS_DIR = models.Ref.directory(uri="spotify:playlists", name="Playlists")
# Children of each static directory, in display order.
_ROOT_DIR_CONTENTS = [
    _TOP_LIST_DIR,
    _YOUR_MUSIC_DIR,
    _PLAYLISTS_DIR,
]
_TOP_LIST_DIR_CONTENTS = [
    models.Ref.directory(uri="spotify:top:tracks", name="Top tracks"),
    models.Ref.directory(uri="spotify:top:albums", name="Top albums"),
    models.Ref.directory(uri="spotify:top:artists", name="Top artists"),
]
_YOUR_MUSIC_DIR_CONTENTS = [
    models.Ref.directory(uri="spotify:your:tracks", name="Your tracks"),
    models.Ref.directory(uri="spotify:your:albums", name="Your albums"),
]
_PLAYLISTS_DIR_CONTENTS = [
    models.Ref.directory(uri="spotify:playlists:featured", name="Featured"),
]
# Maps URI variant names to libspotify toplist enums / region resolvers.
_TOPLIST_TYPES = {
    "albums": spotify.ToplistType.ALBUMS,
    "artists": spotify.ToplistType.ARTISTS,
    "tracks": spotify.ToplistType.TRACKS,
}
_TOPLIST_REGIONS = {
    "country": lambda session: session.user_country,
    "everywhere": lambda session: spotify.ToplistRegion.EVERYWHERE,
}
def browse(*, config, session, web_client, uri):
    """Dispatch a Mopidy browse request for `uri` to the matching helper.

    Returns a list of mopidy.models.Ref; unknown URIs log and return [].
    """
    static_dirs = {
        ROOT_DIR.uri: _ROOT_DIR_CONTENTS,
        _TOP_LIST_DIR.uri: _TOP_LIST_DIR_CONTENTS,
        _YOUR_MUSIC_DIR.uri: _YOUR_MUSIC_DIR_CONTENTS,
        _PLAYLISTS_DIR.uri: _PLAYLISTS_DIR_CONTENTS,
    }
    if uri in static_dirs:
        return static_dirs[uri]
    if uri.startswith(("spotify:user:", "spotify:playlist:")):
        return _browse_playlist(session, web_client, uri, config)
    if uri.startswith("spotify:album:"):
        return _browse_album(session, uri, config)
    if uri.startswith("spotify:artist:"):
        return _browse_artist(session, uri, config)
    if uri.startswith("spotify:top:"):
        parts = uri.replace("spotify:top:", "").split(":")
        if len(parts) == 1:
            return _browse_toplist_regions(variant=parts[0])
        if len(parts) == 2:
            variant, region = parts
            if region == "user":
                return _browse_toplist_user(web_client, variant=variant)
            return _browse_toplist(
                config, session, variant=variant, region=region
            )
        logger.info(f"Failed to browse {uri!r}: Toplist URI parsing failed")
        return []
    if uri.startswith("spotify:your:"):
        parts = uri.replace("spotify:your:", "").split(":")
        if len(parts) == 1:
            return _browse_your_music(web_client, variant=parts[0])
    if uri.startswith("spotify:playlists:"):
        parts = uri.replace("spotify:playlists:", "").split(":")
        if len(parts) == 1:
            return _browse_playlists(web_client, variant=parts[0])
    logger.info(f"Failed to browse {uri!r}: Unknown URI type")
    return []
def _browse_playlist(session, web_client, uri, config):
    """Look up one playlist and return its tracks as browse items."""
    bitrate = config["bitrate"]
    return playlists.playlist_lookup(
        session, web_client, uri, bitrate, as_items=True
    )
def _browse_album(session, uri, config):
    """Return track refs for every track on the given album URI."""
    timeout = config["timeout"]
    album_browser = session.get_album(uri).browse()
    album_browser.load(timeout)
    return list(translator.to_track_refs(album_browser.tracks, timeout=timeout))
def _browse_artist(session, uri, config):
    """Return the artist's top-hit tracks followed by their albums."""
    timeout = config["timeout"]
    artist_browser = session.get_artist(uri).browse(
        type=spotify.ArtistBrowserType.NO_TRACKS
    )
    artist_browser.load(timeout)
    top_tracks = list(
        translator.to_track_refs(artist_browser.tophit_tracks, timeout=timeout)
    )
    albums = list(
        translator.to_album_refs(artist_browser.albums, timeout=timeout)
    )
    return top_tracks + albums
def _browse_toplist_regions(variant):
    """Return the region sub-directories for a toplist variant."""
    refs = []
    # Personal toplists exist only for tracks and artists, and come first.
    if variant in ("tracks", "artists"):
        refs.append(
            models.Ref.directory(
                uri=f"spotify:top:{variant}:user", name="Personal"
            )
        )
    for suffix, label in (
        ("country", "Country"),
        ("countries", "Other countries"),
        ("everywhere", "Global"),
    ):
        refs.append(
            models.Ref.directory(
                uri=f"spotify:top:{variant}:{suffix}", name=label
            )
        )
    return refs
def _browse_toplist_user(web_client, variant):
    """Return the logged-in user's personal top tracks or artists."""
    if not web_client.logged_in:
        return []
    if variant not in ("tracks", "artists"):
        return []
    pages = web_client.get_all(f"me/top/{variant}", params={"limit": 50})
    items = flatten([page.get("items", []) for page in pages if page])
    if variant == "tracks":
        return list(translator.web_to_track_refs(items, check_playable=False))
    return list(translator.web_to_artist_refs(items))
def _browse_toplist(config, session, variant, region):
    """Browse a libspotify toplist.

    variant -- "tracks", "albums" or "artists".
    region  -- "countries" (list country sub-directories), "country" /
               "everywhere" (libspotify toplist regions), or a two-letter
               country code.  Anything else yields [].
    """
    if region == "countries":
        codes = config["toplist_countries"]
        if not codes:
            codes = countries.COUNTRIES.keys()
        return [
            models.Ref.directory(
                uri=f"spotify:top:{variant}:{code.lower()}",
                name=countries.COUNTRIES.get(code.upper(), code.upper()),
            )
            for code in codes
        ]
    if region in ("country", "everywhere"):
        sp_toplist = session.get_toplist(
            type=_TOPLIST_TYPES[variant],
            region=_TOPLIST_REGIONS[region](session),
        )
    elif len(region) == 2:
        # Two-letter ISO country code.
        sp_toplist = session.get_toplist(
            type=_TOPLIST_TYPES[variant], region=region.upper()
        )
    else:
        return []
    # Only block on loading when we are actually connected.
    if session.connection.state is spotify.ConnectionState.LOGGED_IN:
        sp_toplist.load(config["timeout"])
    if not sp_toplist.is_loaded:
        return []
    if variant == "tracks":
        # Consistency fix: pass the configured timeout here too, matching
        # the albums and artists branches below (it was previously omitted).
        return list(
            translator.to_track_refs(sp_toplist.tracks, timeout=config["timeout"])
        )
    elif variant == "albums":
        return list(
            translator.to_album_refs(
                sp_toplist.albums, timeout=config["timeout"]
            )
        )
    elif variant == "artists":
        return list(
            translator.to_artist_refs(
                sp_toplist.artists, timeout=config["timeout"]
            )
        )
    else:
        return []
def _browse_your_music(web_client, variant):
    """Return refs for the user's saved tracks or saved albums."""
    if not web_client.logged_in:
        return []
    if variant not in ("tracks", "albums"):
        return []
    pages = web_client.get_all(
        f"me/{variant}", params={"market": "from_token", "limit": 50}
    )
    items = flatten([page.get("items", []) for page in pages if page])
    if variant == "tracks":
        return list(translator.web_to_track_refs(items))
    return list(translator.web_to_album_refs(items))
def _browse_playlists(web_client, variant):
    """Return refs for browsable playlist collections (only "featured")."""
    if not web_client.logged_in or variant != "featured":
        return []
    pages = web_client.get_all(
        "browse/featured-playlists", params={"limit": 50}
    )
    items = flatten(
        [page.get("playlists", {}).get("items", []) for page in pages if page]
    )
    return list(translator.to_playlist_refs(items))
| |
#!/usr/bin/python
import sys
import StringIO
# Bitfield constants for the 'variant' argument to generate_sigs
Proj = 1  # projective: coordinate carries a divisor component
Offset = 2  # signature takes a const_in texel offset parameter
Single = 4  # signature returns a scalar float instead of gvec4
def vec_type(g, size):
    """Return the GLSL type name for `size` components of flavour `g`.

    g is "" (float), "i" (int) or "u" (uint); size 1 yields the scalar
    type name, larger sizes yield e.g. "ivec3".
    """
    if size == 1:
        return {"i": "int", "u": "uint"}.get(g, "float")
    return "%svec%d" % (g, size)
# Get the sampler dimension - i.e. sampler3D gives 3
def get_sampler_dim(sampler_type):
    """Return the dimensionality encoded in a sampler suffix.

    "2D..." gives 2, "3D" gives 3, "Cube..." gives 3.  Any other suffix
    is a programming error and triggers an assertion.
    """
    if sampler_type[0].isdigit():
        sampler_dim = int(sampler_type[0])
    elif sampler_type.startswith("Cube"):
        sampler_dim = 3
    else:
        # Bug fix: the original read `assert False ("...")`, which *calls*
        # False and raises TypeError; the intended form uses a comma.
        assert False, "coord_dim: invalid sampler_type: " + sampler_type
    return sampler_dim
# Get the coordinate dimension for a given sampler type.
# Array samplers also get +1 here since the layer is really an extra coordinate
def get_coord_dim(sampler_type):
    """Return how many coordinate components the sampler type needs."""
    dim = get_sampler_dim(sampler_type)
    return dim + 1 if "Array" in sampler_type else dim
# Get the number of extra vector components (i.e. shadow comparitor)
def get_extra_dim(sampler_type, use_proj, unused_fields):
    """Count the non-coordinate components packed into the P vector."""
    extra = unused_fields
    if "Shadow" in sampler_type:
        extra += 1  # shadow comparitor
    if use_proj:
        extra += 1  # projective divisor
    return extra
def generate_sigs(g, tex_inst, sampler_type, variant = 0, unused_fields = 0):
    """Print one IR signature for a texture built-in (Python 2).

    g             -- gentype prefix: "" (float), "i" (int) or "u" (uint)
    tex_inst      -- IR opcode: "tex", "txb", "txl", "txd" or "txf"
    sampler_type  -- sampler suffix, e.g. "2D", "CubeShadow", "1DArray"
    variant       -- bitfield of Proj / Offset / Single
    unused_fields -- padding components carried in the coordinate vector

    Output goes to sys.stdout, which start_function() redirects into a
    StringIO buffer.  The trailing commas on the print statements suppress
    newlines so a single s-expression is emitted piecewise.
    """
    coord_dim = get_coord_dim(sampler_type)
    extra_dim = get_extra_dim(sampler_type, variant & Proj, unused_fields)
    offset_dim = get_sampler_dim(sampler_type)

    # Single-component (shadow) lookups return float, otherwise gvec4.
    if variant & Single:
        return_type = "float"
    else:
        return_type = g + "vec4"

    # Print parameters
    print " (signature", return_type
    print " (parameters"
    print " (declare (in) " + g + "sampler" + sampler_type + " sampler)"
    # txf takes integer texel coordinates; everything else takes floats.
    print " (declare (in) " + vec_type("i" if tex_inst == "txf" else "", coord_dim + extra_dim) + " P)",
    if tex_inst == "txl":
        print "\n (declare (in) float lod)",
    elif tex_inst == "txf":
        print "\n (declare (in) int lod)",
    elif tex_inst == "txd":
        grad_type = vec_type("", coord_dim)
        print "\n (declare (in) " + grad_type + " dPdx)",
        print "\n (declare (in) " + grad_type + " dPdy)",
    if variant & Offset:
        print "\n (declare (const_in) " + vec_type("i", offset_dim) + " offset)",
    if tex_inst == "txb":
        print "\n (declare (in) float bias)",

    print ")\n ((return (" + tex_inst, return_type, "(var_ref sampler)",

    # Coordinate
    if extra_dim > 0:
        # P carries extra fields; swizzle out just the coordinate part.
        print "(swiz " + "xyzw"[:coord_dim] + " (var_ref P))",
    else:
        print "(var_ref P)",

    if variant & Offset:
        print "(var_ref offset)",
    else:
        print "0",

    # txf has neither projection nor shadow comparison.
    if tex_inst != "txf":
        # Projective divisor
        if variant & Proj:
            # The divisor is the last used component of P.
            print "(swiz " + "xyzw"[coord_dim + extra_dim-1] + " (var_ref P))",
        else:
            print "1",

        # Shadow comparitor
        if sampler_type == "2DArrayShadow": # a special case:
            print "(swiz w (var_ref P))", # ...array layer is z; shadow is w
        elif sampler_type.endswith("Shadow"):
            print "(swiz z (var_ref P))",
        else:
            print "()",

    # Bias/explicit LOD/gradient:
    if tex_inst == "txb":
        print "(var_ref bias)",
    elif tex_inst == "txl" or tex_inst == "txf":
        print "(var_ref lod)",
    elif tex_inst == "txd":
        print "((var_ref dPdx) (var_ref dPdy))",
    print "))))\n"
def generate_fiu_sigs(tex_inst, sampler_type, variant = 0, unused_fields = 0):
    """Emit the float, int, and uint flavours of one signature, in order."""
    for gentype in ("", "i", "u"):
        generate_sigs(gentype, tex_inst, sampler_type, variant, unused_fields)
def start_function(name):
    """Begin capturing one generated IR function (Python 2).

    Redirects stdout into an in-memory buffer so that everything printed
    until end_function() can be collected into a single string.
    """
    sys.stdout = StringIO.StringIO()
    print "((function " + name
def end_function(fs, name):
    """Close the IR function and store the captured text in fs[name]."""
    print "))"
    # Everything printed since start_function() is now in the buffer.
    fs[name] = sys.stdout.getvalue();
    sys.stdout.close()
# Generate all the functions and store them in the supplied dictionary.
# This is better than writing them to actual files since they should never be
# edited; it'd also be easy to confuse them with the many hand-generated files.
#
# Takes a dictionary as an argument.
def generate_texture_functions(fs):
    """Fill `fs` with the IR text of every texture built-in.

    Keys are GLSL function names, values the captured s-expression text.
    Restores sys.stdout before returning.

    Consistency cleanup: the ARB/EXT sections previously passed True/False
    as the `variant` bitfield (correct only because Proj == 1 == True);
    they now use the named Proj / 0 values.  Output is unchanged.
    """
    start_function("texture")
    generate_fiu_sigs("tex", "1D")
    generate_fiu_sigs("tex", "2D")
    generate_fiu_sigs("tex", "3D")
    generate_fiu_sigs("tex", "Cube")
    generate_fiu_sigs("tex", "1DArray")
    generate_fiu_sigs("tex", "2DArray")
    generate_sigs("", "tex", "1DShadow", Single, 1)
    generate_sigs("", "tex", "2DShadow", Single)
    generate_sigs("", "tex", "CubeShadow", Single)
    generate_sigs("", "tex", "1DArrayShadow", Single)
    generate_sigs("", "tex", "2DArrayShadow", Single)
    generate_fiu_sigs("txb", "1D")
    generate_fiu_sigs("txb", "2D")
    generate_fiu_sigs("txb", "3D")
    generate_fiu_sigs("txb", "Cube")
    generate_fiu_sigs("txb", "1DArray")
    generate_fiu_sigs("txb", "2DArray")
    generate_sigs("", "txb", "1DShadow", Single, 1)
    generate_sigs("", "txb", "2DShadow", Single)
    generate_sigs("", "txb", "CubeShadow", Single)
    generate_sigs("", "txb", "1DArrayShadow", Single)
    generate_sigs("", "txb", "2DArrayShadow", Single)
    end_function(fs, "texture")

    start_function("textureProj")
    generate_fiu_sigs("tex", "1D", Proj)
    generate_fiu_sigs("tex", "1D", Proj, 2)
    generate_fiu_sigs("tex", "2D", Proj)
    generate_fiu_sigs("tex", "2D", Proj, 1)
    generate_fiu_sigs("tex", "3D", Proj)
    generate_sigs("", "tex", "1DShadow", Proj | Single, 1)
    generate_sigs("", "tex", "2DShadow", Proj | Single)
    generate_fiu_sigs("txb", "1D", Proj)
    generate_fiu_sigs("txb", "1D", Proj, 2)
    generate_fiu_sigs("txb", "2D", Proj)
    generate_fiu_sigs("txb", "2D", Proj, 1)
    generate_fiu_sigs("txb", "3D", Proj)
    generate_sigs("", "txb", "1DShadow", Proj | Single, 1)
    generate_sigs("", "txb", "2DShadow", Proj | Single)
    end_function(fs, "textureProj")

    start_function("textureLod")
    generate_fiu_sigs("txl", "1D")
    generate_fiu_sigs("txl", "2D")
    generate_fiu_sigs("txl", "3D")
    generate_fiu_sigs("txl", "Cube")
    generate_fiu_sigs("txl", "1DArray")
    generate_fiu_sigs("txl", "2DArray")
    generate_sigs("", "txl", "1DShadow", Single, 1)
    generate_sigs("", "txl", "2DShadow", Single)
    generate_sigs("", "txl", "1DArrayShadow", Single)
    end_function(fs, "textureLod")

    start_function("textureLodOffset")
    generate_fiu_sigs("txl", "1D", Offset)
    generate_fiu_sigs("txl", "2D", Offset)
    generate_fiu_sigs("txl", "3D", Offset)
    generate_fiu_sigs("txl", "1DArray", Offset)
    generate_fiu_sigs("txl", "2DArray", Offset)
    generate_sigs("", "txl", "1DShadow", Offset | Single, 1)
    generate_sigs("", "txl", "2DShadow", Offset | Single)
    generate_sigs("", "txl", "1DArrayShadow", Offset | Single)
    end_function(fs, "textureLodOffset")

    start_function("textureOffset")
    generate_fiu_sigs("tex", "1D", Offset)
    generate_fiu_sigs("tex", "2D", Offset)
    generate_fiu_sigs("tex", "3D", Offset)
    generate_fiu_sigs("tex", "1DArray", Offset)
    generate_fiu_sigs("tex", "2DArray", Offset)
    generate_sigs("", "tex", "1DShadow", Offset | Single, 1)
    generate_sigs("", "tex", "2DShadow", Offset | Single)
    generate_sigs("", "tex", "1DArrayShadow", Offset | Single)
    generate_fiu_sigs("txb", "1D", Offset)
    generate_fiu_sigs("txb", "2D", Offset)
    generate_fiu_sigs("txb", "3D", Offset)
    generate_fiu_sigs("txb", "1DArray", Offset)
    generate_fiu_sigs("txb", "2DArray", Offset)
    generate_sigs("", "txb", "1DShadow", Offset | Single, 1)
    generate_sigs("", "txb", "2DShadow", Offset | Single)
    generate_sigs("", "txb", "1DArrayShadow", Offset | Single)
    end_function(fs, "textureOffset")

    start_function("texelFetch")
    generate_fiu_sigs("txf", "1D")
    generate_fiu_sigs("txf", "2D")
    generate_fiu_sigs("txf", "3D")
    generate_fiu_sigs("txf", "1DArray")
    generate_fiu_sigs("txf", "2DArray")
    end_function(fs, "texelFetch")

    start_function("texelFetchOffset")
    generate_fiu_sigs("txf", "1D", Offset)
    generate_fiu_sigs("txf", "2D", Offset)
    generate_fiu_sigs("txf", "3D", Offset)
    generate_fiu_sigs("txf", "1DArray", Offset)
    generate_fiu_sigs("txf", "2DArray", Offset)
    end_function(fs, "texelFetchOffset")

    start_function("textureProjOffset")
    generate_fiu_sigs("tex", "1D", Proj | Offset)
    generate_fiu_sigs("tex", "1D", Proj | Offset, 2)
    generate_fiu_sigs("tex", "2D", Proj | Offset)
    generate_fiu_sigs("tex", "2D", Proj | Offset, 1)
    generate_fiu_sigs("tex", "3D", Proj | Offset)
    generate_sigs("", "tex", "1DShadow", Proj | Offset | Single, 1)
    generate_sigs("", "tex", "2DShadow", Proj | Offset | Single)
    generate_fiu_sigs("txb", "1D", Proj | Offset)
    generate_fiu_sigs("txb", "1D", Proj | Offset, 2)
    generate_fiu_sigs("txb", "2D", Proj | Offset)
    generate_fiu_sigs("txb", "2D", Proj | Offset, 1)
    generate_fiu_sigs("txb", "3D", Proj | Offset)
    generate_sigs("", "txb", "1DShadow", Proj | Offset | Single, 1)
    generate_sigs("", "txb", "2DShadow", Proj | Offset | Single)
    end_function(fs, "textureProjOffset")

    start_function("textureProjLod")
    generate_fiu_sigs("txl", "1D", Proj)
    generate_fiu_sigs("txl", "1D", Proj, 2)
    generate_fiu_sigs("txl", "2D", Proj)
    generate_fiu_sigs("txl", "2D", Proj, 1)
    generate_fiu_sigs("txl", "3D", Proj)
    generate_sigs("", "txl", "1DShadow", Proj | Single, 1)
    generate_sigs("", "txl", "2DShadow", Proj | Single)
    end_function(fs, "textureProjLod")

    start_function("textureProjLodOffset")
    generate_fiu_sigs("txl", "1D", Proj | Offset)
    generate_fiu_sigs("txl", "1D", Proj | Offset, 2)
    generate_fiu_sigs("txl", "2D", Proj | Offset)
    generate_fiu_sigs("txl", "2D", Proj | Offset, 1)
    generate_fiu_sigs("txl", "3D", Proj | Offset)
    generate_sigs("", "txl", "1DShadow", Proj | Offset | Single, 1)
    generate_sigs("", "txl", "2DShadow", Proj | Offset | Single)
    end_function(fs, "textureProjLodOffset")

    start_function("textureGrad")
    generate_fiu_sigs("txd", "1D")
    generate_fiu_sigs("txd", "2D")
    generate_fiu_sigs("txd", "3D")
    generate_fiu_sigs("txd", "Cube")
    generate_fiu_sigs("txd", "1DArray")
    generate_fiu_sigs("txd", "2DArray")
    generate_sigs("", "txd", "1DShadow", Single, 1)
    generate_sigs("", "txd", "2DShadow", Single)
    generate_sigs("", "txd", "CubeShadow", Single)
    generate_sigs("", "txd", "1DArrayShadow", Single)
    generate_sigs("", "txd", "2DArrayShadow", Single)
    end_function(fs, "textureGrad")

    start_function("textureGradOffset")
    generate_fiu_sigs("txd", "1D", Offset)
    generate_fiu_sigs("txd", "2D", Offset)
    generate_fiu_sigs("txd", "3D", Offset)
    generate_fiu_sigs("txd", "Cube", Offset)
    generate_fiu_sigs("txd", "1DArray", Offset)
    generate_fiu_sigs("txd", "2DArray", Offset)
    generate_sigs("", "txd", "1DShadow", Offset | Single, 1)
    generate_sigs("", "txd", "2DShadow", Offset | Single)
    generate_sigs("", "txd", "1DArrayShadow", Offset | Single)
    generate_sigs("", "txd", "2DArrayShadow", Offset | Single)
    end_function(fs, "textureGradOffset")

    start_function("textureProjGrad")
    generate_fiu_sigs("txd", "1D", Proj)
    generate_fiu_sigs("txd", "1D", Proj, 2)
    generate_fiu_sigs("txd", "2D", Proj)
    generate_fiu_sigs("txd", "2D", Proj, 1)
    generate_fiu_sigs("txd", "3D", Proj)
    generate_sigs("", "txd", "1DShadow", Proj | Single, 1)
    generate_sigs("", "txd", "2DShadow", Proj | Single)
    end_function(fs, "textureProjGrad")

    start_function("textureProjGradOffset")
    generate_fiu_sigs("txd", "1D", Proj | Offset)
    generate_fiu_sigs("txd", "1D", Proj | Offset, 2)
    generate_fiu_sigs("txd", "2D", Proj | Offset)
    generate_fiu_sigs("txd", "2D", Proj | Offset, 1)
    generate_fiu_sigs("txd", "3D", Proj | Offset)
    generate_sigs("", "txd", "1DShadow", Proj | Offset | Single, 1)
    generate_sigs("", "txd", "2DShadow", Proj | Offset | Single)
    end_function(fs, "textureProjGradOffset")

    # ARB_texture_rectangle extension
    start_function("texture2DRect")
    generate_sigs("", "tex", "2DRect")
    end_function(fs, "texture2DRect")

    start_function("texture2DRectProj")
    generate_sigs("", "tex", "2DRect", Proj)
    generate_sigs("", "tex", "2DRect", Proj, 1)
    end_function(fs, "texture2DRectProj")

    start_function("shadow2DRect")
    generate_sigs("", "tex", "2DRectShadow")
    end_function(fs, "shadow2DRect")

    start_function("shadow2DRectProj")
    generate_sigs("", "tex", "2DRectShadow", Proj)
    end_function(fs, "shadow2DRectProj")

    # EXT_texture_array extension
    start_function("texture1DArray")
    generate_sigs("", "tex", "1DArray")
    generate_sigs("", "txb", "1DArray")
    end_function(fs, "texture1DArray")

    start_function("texture1DArrayLod")
    generate_sigs("", "txl", "1DArray")
    end_function(fs, "texture1DArrayLod")

    start_function("texture2DArray")
    generate_sigs("", "tex", "2DArray")
    generate_sigs("", "txb", "2DArray")
    end_function(fs, "texture2DArray")

    start_function("texture2DArrayLod")
    generate_sigs("", "txl", "2DArray")
    end_function(fs, "texture2DArrayLod")

    start_function("shadow1DArray")
    generate_sigs("", "tex", "1DArrayShadow")
    generate_sigs("", "txb", "1DArrayShadow")
    end_function(fs, "shadow1DArray")

    start_function("shadow1DArrayLod")
    generate_sigs("", "txl", "1DArrayShadow")
    end_function(fs, "shadow1DArrayLod")

    start_function("shadow2DArray")
    generate_sigs("", "tex", "2DArrayShadow")
    end_function(fs, "shadow2DArray")

    # ARB_shader_texture_lod extension
    start_function("texture1DGradARB")
    generate_fiu_sigs("txd", "1D")
    end_function(fs, "texture1DGradARB")

    start_function("texture2DGradARB")
    generate_fiu_sigs("txd", "2D")
    end_function(fs, "texture2DGradARB")

    start_function("texture2DGradEXT")
    generate_fiu_sigs("txd", "2D")
    end_function(fs, "texture2DGradEXT")

    start_function("texture3DGradARB")
    generate_fiu_sigs("txd", "3D")
    end_function(fs, "texture3DGradARB")

    start_function("textureCubeGradARB")
    generate_fiu_sigs("txd", "Cube")
    end_function(fs, "textureCubeGradARB")

    start_function("textureCubeGradEXT")
    generate_fiu_sigs("txd", "Cube")
    end_function(fs, "textureCubeGradEXT")

    start_function("texture1DProjGradARB")
    generate_fiu_sigs("txd", "1D", Proj)
    generate_fiu_sigs("txd", "1D", Proj, 2)
    end_function(fs, "texture1DProjGradARB")

    start_function("texture2DProjGradARB")
    generate_fiu_sigs("txd", "2D", Proj)
    generate_fiu_sigs("txd", "2D", Proj, 1)
    end_function(fs, "texture2DProjGradARB")

    start_function("texture2DProjGradEXT")
    generate_fiu_sigs("txd", "2D", Proj)
    generate_fiu_sigs("txd", "2D", Proj, 1)
    end_function(fs, "texture2DProjGradEXT")

    start_function("texture3DProjGradARB")
    generate_fiu_sigs("txd", "3D", Proj)
    end_function(fs, "texture3DProjGradARB")

    start_function("shadow1DGradARB")
    generate_sigs("", "txd", "1DShadow", 0, 1)
    end_function(fs, "shadow1DGradARB")

    start_function("shadow1DProjGradARB")
    generate_sigs("", "txd", "1DShadow", Proj, 1)
    end_function(fs, "shadow1DProjGradARB")

    start_function("shadow2DGradARB")
    generate_sigs("", "txd", "2DShadow", 0)
    end_function(fs, "shadow2DGradARB")

    start_function("shadow2DProjGradARB")
    generate_sigs("", "txd", "2DShadow", Proj)
    end_function(fs, "shadow2DProjGradARB")

    start_function("texture2DRectGradARB")
    generate_sigs("", "txd", "2DRect")
    end_function(fs, "texture2DRectGradARB")

    start_function("texture2DRectProjGradARB")
    generate_sigs("", "txd", "2DRect", Proj)
    generate_sigs("", "txd", "2DRect", Proj, 1)
    end_function(fs, "texture2DRectProjGradARB")

    start_function("shadow2DRectGradARB")
    generate_sigs("", "txd", "2DRectShadow", 0)
    end_function(fs, "shadow2DRectGradARB")

    start_function("shadow2DRectProjGradARB")
    generate_sigs("", "txd", "2DRectShadow", Proj)
    end_function(fs, "shadow2DRectProjGradARB")

    # Deprecated (110/120 style) functions with silly names:
    start_function("texture1D")
    generate_sigs("", "tex", "1D")
    generate_sigs("", "txb", "1D")
    end_function(fs, "texture1D")

    start_function("texture1DLod")
    generate_sigs("", "txl", "1D")
    end_function(fs, "texture1DLod")

    start_function("texture1DProj")
    generate_sigs("", "tex", "1D", Proj)
    generate_sigs("", "tex", "1D", Proj, 2)
    generate_sigs("", "txb", "1D", Proj)
    generate_sigs("", "txb", "1D", Proj, 2)
    end_function(fs, "texture1DProj")

    start_function("texture1DProjLod")
    generate_sigs("", "txl", "1D", Proj)
    generate_sigs("", "txl", "1D", Proj, 2)
    end_function(fs, "texture1DProjLod")

    start_function("texture2D")
    generate_sigs("", "tex", "2D")
    generate_sigs("", "txb", "2D")
    end_function(fs, "texture2D")

    start_function("texture2DLod")
    generate_sigs("", "txl", "2D")
    end_function(fs, "texture2DLod")

    start_function("texture2DLodEXT")
    generate_sigs("", "txl", "2D")
    end_function(fs, "texture2DLodEXT")

    start_function("texture2DProj")
    generate_sigs("", "tex", "2D", Proj)
    generate_sigs("", "tex", "2D", Proj, 1)
    generate_sigs("", "txb", "2D", Proj)
    generate_sigs("", "txb", "2D", Proj, 1)
    end_function(fs, "texture2DProj")

    start_function("texture2DProjLod")
    generate_sigs("", "txl", "2D", Proj)
    generate_sigs("", "txl", "2D", Proj, 1)
    end_function(fs, "texture2DProjLod")

    start_function("texture2DProjLodEXT")
    generate_sigs("", "txl", "2D", Proj)
    generate_sigs("", "txl", "2D", Proj, 1)
    end_function(fs, "texture2DProjLodEXT")

    start_function("texture3D")
    generate_sigs("", "tex", "3D")
    generate_sigs("", "txb", "3D")
    end_function(fs, "texture3D")

    start_function("texture3DLod")
    generate_sigs("", "txl", "3D")
    end_function(fs, "texture3DLod")

    start_function("texture3DProj")
    generate_sigs("", "tex", "3D", Proj)
    generate_sigs("", "txb", "3D", Proj)
    end_function(fs, "texture3DProj")

    start_function("texture3DProjLod")
    generate_sigs("", "txl", "3D", Proj)
    end_function(fs, "texture3DProjLod")

    start_function("textureCube")
    generate_sigs("", "tex", "Cube")
    generate_sigs("", "txb", "Cube")
    end_function(fs, "textureCube")

    start_function("textureCubeLod")
    generate_sigs("", "txl", "Cube")
    end_function(fs, "textureCubeLod")

    start_function("textureCubeLodEXT")
    generate_sigs("", "txl", "Cube")
    end_function(fs, "textureCubeLodEXT")

    start_function("shadow1D")
    generate_sigs("", "tex", "1DShadow", 0, 1)
    generate_sigs("", "txb", "1DShadow", 0, 1)
    end_function(fs, "shadow1D")

    start_function("shadow1DLod")
    generate_sigs("", "txl", "1DShadow", 0, 1)
    end_function(fs, "shadow1DLod")

    start_function("shadow1DProj")
    generate_sigs("", "tex", "1DShadow", Proj, 1)
    generate_sigs("", "txb", "1DShadow", Proj, 1)
    end_function(fs, "shadow1DProj")

    start_function("shadow1DProjLod")
    generate_sigs("", "txl", "1DShadow", Proj, 1)
    end_function(fs, "shadow1DProjLod")

    start_function("shadow2D")
    generate_sigs("", "tex", "2DShadow")
    generate_sigs("", "txb", "2DShadow")
    end_function(fs, "shadow2D")

    start_function("shadow2DLod")
    generate_sigs("", "txl", "2DShadow")
    end_function(fs, "shadow2DLod")

    start_function("shadow2DProj")
    generate_sigs("", "tex", "2DShadow", Proj)
    generate_sigs("", "txb", "2DShadow", Proj)
    end_function(fs, "shadow2DProj")

    start_function("shadow2DProjLod")
    generate_sigs("", "txl", "2DShadow", Proj)
    end_function(fs, "shadow2DProjLod")

    # Restore the real stdout that start_function() kept redirecting.
    sys.stdout = sys.__stdout__
    return fs
# If you actually run this script, it'll print out all the functions.
if __name__ == "__main__":
    fs = {}
    generate_texture_functions(fs);
    # Python 2 dict iteration; each value is one generated IR function.
    for k, v in fs.iteritems():
        print v
| |
#!/usr/bin/env python
'''
Created on 2012-12-11
@author: jock
'''
import SmaliParser
import os
import re
import shutil
import Smali
import SmaliLib
import string
import SCheck
import SmaliEntry
import utils
class SmaliTest(object):
    '''
    Ad-hoc test driver for the Smali* helper modules (Python 2).  Walks a
    directory tree of .smali files and runs one scenario per file; the
    alternative scenarios are kept commented out and toggled by hand.
    '''
    def __init__(self, smaliDir):
        '''
        Constructor
        '''
        # Start from a clean scratch directory for generated output.
        if os.path.isdir("/tmp/what-smali"):
            shutil.rmtree("/tmp/what-smali")
        #smaliLib = SmaliLib.SmaliLib(smaliDir)
        #self.testSmaliLibGetUnImplementMethods("/home/tangliuxiang/work/smali/smali-4.0/devices/t328t/temp/test3/app/Phone/smali/com/android/phone/PhoneInterfaceManager.smali", smaliLib)
        for smaliFilePath in self.getSmaliFiles(smaliDir, "/tmp/what-smali/"):
            #self.testSmaliGetSuperClassName(smaliFilePath, "/tmp/what-smali/%s" %(smaliFilePath.replace(smaliDir, '',1)))
            #self.testSmaliLibGetOverrideMethods(smaliFilePath, smaliLib)
            #self.testSmaliGetInvokeMethods(smaliFilePath, smaliLib)
            #self.testSamliGetCalledMethod(smaliFilePath, smaliLib)
            #self.testSmaliLibGetUnImplementMethods(smaliFilePath, smaliLib)
            self.testSmaliSplit(smaliFilePath, "/tmp/what-smali/%s" %(os.path.dirname(smaliFilePath.replace(smaliDir, '',1))))
    def getSmaliFiles(self, inDir, outDir):
        # Collect every *.smali path under inDir, mirroring the directory
        # structure under outDir as a side effect.
        filelist = []
        smaliRe = re.compile(r'(?:^.*\.smali$)')
        os.mkdir(outDir)
        for root, dirs, files in os.walk(inDir):
            for fn in files:
                if bool(smaliRe.match(fn)) is True:
                    filelist.append("%s/%s" % (root, fn))
            for dir in dirs:
                absDir = "%s/%s" %(root, dir)
                odir = "%s/%s" %(outDir, absDir.replace(inDir, '', 1))
                if os.path.isdir(odir) is False:
                    os.mkdir(odir)
        return filelist
    def testEntryToString(self, smaliFilePath, outFilePath):
        '''
        Round-trip check: parse a smali file into entries and write each
        entry's toString() back out to outFilePath.
        '''
        print "testEntryToString: %s" %(smaliFilePath)
        entries = SmaliParser.SmaliParser(smaliFilePath).getEntryList()
        outFile = file(outFilePath, 'w+')
        for entry in entries:
            #outFile.write("# entry start: type: %s\n%s\n"%(entry.getType(), entry.toString()))
            outFile.write("%s\n"%(entry.toString()))
        outFile.close()
    def TestSmaliToString(self, smaliFilePath, outFilePath):
        '''
        Round-trip check via the higher-level Smali class: load the file
        and dump its full toString() to outFilePath.
        '''
        print "TestSmaliToString: %s" %(smaliFilePath)
        outFile = file(outFilePath, 'w+')
        smaliFile = Smali.Smali(smaliFilePath)
        outFile.write(smaliFile.toString())
        outFile.close()
    def getAllEntryType(self, smaliFilePath, outFilePath):
        '''
        Append every entry type seen in the file to a shared list at
        /tmp/what-smali/smali-entry-type, then dump the file's toString().
        '''
        print "getAllEntryType: %s" %(smaliFilePath)
        outFile1 = file("/tmp/what-smali/smali-entry-type", 'a')
        smaliFile = Smali.Smali(smaliFilePath)
        for entry in smaliFile.getEntryList():
            outFile1.write("%s\n" %entry.getType())
        outFile1.close()
        outFile = file(outFilePath, 'w+')
        outFile.write(smaliFile.toString())
        outFile.close()
    def testEntryOut(self, smaliFilePath, outFilePath):
        '''
        Exercise SmaliEntry.out(): write each entry under /tmp/what-smali/.
        '''
        print "testEntryOut: %s" %(smaliFilePath)
        smaliFile = Smali.Smali(smaliFilePath)
        for entry in smaliFile.getEntryList():
            entry.out("/tmp/what-smali/", "")
    def testEntryGetName(self, smaliFilePath, outFilePath):
        # Dump each entry's type and name alongside its text.
        print "testSmaliGetName: %s" %(smaliFilePath)
        smaliFile = Smali.Smali(smaliFilePath)
        outFile = file(outFilePath, 'w+')
        for entry in smaliFile.getEntryList():
            outFile.write("# entry name: type: %s name:%s\n"%(entry.getType(), entry.getName()))
            outFile.write("%s\n"%(entry.toString()))
        outFile.close()
    def testSmaliGetSuperClassName(self, smaliFilePath, outFilePath):
        # Print the super class reported for the file.
        smaliFile = Smali.Smali(smaliFilePath)
        print "testSmaliGetSuper: %s, super: %s" %(smaliFilePath, smaliFile.getSuperClassName())
    def testSmaliLibGetOverrideMethods(self, smaliFilePath, smaliLib):
        # Report required override methods that the class does not implement.
        smali = Smali.Smali(smaliFilePath)
        if smali.isAbstractClass() or smali.isInterface():
            return
        #print "Overrides in: %s" %smaliFilePath
        sMethods = smaliLib.getAllMethods(smali)
        for overMethod in smaliLib.getNeedOverrideMethods(smali):
            #print "\t\tMethod: %s" %overMethod
            hasMethod = False
            for method in sMethods:
                if overMethod == method:
                    hasMethod = True
                    break
            if hasMethod == False:
                print "%s doesn't implements method: %s" %(smaliFilePath, overMethod)
    def testSmaliGetInvokeMethods(self, smaliFilePath, smaliLib):
        # List every invoke instruction found in the file.
        smali = Smali.Smali(smaliFilePath)
        for method in smali.getInvokeMethods():
            print "Invoke methods: type: %s, cls: %s, method: %s" %(method.type, method.cls, method.method)
    def testSamliGetCalledMethod(self, smaliFilePath, smaliLib):
        # List methods of this class that are called from elsewhere in the lib.
        smali = Smali.Smali(smaliFilePath)
        print "Smali: %s" %smaliFilePath
        for method in smaliLib.getCalledMethod(smali):
            print "method was called: type: %s, cls: %s, method: %s" %(method.type, method.cls, method.method)
    def testSmaliLibCheckMethod(self, smaliFilePath, smaliLib):
        # Run the library's own method consistency check.
        smali = Smali.Smali(smaliFilePath)
        #print "Smali: %s" %smaliFilePath
        smaliLib.checkMethods(smali)
    def testSmaliLibGetUnImplementMethods(self, smaliFilePath, smaliLib):
        # Print methods the class should implement but does not.
        smali = Smali.Smali(smaliFilePath)
        methodsList = smaliLib.getUnImplementMethods(smali)
        for method in methodsList:
            print "%s: %s" %(smali.getClassName(), method)
    def testSmaliSplit(self, smaliFilePath, outFilePath):
        # Split the smali file into per-entry files under outFilePath.
        smali = Smali.Smali(smaliFilePath)
        smali.split(outFilePath)
def testSCheckGetCanReplaceMethods(vendorDir, aosp, bosp):
    # Print every method in the board smali library that SCheck reports as
    # replaceable by the board (bosp) implementation.
    sCheck = SCheck.SCheck(vendorDir, aosp, bosp)
    for key in sCheck.mBSLib.mSDict.keys():
        smali = sCheck.mBSLib.getSmali(key)
        methodsList = sCheck.getCanReplaceToBoardMethods(smali, smali.getEntryList(SmaliEntry.METHOD))
        if methodsList is None:
            continue
        for method in methodsList:
            print "%s: %s" %(smali.getClassName(), method.getName())
def testSCheckAutoComplete(vendorDir, aosp, bosp, mergedDir):
    # Run SCheck.autoComplete over every class in the merged smali library
    # and print what it filled in; one package is skipped (reason not
    # recorded here -- presumably known-problematic, verify before reuse).
    sCheck = SCheck.SCheck(vendorDir, aosp, bosp, mergedDir)
    for key in sCheck.mMSLib.mSDict.keys():
        smali = sCheck.mMSLib.getSmali(key)
        if smali.getPackageName() == r'Lcom/baidu/internal/telephony/sip':
            continue
        # print "packageName: %s, clsName: %s" %(smali.getPackageName(), smali.getClassName())
        methodsList = sCheck.autoComplete(smali)
        for method in methodsList:
            print "%s: %s" %(method.getClassName(), method.getName())
def testSCheckGetUnImplementMethods(vendorDir, aosp, bosp, mergedDir):
    # Print unimplemented methods for every class in the merged library,
    # skipping the same package as testSCheckAutoComplete above.
    sCheck = SCheck.SCheck(vendorDir, aosp, bosp, mergedDir)
    for key in sCheck.mMSLib.mSDict.keys():
        smali = sCheck.mMSLib.getSmali(key)
        if smali.getPackageName() == r'Lcom/baidu/internal/telephony/sip':
            continue
        # print "packageName: %s, clsName: %s" %(smali.getPackageName(), smali.getClassName())
        methodsList = sCheck.getUnImplementMethods(smali)
        for method in methodsList:
            print "%s: %s" %(smali.getClassName(), method)
# smali = Smali.Smali("/home/tangliuxiang/work/smali/smali-4.0/devices/t328t/temp/test3/app/Phone/smali/com/android/phone/PhoneInterfaceManager.smali")
# methodsList = sCheck.getUnImplementMethods(smali)
# for method in methodsList:
# print "%s: %s" %(smali.getClassName(), method)
def testSmaliGetSmaliPathList(smaliDir):
    # Print the class-key -> file-path mapping built by utils.getSmaliDict.
    smaliDict = utils.getSmaliDict(smaliDir)
    for key in smaliDict.keys():
        print "key: %s, file: %s" %(key, smaliDict[key].mPath)
if __name__ == "__main__":
    print ""
    # NOTE(review): hard-coded developer paths below; the active scenario is
    # chosen by (un)commenting lines.
    smali = SmaliTest("/home/tangliuxiang/work/smali/smali-mtk-4.2/devices/h30_t00/framework.jar.out/")
    #smali = SmaliTest("/home/tangliuxiang/work/smali/smali-4.0/devices/t328t/temp/test-telephony")
    #testSmaliGetSmaliPathList("/home/tangliuxiang/work/smali/smali-mtk-4.2/devices/h30_t00/temp/ori/framework/")
    # main()
    #testSCheckGetCanReplaceMethods("/home/tangliuxiang/work/smali/smali-4.0/devices/t328t/temp/ori/framework", "/home/tangliuxiang/work/smali/smali-4.0/reference/aosp", "/home/tangliuxiang/work/smali/smali-4.0/devices/t328t/temp/baidu")
    #testSCheckGetUnImplementMethods("/home/tangliuxiang/work/smali/smali-4.0/devices/t328t/temp/test-telephony/ori", "/home/tangliuxiang/work/smali/smali-4.0/reference/aosp", "/home/tangliuxiang/work/smali/smali-4.0/reference/bosp")
    #testSCheckGetUnImplementMethods("/home/tangliuxiang/work/smali/smali-4.0/devices/t328t/temp/ori/framework", "/home/tangliuxiang/work/smali/smali-4.0/reference/aosp", "/home/tangliuxiang/work/smali/smali-4.0/reference/bosp", "/home/tangliuxiang/work/smali/smali-4.0/devices/t328t/temp/test3")
    #testSCheckAutoComplete("/home/tangliuxiang/work/smali/smali-4.2/devices/p6/temp/ori", "/home/tangliuxiang/work/smali/smali-4.2/reference/aosp", "/home/tangliuxiang/work/smali/smali-4.2/devices/p6/temp/baidu", "/home/tangliuxiang/work/smali/smali-4.2/devices/p6/temp/test-1")
| |
"""
Fork of pynxos library from network to code and mzbenami
Reimplemented by ktbyers to support XML-RPC in addition to JSON-RPC
"""
from __future__ import print_function, unicode_literals
from builtins import super
import requests
from requests.auth import HTTPBasicAuth
from requests.exceptions import ConnectionError
import json
from lxml import etree
from six import string_types
from napalm.nxapi_plumbing.errors import (
NXAPIError,
NXAPIPostError,
NXAPICommandError,
NXAPIXMLError,
NXAPIAuthError,
NXAPIConnectionError,
)
class RPCBase(object):
    """RPCBase class should be API-type neutral (i.e. shouldn't care whether XML or jsonrpc)."""

    def __init__(
        self,
        host,
        username,
        password,
        transport="https",
        port=None,
        timeout=30,
        verify=True,
    ):
        """Store connection settings and construct the NX-API endpoint URL.

        transport must be "http" or "https"; when port is None it defaults
        to the standard port for the chosen transport.
        """
        if transport not in ("http", "https"):
            raise NXAPIError("'{}' is an invalid transport.".format(transport))
        if port is None:
            port = {"http": 80, "https": 443}[transport]
        self.url = "{}://{}:{}/ins".format(transport, host, port)
        self.username = username
        self.password = password
        self.timeout = timeout
        self.verify = verify

    def _process_api_response(self, response, commands, raw_text=False):
        """Parse a device response; API-specific subclasses must override."""
        raise NotImplementedError("Method must be implemented in child class")

    def _send_request(self, commands, method):
        """POST `commands` to the device and return the raw response body.

        Raises NXAPIConnectionError on transport failure, NXAPIAuthError on
        HTTP 401, and NXAPIPostError for any other non-200 status.
        """
        payload = self._build_payload(commands, method)
        try:
            response = requests.post(
                self.url,
                timeout=self.timeout,
                data=payload,
                headers=self.headers,
                auth=HTTPBasicAuth(self.username, self.password),
                verify=self.verify,
            )
        except ConnectionError as e:
            raise NXAPIConnectionError(str(e))
        if response.status_code == 401:
            raise NXAPIAuthError(
                "Authentication to NX-API failed please verify your username, password, "
                "and hostname."
            )
        if response.status_code not in [200]:
            msg = """Invalid status code returned on NX-API POST
commands: {}
status_code: {}""".format(
                commands, response.status_code
            )
            raise NXAPIPostError(msg)
        return response.text
class RPCClient(RPCBase):
    """NX-API client speaking the JSON-RPC dialect."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.headers = {"content-type": "application/json-rpc"}
        self.api = "jsonrpc"
        self.cmd_method = "cli"
        self.cmd_method_conf = "cli"
        self.cmd_method_raw = "cli_ascii"

    def _nxapi_command(self, commands, method=None):
        """Send a command down the NX-API channel."""
        method = self.cmd_method if method is None else method
        # Accept a bare string as a one-command batch.
        if isinstance(commands, string_types):
            commands = [commands]
        raw_text = method == "cli_ascii"
        raw_response = self._send_request(commands, method=method)
        return self._process_api_response(raw_response, commands, raw_text=raw_text)

    def _nxapi_command_conf(self, commands, method=None):
        method = self.cmd_method_conf if method is None else method
        return self._nxapi_command(commands=commands, method=method)

    def _build_payload(self, commands, method, rpc_version="2.0", api_version=1.0):
        """Render the command batch as a JSON-RPC request list (ids from 1)."""
        request_batch = [
            {
                "jsonrpc": rpc_version,
                "method": method,
                "params": {"cmd": command, "version": api_version},
                "id": seq,
            }
            for seq, command in enumerate(commands, start=1)
        ]
        return json.dumps(request_batch)

    def _process_api_response(self, response, commands, raw_text=False):
        """
        Normalize the API response: raise on per-command errors, attach the
        originating command to each entry, and return a consistent
        [{'command': ..., 'result': ...}, ...] structure for both raw_text and
        structured replies.
        """
        entries = json.loads(response)
        # A single-command reply arrives as a bare dict; wrap it.
        if isinstance(entries, dict):
            entries = [entries]
        # Record which command produced each entry (same order as sent).
        for position, entry in enumerate(entries):
            entry["command"] = commands[position]
        normalized = []
        for entry in entries:
            # Surface device-reported errors as exceptions.
            self._error_check(entry)
            # Some commands like "show run" can have a None result.
            result = entry.get("result")
            if result is None:
                result = {}
            item = {"command": entry["command"]}
            if entry and raw_text:
                item["result"] = result.get("msg")
            elif entry and not raw_text:
                item["result"] = result.get("body")
            else:
                raise NXAPIError("Unexpected value encountered processing response.")
            normalized.append(item)
        return normalized

    def _error_check(self, command_response):
        error = command_response.get("error")
        if not error:
            return
        command = command_response.get("command")
        if "data" in error:
            raise NXAPICommandError(command, error["data"]["msg"])
        raise NXAPICommandError(command, "Invalid command.")
class XMLClient(RPCBase):
    """NX-API client speaking the XML dialect."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.headers = {"content-type": "application/xml"}
        self.api = "xml"
        self.cmd_method = "cli_show"
        self.cmd_method_conf = "cli_conf"
        self.cmd_method_raw = "cli_show_ascii"

    def _nxapi_command(self, commands, method=None):
        """Send a command down the NX-API channel."""
        method = self.cmd_method if method is None else method
        # Accept a bare string as a one-command batch.
        if isinstance(commands, string_types):
            commands = [commands]
        raw_response = self._send_request(commands, method=method)
        outputs = self._process_api_response(raw_response, commands)
        for output in outputs:
            self._error_check(output)
        return outputs

    def _nxapi_command_conf(self, commands, method=None):
        method = self.cmd_method_conf if method is None else method
        return self._nxapi_command(commands=commands, method=method)

    def _build_payload(self, commands, method, xml_version="1.0", version="1.0"):
        """Render the commands into the <ins_api> XML envelope.

        Multiple commands are joined with " ;" into a single <input> element,
        matching the separator NX-API expects.
        """
        joined_commands = " ;".join(commands)
        payload = """<?xml version="{xml_version}"?>
            <ins_api>
                <version>{version}</version>
                <type>{method}</type>
                <chunk>0</chunk>
                <sid>sid</sid>
                <input>{command}</input>
                <output_format>xml</output_format>
            </ins_api>""".format(
            xml_version=xml_version,
            version=version,
            method=method,
            command=joined_commands,
        )
        return payload

    def _process_api_response(self, response, commands, raw_text=False):
        """Split the XML body into one <output> element per sent command."""
        xml_root = etree.fromstring(response)
        outputs = xml_root.xpath("outputs/output")
        if len(commands) != len(outputs):
            raise NXAPIXMLError(
                "XML response doesn't match expected number of commands."
            )
        return outputs

    def _error_check(self, command_response):
        """command_response will be an XML Etree object."""
        clierror = command_response.find("./clierror")
        if clierror is None:
            return
        input_elem = command_response.find("./input")
        command = input_elem.text if input_elem is not None else "Unknown command"
        raise NXAPICommandError(command, etree.tostring(clierror).decode())
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=invalid-name, exec-used
"""Setup mxnet package for pip."""
from __future__ import absolute_import
from datetime import datetime
import os
import sys
import shutil
import platform
# On Linux, force universal + manylinux wheel tags by appending to sys.argv
# BEFORE setuptools parses the command line.
if platform.system() == 'Linux':
    sys.argv.append('--universal')
    sys.argv.append('--plat-name=manylinux2014_x86_64')
from setuptools import setup, find_packages
from setuptools.dist import Distribution
# We can not import `mxnet.info.py` in setup.py directly since mxnet/__init__.py
# Will be invoked which introduces dependences
CURRENT_DIR = os.path.dirname(__file__)
libinfo_py = os.path.join(CURRENT_DIR, 'mxnet-build/python/mxnet/libinfo.py')
libinfo = {'__file__': libinfo_py}
# Execute libinfo.py in an isolated namespace to pull out find_lib_path and
# __version__ without importing the full mxnet package.
exec(compile(open(libinfo_py, "rb").read(), libinfo_py, 'exec'), libinfo, libinfo)
LIB_PATH = libinfo['find_lib_path']()
__version__ = libinfo['__version__']
# set by the CD pipeline
is_release = os.environ.get("IS_RELEASE", "").strip()
# set by the travis build pipeline
travis_tag = os.environ.get("TRAVIS_TAG", "").strip()
# nightly build tag
if not travis_tag and not is_release:
    __version__ += 'b{0}'.format(datetime.today().strftime('%Y%m%d'))
# patch build tag
elif travis_tag.startswith('patch-'):
    __version__ = os.environ['TRAVIS_TAG'].split('-')[1]
class BinaryDistribution(Distribution):
    """Distribution carrying a prebuilt native library (libmxnet)."""
    def has_ext_modules(self):
        # Report binary content only on macOS so the wheel gets a
        # platform-specific tag there; on Linux the plat-name is forced via
        # the sys.argv manipulation at the top of this script instead.
        return platform.system() == 'Darwin'
# Runtime requirements of the published wheel.
DEPENDENCIES = [
    'numpy<2.0.0,>1.16.0',
    'requests>=2.20.0,<3',
    'graphviz<0.9.0,>=0.8.1'
]
# Stage a clean wheel layout: copy the python package and the dmlc tracker
# out of the mxnet-build checkout into ./mxnet and ./dmlc_tracker.
shutil.rmtree(os.path.join(CURRENT_DIR, 'mxnet'), ignore_errors=True)
shutil.rmtree(os.path.join(CURRENT_DIR, 'dmlc_tracker'), ignore_errors=True)
shutil.copytree(os.path.join(CURRENT_DIR, 'mxnet-build/python/mxnet'),
                os.path.join(CURRENT_DIR, 'mxnet'))
shutil.copytree(os.path.join(CURRENT_DIR, 'mxnet-build/3rdparty/dmlc-core/tracker/dmlc_tracker'),
                os.path.join(CURRENT_DIR, 'dmlc_tracker'))
# Bundle the compiled libmxnet shared library next to the python sources.
shutil.copy(LIB_PATH[0], os.path.join(CURRENT_DIR, 'mxnet'))
# copy license and notice
shutil.copytree(os.path.join(CURRENT_DIR, 'mxnet-build/licenses'),
                os.path.join(CURRENT_DIR, 'mxnet/licenses'))
# copy tools to mxnet package
shutil.rmtree(os.path.join(CURRENT_DIR, 'mxnet/tools'), ignore_errors=True)
os.mkdir(os.path.join(CURRENT_DIR, 'mxnet/tools'))
shutil.copy(os.path.join(CURRENT_DIR, 'mxnet-build/tools/launch.py'), os.path.join(CURRENT_DIR, 'mxnet/tools'))
shutil.copy(os.path.join(CURRENT_DIR, 'mxnet-build/tools/im2rec.py'), os.path.join(CURRENT_DIR, 'mxnet/tools'))
shutil.copy(os.path.join(CURRENT_DIR, 'mxnet-build/tools/kill-mxnet.py'), os.path.join(CURRENT_DIR, 'mxnet/tools'))
shutil.copy(os.path.join(CURRENT_DIR, 'mxnet-build/tools/parse_log.py'), os.path.join(CURRENT_DIR, 'mxnet/tools'))
shutil.copy(os.path.join(CURRENT_DIR, 'mxnet-build/tools/diagnose.py'), os.path.join(CURRENT_DIR, 'mxnet/tools'))
shutil.copytree(os.path.join(CURRENT_DIR, 'mxnet-build/tools/caffe_converter'), os.path.join(CURRENT_DIR, 'mxnet/tools/caffe_converter'))
shutil.copytree(os.path.join(CURRENT_DIR, 'mxnet-build/tools/bandwidth'), os.path.join(CURRENT_DIR, 'mxnet/tools/bandwidth'))
# copy headers to mxnet package
shutil.rmtree(os.path.join(CURRENT_DIR, 'mxnet/include'), ignore_errors=True)
os.mkdir(os.path.join(CURRENT_DIR, 'mxnet/include'))
shutil.copytree(os.path.join(CURRENT_DIR, 'mxnet-build/include/mxnet'),
                os.path.join(CURRENT_DIR, 'mxnet/include/mxnet'))
shutil.copytree(os.path.join(CURRENT_DIR, 'mxnet-build/3rdparty/dlpack/include/dlpack'),
                os.path.join(CURRENT_DIR, 'mxnet/include/dlpack'))
shutil.copytree(os.path.join(CURRENT_DIR, 'mxnet-build/3rdparty/dmlc-core/include/dmlc'),
                os.path.join(CURRENT_DIR, 'mxnet/include/dmlc'))
shutil.copytree(os.path.join(CURRENT_DIR, 'mxnet-build/3rdparty/mshadow/mshadow'),
                os.path.join(CURRENT_DIR, 'mxnet/include/mshadow'))
shutil.copytree(os.path.join(CURRENT_DIR, 'mxnet-build/3rdparty/tvm/nnvm/include/nnvm'),
                os.path.join(CURRENT_DIR, 'mxnet/include/nnvm'))
# Package name encodes the hardware variant, e.g. mxnet_cu102 (set by CI env).
package_name = 'mxnet'
variant = os.environ['mxnet_variant'].upper()
if variant != 'CPU':
    package_name = 'mxnet_{0}'.format(variant.lower())
def skip_markdown_comments(md):
    """Strip leading blank lines and full-line HTML comments from *md*.

    Returns the remainder of *md* starting at the first line that is neither
    blank nor a complete ``<!-- ... -->`` comment line. Previously the
    function implicitly returned None when *md* consisted only of blank and
    comment lines, which broke the ``long_description + ...`` string
    concatenation below; it now returns '' in that case.
    """
    lines = md.splitlines()
    for i, line in enumerate(lines):
        if line.strip():
            # A line that is not a self-contained HTML comment starts the body.
            if not line.startswith('<!--') or not line.endswith('-->'):
                return '\n'.join(lines[i:])
    return ''
# Assemble the PyPI long description from the shared README plus the
# variant-specific addendum.
with open('doc/PYPI_README.md') as readme_file:
    long_description = skip_markdown_comments(readme_file.read())
with open('doc/{0}_ADDITIONAL.md'.format(variant)) as variant_doc:
    long_description = long_description + skip_markdown_comments(variant_doc.read())
# pypi only supports rst, so use pandoc to convert
import pypandoc
if platform.system() == 'Darwin':
    pypandoc.download_pandoc()
long_description = pypandoc.convert_text(long_description, 'rst', 'md')
short_description = 'MXNet is an ultra-scalable deep learning framework.'
# Describe which native libraries this variant links against.
libraries = []
if variant == 'CPU':
    libraries.append('openblas')
else:
    if variant.startswith('CU102'):
        libraries.append('CUDA-10.2')
    elif variant.startswith('CU101'):
        libraries.append('CUDA-10.1')
    elif variant.startswith('CU100'):
        libraries.append('CUDA-10.0')
    elif variant.startswith('CU92'):
        libraries.append('CUDA-9.2')
if variant != 'NATIVE':
    libraries.append('MKLDNN')
short_description += ' This version uses {0}.'.format(' and '.join(libraries))
# package_data lists every bundled binary so it ends up inside the wheel.
package_data = {'mxnet': [os.path.join('mxnet', os.path.basename(LIB_PATH[0]))],
                'dmlc_tracker': []}
if variant != 'NATIVE':
    shutil.copytree(os.path.join(CURRENT_DIR, 'mxnet-build/3rdparty/mkldnn/include'),
                    os.path.join(CURRENT_DIR, 'mxnet/include/mkldnn'))
if platform.system() == 'Linux':
    # Bundle the Fortran/BLAS runtime libraries libmxnet was linked against.
    libdir, mxdir = os.path.dirname(LIB_PATH[0]), os.path.join(CURRENT_DIR, 'mxnet')
    if os.path.exists(os.path.join(libdir, 'libgfortran.so.3')):
        shutil.copy(os.path.join(libdir, 'libgfortran.so.3'), mxdir)
        package_data['mxnet'].append('mxnet/libgfortran.so.3')
    else:
        shutil.copy(os.path.join(libdir, 'libgfortran.so.4'), mxdir)
        package_data['mxnet'].append('mxnet/libgfortran.so.4')
    shutil.copy(os.path.join(libdir, 'libquadmath.so.0'), mxdir)
    package_data['mxnet'].append('mxnet/libquadmath.so.0')
    if os.path.exists(os.path.join(libdir, 'libopenblas.so.0')):
        shutil.copy(os.path.join(libdir, 'libopenblas.so.0'), mxdir)
        # BUG FIX: this previously appended 'mxnet/libquadmath.so.0' a second
        # time (copy-paste error), so the copied libopenblas.so.0 was never
        # declared in package_data and got left out of the wheel.
        package_data['mxnet'].append('mxnet/libopenblas.so.0')
# Copy licenses and notice
for f in os.listdir('mxnet/licenses'):
    package_data['mxnet'].append('mxnet/licenses/{}'.format(f))
# Regenerate operator signatures so IDEs see real Python signatures instead of
# the dynamically registered ops.
from mxnet.base import _generate_op_module_signature
from mxnet.ndarray.register import _generate_ndarray_function_code
from mxnet.symbol.register import _generate_symbol_function_code
_generate_op_module_signature('mxnet', 'symbol', _generate_symbol_function_code)
_generate_op_module_signature('mxnet', 'ndarray', _generate_ndarray_function_code)
# Hand everything off to setuptools. distclass=BinaryDistribution makes the
# wheel platform-specific on macOS (see BinaryDistribution.has_ext_modules).
setup(name=package_name,
      version=__version__,
      long_description=long_description,
      description=short_description,
      zip_safe=False,
      packages=find_packages(),
      package_data=package_data,
      include_package_data=True,
      install_requires=DEPENDENCIES,
      distclass=BinaryDistribution,
      license='Apache 2.0',
      classifiers=[ # https://pypi.org/pypi?%3Aaction=list_classifiers
          'Development Status :: 5 - Production/Stable',
          'Intended Audience :: Developers',
          'Intended Audience :: Education',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: Apache Software License',
          'Programming Language :: C++',
          'Programming Language :: Cython',
          'Programming Language :: Other', # R, Scala
          'Programming Language :: Perl',
          'Programming Language :: Python',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: Implementation :: CPython',
          'Topic :: Scientific/Engineering',
          'Topic :: Scientific/Engineering :: Artificial Intelligence',
          'Topic :: Scientific/Engineering :: Mathematics',
          'Topic :: Software Development',
          'Topic :: Software Development :: Libraries',
          'Topic :: Software Development :: Libraries :: Python Modules',
      ],
      url='https://github.com/apache/incubator-mxnet')
| |
import logging
import os
from collections import namedtuple
import xml.etree.ElementTree as ET
import timeit
import numpy as np
import pysimox
import pymmm
# MMM joint names the feature pipeline supports; unknown joints are skipped
# with a warning during parsing.
SUPPORTED_JOINTS = ['BPx_joint', 'BPy_joint', 'BPz_joint', 'BTx_joint', 'BTy_joint', 'BTz_joint', 'BUNx_joint',
                    'BUNy_joint', 'BUNz_joint', 'LAx_joint', 'LAy_joint', 'LAz_joint', 'LEx_joint', 'LEz_joint',
                    'LHx_joint', 'LHy_joint', 'LHz_joint', 'LKx_joint', 'LSx_joint', 'LSy_joint', 'LSz_joint',
                    'LWx_joint', 'LWy_joint', 'RAx_joint', 'RAy_joint', 'RAz_joint', 'REx_joint', 'REz_joint',
                    'RHx_joint', 'RHy_joint', 'RHz_joint', 'RKx_joint', 'RSx_joint', 'RSy_joint', 'RSz_joint',
                    'RWx_joint', 'RWy_joint', 'BLNx_joint', 'BLNy_joint', 'BLNz_joint']
# All feature identifiers Motion.features() accepts; '*_norm' entries are the
# per-frame Euclidean norm of the corresponding base feature.
FEATURE_NAMES = ['joint_pos', 'joint_vel', 'joint_vel_norm', 'joint_acc', 'joint_acc_norm',
                 'root_pos', 'root_vel', 'root_vel_norm', 'root_acc', 'root_acc_norm',
                 'com_pos', 'com_vel', 'com_vel_norm', 'com_acc', 'com_acc_norm',
                 'left_hand_pos', 'left_hand_vel', 'left_hand_vel_norm', 'left_hand_acc', 'left_hand_acc_norm',
                 'right_hand_pos', 'right_hand_vel', 'right_hand_vel_norm', 'right_hand_acc', 'right_hand_acc_norm',
                 'left_foot_pos', 'left_foot_vel', 'left_foot_vel_norm', 'left_foot_acc', 'left_foot_acc_norm',
                 'right_foot_pos', 'right_foot_vel', 'right_foot_vel_norm', 'right_foot_acc', 'right_foot_acc_norm',
                 'root_rot', 'root_rot_norm',
                 'angular_momentum', 'angular_momentum_norm']
# One motion-capture sample; optional fields are None when absent in the XML.
Frame = namedtuple('Frame', 'timestep, root_pos, root_rot, joint_pos, joint_vel, joint_acc, angular_momentum, com')
# Internal constants
# Maps end-effector feature names to the MMM robot segment they are read from.
_POS_TO_SEGMENT_MAP = {'left_hand_pos': 'LeftHandSegment_joint',
                       'right_hand_pos': 'RightHandSegment_joint',
                       'left_foot_pos': 'LeftFootHeight_joint',
                       'right_foot_pos': 'RightFootHeight_joint',
                       'left_hand_vel': 'LeftHandSegment_joint',
                       'right_hand_vel': 'RightHandSegment_joint',
                       'left_foot_vel': 'LeftFootHeight_joint',
                       'right_foot_vel': 'RightFootHeight_joint',
                       'left_hand_acc': 'LeftHandSegment_joint',
                       'right_hand_acc': 'RightHandSegment_joint',
                       'left_foot_acc': 'LeftFootHeight_joint',
                       'right_foot_acc': 'RightFootHeight_joint'}
_SEGMENT_POS_NAMES = ['left_hand_pos', 'right_hand_pos', 'left_foot_pos', 'right_foot_pos']
_SEGMENT_VEL_NAMES = ['left_hand_vel', 'right_hand_vel', 'left_foot_vel', 'right_foot_vel']
_SEGMENT_ACC_NAMES = ['left_hand_acc', 'right_hand_acc', 'left_foot_acc', 'right_foot_acc']
# Derived groupings by suffix, used when iterating feature families.
_POS_NAMES = [_name for _name in FEATURE_NAMES if _name.endswith('_pos')]
_VEL_NAMES = [_name for _name in FEATURE_NAMES if _name.endswith('_vel')]
_ACC_NAMES = [_name for _name in FEATURE_NAMES if _name.endswith('_acc')]
def rotation_matrix(roll, pitch, yaw):
    """Return the 3x3 rotation matrix for the given roll/pitch/yaw angles.

    Element-wise expansion of the standard Rz(yaw) * Ry(pitch) * Rx(roll)
    composition; the products are kept in the same order as the original
    formulation so results are bit-identical.
    """
    sr, cr = np.sin(roll), np.cos(roll)
    sp, cp = np.sin(pitch), np.cos(pitch)
    sy, cy = np.sin(yaw), np.cos(yaw)
    return np.array([
        [cy * cp, cy * sp * sr - sy * cr, cy * sp * cr + sy * sr],
        [sy * cp, sy * sp * sr + cy * cr, sy * sp * cr - cy * sr],
        [-sp, cp * sr, cp * cr],
    ])
def pose_matrix(root_pos, root_rot):
    """Assemble a 4x4 homogeneous pose from a position and roll/pitch/yaw."""
    pose = np.eye(4)
    pose[0:3, 0:3] = rotation_matrix(root_rot[0], root_rot[1], root_rot[2])
    pose[0:3, 3] = root_pos
    # The float32 cast is VERY IMPORTANT: pysimox and pymmm require float32!
    return pose.astype('float32')
class Motion(object):
    """A parsed MMM motion plus on-demand kinematic feature extraction.

    Holds the frame sequence together with the subject's MMM model file so
    that derived features (positions, velocities, accelerations, root
    rotation, angular momentum, and their norms) can be computed via
    `features()`. The Simox robot model is built lazily on first use.
    """
    def __init__(self, frames, joint_names, model_path, subject_height, subject_mass):
        self.joint_names = joint_names
        self.frames = frames
        self.n_frames = len(frames)
        self.model_path = model_path
        self.subject_height = subject_height  # in meters
        self.subject_mass = subject_mass  # in kilogram
        self.dt = frames[1].timestep - frames[0].timestep  # assuming equidistant frames
        assert self.dt > 0.
        # Lazily initialized by features(): Simox robot + the node set over
        # the supported joints.
        self.robot = None
        self.robot_node_set = None
    def features(self, names, normalize=True):
        """Compute the requested features for all frames.

        Returns (X, lengths) where X is an (n_frames, sum(lengths)) matrix and
        lengths holds the per-feature column counts, in the order of *names*.
        Raises ValueError for an empty or unknown feature name list.
        """
        if not self.robot:
            # Build the subject-scaled MMM model once and cache it.
            model_reader = pymmm.ModelReaderXML()
            mmm_model = model_reader.loadModel(self.model_path)
            model_processor_winter = pymmm.ModelProcessorWinter()
            model_processor_winter.setup(self.subject_height, self.subject_mass)
            processed_model = model_processor_winter.convertModel(mmm_model)
            self.robot = pymmm.buildModel(processed_model, False)
            self.robot_node_set = pysimox.RobotNodeSet.createRobotNodeSet(self.robot, 'RobotNodeSet', self.joint_names,
                                                                          '', '', True)
        start_time = timeit.default_timer()
        if len(names) == 0:
            raise ValueError('must at least specify one feature name')
        for name in names:
            if name not in FEATURE_NAMES:
                raise ValueError('unknown feature %s' % name)
        features = {}
        # Calculate pos, acc and vel without normalization. We calculate those regardless of the requested features
        # since resolving dependencies between them is not worth the effort.
        self._calculate_positions(features)
        self._calculate_velocities(features)
        self._calculate_accelerations(features)
        # Calculate other normalized features.
        if 'root_rot' in names or 'root_rot_norm' in names:
            self._calculate_and_normalize_root_rot(features, normalize)
        if 'angular_momentum' in names or 'angular_momentum_norm' in names:
            self._calculate_and_normalize_angular_momentum(features, normalize)
        # Normalize pos, vel and acc.
        if normalize:
            self._normalize_positions(features)
            self._normalize_velocities(features)
            self._normalize_accelerations(features)
        # Calculate norm features
        self._calculate_norm_features(features)
        # Assemble features into feature matrix.
        X = np.zeros((self.n_frames, 0))
        lengths = []
        for name in names:
            feature = np.array(features[name])
            assert feature.shape[0] == self.n_frames
            X = np.hstack((X, feature))
            lengths.append(feature.shape[1])
        n_features = np.sum(lengths)
        assert X.shape == (self.n_frames, n_features)
        logging.info('loading features took %fs' % (timeit.default_timer() - start_time))
        return X, lengths
    def _configure_robot_with_frame(self, frame):
        # Pose the cached robot model to match the given frame.
        pose = pose_matrix(frame.root_pos, frame.root_rot)
        self.robot.setGlobalPose(pose, True)
        self.robot_node_set.setJointValues(frame.joint_pos)
    def _calculate_positions(self, features):
        """Fill per-frame joint/root/CoM/end-effector positions.

        Also stashes the full 'root_pose' and per-segment poses, which the
        normalization steps reuse later.
        """
        for name in _POS_NAMES:
            features[name] = []
        features['root_pose'] = []
        for segment_name in set(_POS_TO_SEGMENT_MAP.values()):
            features[segment_name] = []
        for frame in self.frames:
            self._configure_robot_with_frame(frame)
            features['root_pose'].append(self.robot.getGlobalPose())
            for name in _POS_NAMES:
                pos = None
                if name == 'joint_pos':
                    pos = np.array(frame.joint_pos)
                elif name == 'root_pos':
                    pos = np.array(frame.root_pos)
                elif name == 'com_pos':
                    pos = self.robot.getCoMGlobal().reshape(3,)
                elif name in _SEGMENT_POS_NAMES:
                    segment_name = _POS_TO_SEGMENT_MAP[name]
                    node = self.robot.getRobotNode(segment_name)
                    segment_pose = node.getGlobalPose()
                    pos = segment_pose[0:3, 3].reshape(3,)
                    features[segment_name].append(segment_pose)
                assert pos is not None
                features[name].append(pos)
    def _calculate_velocities(self, features):
        # Each '*_vel' is the central difference of the matching '*_pos'.
        vel_to_pos_map = {}
        for name in _VEL_NAMES:
            vel_to_pos_map[name] = name[:-3] + 'pos'
            features[name] = []
        self._calculate_vel_or_acc(features, _VEL_NAMES, vel_to_pos_map)
    def _calculate_accelerations(self, features):
        # Each '*_acc' is the central difference of the matching '*_vel'.
        acc_to_vel_map = {}
        for name in _ACC_NAMES:
            acc_to_vel_map[name] = name[:-3] + 'vel'
            features[name] = []
        self._calculate_vel_or_acc(features, _ACC_NAMES, acc_to_vel_map)
    def _calculate_vel_or_acc(self, features, names, antiderivative_map):
        """Central-difference derivative of each feature in *antiderivative_map*.

        Endpoints clamp to the first/last frame, so boundary derivatives are
        effectively one-sided.
        """
        for idx, frame in enumerate(self.frames):
            prev_idx = idx - 1 if idx > 0 else 0
            next_idx = idx + 1 if idx < self.n_frames - 1 else self.n_frames - 1
            for name in names:
                antiderivative_name = antiderivative_map[name]
                prev_value = features[antiderivative_name][prev_idx]
                next_value = features[antiderivative_name][next_idx]
                derivative = (next_value - prev_value) / (2. * self.dt)
                features[name].append(derivative)
    def _normalize_positions(self, features):
        """Express all positions relative to the first frame's root pose.

        Only the yaw of frame 0 is used (roll/pitch zeroed), so gravity stays
        aligned after normalization.
        """
        #root_rot0 = self.frames[0].root_rot
        root_rot0 = (0., 0., self.frames[0].root_rot[2])
        root_pose0_inv = np.linalg.inv(pose_matrix(self.frames[0].root_pos, root_rot0))
        for name in _POS_NAMES:
            if name == 'joint_pos':
                continue
            # Homogeneous transform: append a 1, apply the inverse pose, drop it.
            pos = np.hstack((np.array(features[name]), np.ones((self.n_frames, 1))))
            features[name] = np.dot(root_pose0_inv, pos.T).T[:, 0:3]
    def _normalize_velocities(self, features):
        self._normalize_vel_or_acc(features, _VEL_NAMES)
    def _normalize_accelerations(self, features):
        self._normalize_vel_or_acc(features, _ACC_NAMES)
    def _normalize_vel_or_acc(self, features, names):
        """Rotate vel/acc vectors into a local frame (per-frame).

        End-effector quantities use their own segment's orientation; all
        others use the root orientation. Joint-space vel/acc stay untouched.
        """
        # TODO: implement different normalization types to segment
        for idx, frame in enumerate(self.frames):
            root_rot_inv = np.linalg.inv(features['root_pose'][idx][0:3, 0:3])
            for name in names:
                if name == 'joint_vel' or name == 'joint_acc':
                    continue
                value = features[name][idx]
                if name in _SEGMENT_VEL_NAMES or name in _SEGMENT_ACC_NAMES:
                    segment_name = _POS_TO_SEGMENT_MAP[name]
                    node_rot_inv = np.linalg.inv(features[segment_name][idx][0:3, 0:3])
                    value = np.dot(node_rot_inv, value)
                else:
                    value = np.dot(root_rot_inv, value)
                features[name][idx] = value
    def _calculate_and_normalize_root_rot(self, features, normalize):
        """Compute 'root_rot', optionally unwrapped and relative to frame 0."""
        offset = np.zeros(3)
        frame0 = self.frames[0]
        feature = []
        for idx, frame in enumerate(self.frames):
            rot = np.array(frame.root_rot)
            if normalize:
                prev_idx = idx - 1 if idx > 0 else 0
                prev_rot = np.array(self.frames[prev_idx].root_rot)
                # Detect overflow (that is a jump from +pi to -pi) and underflow (jump from -pi to +pi). If such
                # a jump happens, we normalize by adding +2pi (-2pi respectively) as a correction factor to all
                # rotations following the jump.
                threshold = 2. * np.pi - 0.1
                delta_root_rot = prev_rot - rot
                overflow_indexes = np.where(delta_root_rot > threshold)[0]
                if len(overflow_indexes) > 0:
                    offset[overflow_indexes] += 2. * np.pi
                underflow_indexes = np.where(delta_root_rot < -threshold)[0]
                if len(underflow_indexes) > 0:
                    offset[underflow_indexes] -= 2. * np.pi
                rot = (rot + offset) - np.array(frame0.root_rot)
            feature.append(rot)
        features['root_rot'] = feature
    def _calculate_and_normalize_angular_momentum(self, features, normalize):
        """Compute whole-body angular momentum about the CoM per frame.

        Requires 'com_pos' and 'joint_vel' to be present in *features*
        (guaranteed by the call order in features()).
        """
        n_frames = len(self.frames)
        # Pre-calculate some dynamic properties
        inverse_kinematics = pysimox.DifferentialIK(self.robot_node_set)
        segments_with_mass = [node.getName() for node in self.robot.getRobotNodes() if node.getMass() > 0.]
        frame_segment_data = []
        for idx, frame in enumerate(self.frames):
            self._configure_robot_with_frame(frame)
            segment_map = {}
            for segment in segments_with_mass:
                node = self.robot.getRobotNode(segment)
                data = {'com': node.getCoMGlobal().reshape(3,),
                        'jac': inverse_kinematics.getJacobianMatrix(node)}
                segment_map[segment] = data
            frame_segment_data.append(segment_map)
        # Calculate angular momentum
        feature = []
        for idx, frame in enumerate(self.frames):
            self._configure_robot_with_frame(frame)
            prev_idx = idx - 1 if idx > 0 else 0
            next_idx = idx + 1 if idx < n_frames-1 else n_frames-1
            angular_momentum = np.zeros(3)
            for segment in segments_with_mass:
                node = self.robot.getRobotNode(segment)
                # Calculate linear velocity
                prev_segment_com = frame_segment_data[prev_idx][segment]['com']
                next_segment_com = frame_segment_data[next_idx][segment]['com']
                segment_com_vel = next_segment_com - prev_segment_com
                com_vel = features['com_pos'][next_idx] - features['com_pos'][prev_idx]
                linear_vel = (segment_com_vel - com_vel) / (2. * self.dt)
                # Calculate angular velocity
                joint_vel = features['joint_vel'][idx]
                angular_vel = np.dot(frame_segment_data[idx][segment]['jac'], joint_vel)[3:6]
                # Calculate angular momentum for this segment
                delta_com = frame_segment_data[idx][segment]['com'] - features['com_pos'][idx]
                if normalize:
                    root_rot_inv = np.linalg.inv(self.robot.getGlobalPose()[0:3, 0:3])
                    delta_com = np.dot(root_rot_inv, delta_com)
                    linear_vel = np.dot(root_rot_inv, linear_vel)
                inertia_tensor = node.getInertiaMatrix() * 1000000.  # m^2 -> mm^2
                segment_am = node.getMass() * np.cross(delta_com, linear_vel)
                segment_am += np.dot(inertia_tensor, angular_vel)
                assert segment_am.shape == angular_momentum.shape
                angular_momentum += segment_am
            angular_momentum /= 1000000.  # mm^2 -> m^2
            # TODO: normalization of the velocities/positions is not necessary. However, subject-specifc
            # values like the height should be normalized eventually.
            # Also see http://jeb.biologists.org/content/211/4/467.full.pdf
            feature.append(angular_momentum)
        features['angular_momentum'] = feature
    def _calculate_norm_features(self, features):
        # For every computed base feature, add its per-frame Euclidean norm
        # under the matching '*_norm' name.
        for name in FEATURE_NAMES:
            if not name.endswith('_norm'):
                continue
            non_norm_name = name[:-5]
            if non_norm_name not in features:
                continue
            norms = np.linalg.norm(features[non_norm_name], axis=1).reshape((self.n_frames, 1))
            features[name] = norms
def parse_motions(path):
    """Parse an MMM XML file and return a list containing its first <Motion>.

    Raises RuntimeError when the file contains no <Motion> tag at all
    (previously this crashed with a bare IndexError).
    """
    xml_tree = ET.parse(path)
    xml_root = xml_tree.getroot()
    xml_motions = xml_root.findall('Motion')
    motions = []
    # TODO: currently we only read the first motion, which is usually the movement. Some files also contain other
    # motions, which are usually objects and/or obstacles in the scene. We should be somehow able to handle this better
    # in case the human motion is not the first motion in the file
    if not xml_motions:
        raise RuntimeError('no <Motion> tag found in file "%s"' % path)
    if len(xml_motions) > 1:
        # logging.warn is a deprecated alias of logging.warning.
        logging.warning('more than one <Motion> tag in file "%s", only parsing the first one', path)
    motions.append(_parse_motion(xml_motions[0], path))
    return motions
def _parse_motion(xml_motion, path):
    """Convert one <Motion> element into a Motion object.

    *path* is the motion file's location, used to resolve the relative model
    path. Raises RuntimeError when the model file, subject height/mass, joint
    order, or motion frames are missing.
    """
    # Extract model information
    xml_model_file = xml_motion.find('Model/File')
    if xml_model_file is None:
        raise RuntimeError('model file not found')
    model_path = str(os.path.abspath(os.path.join(os.path.dirname(path), xml_model_file.text)))
    if not os.path.exists(model_path):
        raise RuntimeError('model "%s" does not exist' % model_path)
    # Extract subject information
    xml_height = xml_motion.find('ModelProcessorConfig/Height')
    if xml_height is None:
        raise RuntimeError('subject height not found')
    xml_mass = xml_motion.find('ModelProcessorConfig/Mass')
    if xml_mass is None:
        raise RuntimeError('subject mass not found')
    subject_height, subject_mass = float(xml_height.text), float(xml_mass.text)
    # Collect the supported joints and remember their column indexes so frame
    # values for unsupported joints can be dropped.
    xml_joint_order = xml_motion.find('JointOrder')
    if xml_joint_order is None:
        raise RuntimeError('<JointOrder> not found')
    joint_names = []
    joint_indexes = []
    for idx, xml_joint in enumerate(xml_joint_order.findall('Joint')):
        name = xml_joint.get('name')
        if name is None:
            raise RuntimeError('<Joint> has no name')
        elif name not in SUPPORTED_JOINTS:
            # logging.warn is a deprecated alias of logging.warning.
            logging.warning('joint %s is unsupported - skipping', name)
            continue
        joint_indexes.append(idx)
        joint_names.append(name)
    frames = []
    xml_frames = xml_motion.find('MotionFrames')
    if xml_frames is None:
        raise RuntimeError('<MotionFrames> not found')
    for xml_frame in xml_frames.findall('MotionFrame'):
        frames.append(_parse_frame(xml_frame, joint_indexes))
    return Motion(frames, joint_names, model_path, subject_height, subject_mass)
def _parse_frame(xml_frame, joint_indexes):
    """Convert one <MotionFrame> element into a Frame namedtuple.

    Timestep and joint positions are mandatory; every other field is parsed
    when present and left as None otherwise.
    """
    n_joints = len(joint_indexes)

    def required(tag):
        # Mandatory child element; mirror the original error messages exactly.
        node = xml_frame.find(tag)
        if node is None:
            raise RuntimeError('<%s> not found' % tag)
        return node

    def optional_list(tag, length, indexes=None):
        # Optional child element parsed as a float list, or None when absent.
        node = xml_frame.find(tag)
        if node is None:
            return None
        return _parse_list(node, length, indexes)

    timestep = float(required('Timestep').text)
    joint_pos = _parse_list(required('JointPosition'), n_joints, joint_indexes)
    # Optional attributes
    joint_vel = optional_list('JointVelocity', n_joints, joint_indexes)
    joint_acc = optional_list('JointAcceleration', n_joints, joint_indexes)
    root_pos = optional_list('RootPosition', 3)
    root_rot = optional_list('RootRotation', 3)
    angular_momentum = optional_list('AngularMomentum', 3)
    com = optional_list('CoM', 3)
    return Frame(timestep=timestep, root_pos=root_pos, root_rot=root_rot,
                 joint_pos=joint_pos, joint_acc=joint_acc, joint_vel=joint_vel,
                 angular_momentum=angular_momentum, com=com)
def _parse_list(xml_elem, length, indexes=None):
if indexes is None:
indexes = range(length)
elems = [float(x) for idx, x in enumerate(xml_elem.text.rstrip().split(' ')) if idx in indexes]
if len(elems) != length:
raise RuntimeError('invalid number of elements')
return elems
| |
#!python
#
# Format Identification for Digital Objects
import cStringIO, zipfile, os
import hashlib
import urllib
from xml.etree import ElementTree as ET
class NS:
    """Qualify tag names with an XML namespace for ElementTree lookups.

    Use like MYNS = NS("{http://some/uri}"); then MYNS.tag1 yields the
    qualified tag and MYNS("tag1/tag2") qualifies every path step.
    """
    def __init__(self, uri):
        self.uri = uri

    def __getattr__(self, tag):
        # Any unknown attribute access yields the namespace-qualified tag.
        return self.uri + tag

    def __call__(self, path):
        # Qualify each step of a slash-separated path via __getattr__.
        return "/".join(getattr(self, step) for step in path.split("/"))
# XHTML namespace
XHTML = NS("{http://www.w3.org/1999/xhtml}")
# TNA namespace
# (The National Archives / PRONOM registry, used throughout the DROID data.)
TNA = NS("{http://pronom.nationalarchives.gov.uk}")
def get_text_tna(element, tag, default=''):
    """Return the stripped text of *tag* (TNA namespace) under *element*.

    Falls back to *default* when the tag is absent or has no text.
    """
    part = element.find(TNA(tag))
    # Use identity comparison: ElementTree deprecates equality/truth testing
    # on Element objects (elements without children are falsy).
    if part is not None and part.text is not None:
        return part.text.strip()
    return default
def prettify(elem):
    """Return a pretty-printed XML string for the Element."""
    # Imported lazily: minidom is only needed for this debugging helper.
    from xml.dom import minidom
    serialized = ET.tostring(elem, 'UTF-8')
    dom = minidom.parseString(serialized)
    return dom.toprettyxml(indent="  ")
class FormatInfo:
    """Converts zipped PRONOM format reports into fido's format XML."""

    def __init__(self, pronom_files, format_list=None):
        """
        @param pronom_files: path to a zip archive of PRONOM report XML files.
        @param format_list: optional iterable of pre-built format elements.
        """
        self.info = {}
        self.formats = []
        self.pronom_files = pronom_files
        # A mutable default argument ([]) is shared between calls; use None
        # as the sentinel instead.
        # NOTE(review): add_format is not defined in this class here —
        # presumably provided elsewhere; confirm before passing format_list.
        for f in (format_list or []):
            self.add_format(f)

    def save(self, dst):
        """Write the fido XML format definitions to @param dst."""
        tree = ET.ElementTree(ET.Element('formats', {
            'version': '0.3',
            'xmlns:xsi': "http://www.w3.org/2001/XMLSchema-instance",
            'xsi:noNamespaceSchemaLocation': "fido-formats-0.3.xsd",
            'xmlns:dc': "http://purl.org/dc/elements/1.1/",
            'xmlns:dcterms': "http://purl.org/dc/terms/"}))
        root = tree.getroot()
        for f in self.formats:
            # `is not None` fix: ElementTree elements are falsy when they
            # have no children, so the old truth test could silently drop a
            # format whose <signature> element happened to be empty.
            if f.find('signature') is not None:
                root.append(f)
        self.indent(root)
        with open(dst, 'wb') as out:
            # Equivalent to the old `print >>out, ...` without the
            # Python-2-only syntax.
            out.write(ET.tostring(root, encoding='UTF-8') + '\n')

    def indent(self, elem, level=0):
        """Recursively insert newline/indent whitespace for readable output."""
        i = "\n" + level * " "
        if len(elem):
            if not elem.text or not elem.text.strip():
                elem.text = i + " "
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
            for child in elem:
                self.indent(child, level + 1)
            # `child` is the last subelement after the loop; its tail gets
            # the parent-level indent so the closing tag lines up.
            if not child.tail or not child.tail.strip():
                child.tail = i
        else:
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i

    def load_pronom_xml(self, puid_filter=None):
        """Load the pronom XML from self.pronom_files and convert it to fido XML.

        As a side-effect, set self.formats to a list of ElementTree.Element.
        If @param puid_filter is specified, only that PUID is loaded.
        """
        formats = []
        # Open the archive before the try so the finally clause never
        # references an unbound name if the constructor raises.
        zip = zipfile.ZipFile(self.pronom_files, 'r')
        try:
            for item in zip.infolist():
                # Likewise: open the member before its try/finally.
                stream = zip.open(item)
                try:
                    # Work is done here!
                    format = self.parse_pronom_xml(stream, puid_filter)
                    if format is not None:
                        formats.append(format)
                finally:
                    stream.close()
        finally:
            zip.close()
        # Replace the formatID with puids in has_priority_over
        id_map = {}
        for element in formats:
            puid = element.find('puid').text
            pronom_id = element.find('pronom_id').text
            id_map[pronom_id] = puid
        for element in formats:
            for rel in element.findall('has_priority_over'):
                rel.text = id_map[rel.text]
        # _sort_formats returns a new sorted list; the original code called
        # it and then discarded the result, so formats were never sorted.
        self.formats = self._sort_formats(formats)

    def parse_pronom_xml(self, source, puid_filter=None):
        """Read a pronom XML from @param source, convert it to fido XML and
        @return an ET.Element representing it, or None when filtered out.
        If @param puid_filter is specified, only that PUID is converted.
        """
        pronom_xml = ET.parse(source)
        pronom_root = pronom_xml.getroot()
        pronom_format = pronom_root.find(TNA('report_format_detail/FileFormat'))
        fido_format = ET.Element('format')
        # Get the base Format information
        for id in pronom_format.findall(TNA('FileFormatIdentifier')):
            type = get_text_tna(id, 'IdentifierType')
            if type == 'PUID':
                puid = get_text_tna(id, 'Identifier')
                ET.SubElement(fido_format, 'puid').text = puid
                if puid_filter is not None and puid != puid_filter:
                    return None
        # A bit clumsy. I want to have puid first, then mime, then container.
        for id in pronom_format.findall(TNA('FileFormatIdentifier')):
            type = get_text_tna(id, 'IdentifierType')
            if type == 'MIME':
                ET.SubElement(fido_format, 'mime').text = get_text_tna(id, 'Identifier')
            elif type == 'PUID':
                puid = get_text_tna(id, 'Identifier')
                # Hard-wired container formats: zip and tar PUIDs.
                if puid == 'x-fmt/263':
                    ET.SubElement(fido_format, 'container').text = 'zip'
                elif puid == 'x-fmt/265':
                    ET.SubElement(fido_format, 'container').text = 'tar'
        ET.SubElement(fido_format, 'name').text = get_text_tna(pronom_format, 'FormatName')
        ET.SubElement(fido_format, 'version').text = get_text_tna(pronom_format, 'FormatVersion')
        ET.SubElement(fido_format, 'alias').text = get_text_tna(pronom_format, 'FormatAliases')
        ET.SubElement(fido_format, 'pronom_id').text = get_text_tna(pronom_format, 'FormatID')
        # Get the extensions from the ExternalSignature
        for x in pronom_format.findall(TNA('ExternalSignature')):
            ET.SubElement(fido_format, 'extension').text = get_text_tna(x, 'Signature')
        for id in pronom_format.findall(TNA('FileFormatIdentifier')):
            type = get_text_tna(id, 'IdentifierType')
            if type == 'Apple Uniform Type Identifier':
                ET.SubElement(fido_format, 'apple_uid').text = get_text_tna(id, 'Identifier')
        # Handle the relationships
        for x in pronom_format.findall(TNA('RelatedFormat')):
            rel = get_text_tna(x, 'RelationshipType')
            if rel == 'Has priority over':
                ET.SubElement(fido_format, 'has_priority_over').text = get_text_tna(x, 'RelatedFormatID')
        # Get the InternalSignature information
        for pronom_sig in pronom_format.findall(TNA('InternalSignature')):
            fido_sig = ET.SubElement(fido_format, 'signature')
            ET.SubElement(fido_sig, 'name').text = get_text_tna(pronom_sig, 'SignatureName')
            # There are some funny chars in the notes, which caused me trouble and it is a unicode string,
            ET.SubElement(fido_sig, 'note').text = get_text_tna(pronom_sig, 'SignatureNote').encode('UTF-8')
            for pronom_pat in pronom_sig.findall(TNA('ByteSequence')):
                fido_pat = ET.SubElement(fido_sig, 'pattern')
                pos = fido_position(get_text_tna(pronom_pat, 'PositionType'))
                bytes = get_text_tna(pronom_pat, 'ByteSequenceValue')
                offset = get_text_tna(pronom_pat, 'Offset')
                # get_text_tna never returns None (its default is ''), so the
                # old `if max_offset == None: pass` check was dead code.
                max_offset = get_text_tna(pronom_pat, 'MaxOffset')
                regex = convert_to_regex(bytes, 'Little', pos, offset, max_offset)
                ET.SubElement(fido_pat, 'position').text = pos
                ET.SubElement(fido_pat, 'pronom_pattern').text = bytes
                ET.SubElement(fido_pat, 'regex').text = regex
        # Get the format details
        fido_details = ET.SubElement(fido_format, 'details')
        ET.SubElement(fido_details, 'dc:description').text = get_text_tna(pronom_format, 'FormatDescription').encode('utf8')
        ET.SubElement(fido_details, 'dcterms:available').text = get_text_tna(pronom_format, 'ReleaseDate')
        ET.SubElement(fido_details, 'dc:creator').text = get_text_tna(pronom_format, 'Developers/DeveloperCompoundName')
        ET.SubElement(fido_details, 'dcterms:publisher').text = get_text_tna(pronom_format, 'Developers/OrganisationName')
        for x in pronom_format.findall(TNA('RelatedFormat')):
            rel = get_text_tna(x, 'RelationshipType')
            if rel == 'Is supertype of':
                ET.SubElement(fido_details, 'is_supertype_of').text = get_text_tna(x, 'RelatedFormatID')
        for x in pronom_format.findall(TNA('RelatedFormat')):
            rel = get_text_tna(x, 'RelationshipType')
            if rel == 'Is subtype of':
                ET.SubElement(fido_details, 'is_subtype_of').text = get_text_tna(x, 'RelatedFormatID')
        ET.SubElement(fido_details, 'content_type').text = get_text_tna(pronom_format, 'FormatTypes')
        # References
        for x in pronom_format.findall(TNA("Document")):
            r = ET.SubElement(fido_details, 'reference')
            ET.SubElement(r, 'dc:title').text = get_text_tna(x, 'TitleText')
            ET.SubElement(r, 'dc:creator').text = get_text_tna(x, 'Author/AuthorCompoundName')
            ET.SubElement(r, 'dc:publisher').text = get_text_tna(x, 'Publisher/PublisherCompoundName')
            ET.SubElement(r, 'dcterms:available').text = get_text_tna(x, 'PublicationDate')
            for id in x.findall(TNA('DocumentIdentifier')):
                type = get_text_tna(id, 'IdentifierType')
                if type == 'URL':
                    ET.SubElement(r, 'dc:identifier').text = "http://" + get_text_tna(id, 'Identifier')
                else:
                    ET.SubElement(r, 'dc:identifier').text = get_text_tna(id, 'IdentifierType') + ":" + get_text_tna(id, 'Identifier')
            ET.SubElement(r, 'dc:description').text = get_text_tna(x, 'DocumentNote')
            ET.SubElement(r, 'dc:type').text = get_text_tna(x, 'DocumentType')
            ET.SubElement(r, 'dcterms:license').text = get_text_tna(x, 'AvailabilityDescription') + " " + get_text_tna(x, 'AvailabilityNote')
            ET.SubElement(r, 'dc:rights').text = get_text_tna(x, 'DocumentIPR')
        # Examples
        for x in pronom_format.findall(TNA("ReferenceFile")):
            rf = ET.SubElement(fido_details, 'example_file')
            ET.SubElement(rf, 'dc:title').text = get_text_tna(x, 'ReferenceFileName')
            ET.SubElement(rf, 'dc:description').text = get_text_tna(x, 'ReferenceFileDescription')
            checksum = ""
            for id in x.findall(TNA('ReferenceFileIdentifier')):
                type = get_text_tna(id, 'IdentifierType')
                if type == 'URL':
                    url = "http://" + get_text_tna(id, 'Identifier')
                    ET.SubElement(rf, 'dc:identifier').text = url
                    # And calculate the checksum of this resource.
                    # NOTE(review): this fetches the URL over the network at
                    # conversion time; confirm that is intended.
                    m = hashlib.md5()
                    sock = urllib.urlopen(url)
                    m.update(sock.read())
                    sock.close()
                    checksum = m.hexdigest()
                else:
                    ET.SubElement(rf, 'dc:identifier').text = get_text_tna(id, 'IdentifierType') + ":" + get_text_tna(id, 'Identifier')
            ET.SubElement(rf, 'dcterms:license').text = ""
            ET.SubElement(rf, 'dc:rights').text = get_text_tna(x, 'ReferenceFileIPR')
            checksumElement = ET.SubElement(rf, 'checksum')
            checksumElement.text = checksum
            checksumElement.attrib['type'] = "md5"
        # Record Metadata
        md = ET.SubElement(fido_details, 'record_metadata')
        ET.SubElement(md, 'status').text = 'unknown'
        ET.SubElement(md, 'dc:creator').text = get_text_tna(pronom_format, 'ProvenanceName')
        ET.SubElement(md, 'dcterms:created').text = get_text_tna(pronom_format, 'ProvenanceSourceDate')
        ET.SubElement(md, 'dcterms:modified').text = get_text_tna(pronom_format, 'LastUpdatedDate')
        ET.SubElement(md, 'dc:description').text = get_text_tna(pronom_format, 'ProvenanceDescription').encode('utf8')
        return fido_format

    #FIXME: I don't think that this quite works yet!
    def _sort_formats(self, formatlist):
        """Sort the format list based on their priority relationships so
        higher priority formats appear earlier in the list.

        @return a new sorted list; the input list is left unmodified.
        """
        def compare_formats(f1, f2):
            f1ID = f1.find('puid').text
            f2ID = f2.find('puid').text
            # A format that "has priority over" the other sorts first.
            for worse in f1.findall('has_priority_over'):
                if worse.text == f2ID:
                    return -1
            for worse in f2.findall('has_priority_over'):
                if worse.text == f1ID:
                    return 1
            # Otherwise fall back to lexicographic PUID order.
            if f1ID < f2ID:
                return -1
            elif f1ID == f2ID:
                return 0
            else:
                return 1
        return sorted(formatlist, cmp=compare_formats)
def fido_position(pronom_position):
    """@return BOF/EOF/VAR instead of the more verbose pronom position names."""
    positions = {
        'Absolute from BOF': 'BOF',
        'Absolute from EOF': 'EOF',
        'Variable': 'VAR',
    }
    try:
        return positions[pronom_position]
    except KeyError:
        raise Exception("Unknown pronom PositionType=" + pronom_position)
def _convert_err_msg(msg, c, i, chars):
return "Conversion: {0}: char='{1}', at pos {2} in \n {3}\n {4}^".format(msg, c, i, chars, i * ' ')
def doByte(chars, i, littleendian):
    """Convert the two hex digits chars[i] and chars[i+1] into one escaped byte.

    @return a tuple (escaped_byte_string, 2).
    """
    digits = '0123456789ABCDEF'
    hi = digits.find(chars[i].upper())
    lo = digits.find(chars[i + 1].upper())
    if hi < 0 or lo < 0:
        raise Exception(_convert_err_msg('bad byte sequence', chars[i:i + 2], i, chars))
    # With 'littleendian' the first digit is the high nibble.
    value = chr(16 * hi + lo) if littleendian else chr(hi + 16 * lo)
    return (escape(value), 2)
# \a\b\n\r\t\v
# Characters that may appear literally (unescaped) in a generated regex.
_ordinary = frozenset(' !"#%&\',-/0123456789:;<=>@ABCDEFGHIJKLMNOPQRSTUVWXYZ_`abcdefghijklmnopqrstuvwxyz~')
# Regex metacharacters that must be backslash-escaped.
_special = '$()*+.?[]^\\{|}'
# Hex digits used when emitting \xNN escapes.
_hex = '0123456789abcdef'
def _escape_char(c):
    """Return the regex-safe escape for the single character *c*."""
    # Original used `c in '\n'`, which is also (vacuously) true for the empty
    # string; an exact comparison states the intent.
    if c == '\n':
        return '\\n'
    elif c == '\r':
        return '\\r'
    elif c in _special:
        return '\\' + c
    else:
        # Everything else becomes a two-digit hex escape.
        (high, low) = divmod(ord(c), 16)
        return '\\x' + _hex[high] + _hex[low]
def escape(string):
    "Escape characters in pattern that are non-printable, non-ascii, or special for regexes."
    out = []
    for ch in string:
        out.append(ch if ch in _ordinary else _escape_char(ch))
    return ''.join(out)
def convert_to_regex(chars, endianness='', pos='BOF', offset='0', maxoffset=''):
    """Convert
    @param chars, a pronom bytesequence, into a
    @return regular expression.
    Endianness is not used.
    """
    # Default to little-endian digit order unless 'Big' is requested.
    if 'Big' in endianness:
        littleendian = False
    else:
        littleendian = True
    # Empty-string parameters fall back to defaults ('' comes from get_text_tna).
    if len(offset) == 0:
        offset = '0'
    if len(maxoffset) == 0:
        maxoffset = None
    buf = cStringIO.StringIO()
    buf.write("(?s)") #If a regex starts with (?s), it is equivalent to DOTALL.
    i = 0
    state = 'start'
    # Anchor at beginning-of-file, honouring any offset/maxoffset range.
    if 'BOF' in pos:
        buf.write('\\A')
        if offset != '0':
            buf.write('.{')
            buf.write(str(offset))
            if maxoffset != None:
                buf.write(',' + maxoffset)
            buf.write('}')
        elif maxoffset != None:
            buf.write('.{0,' + maxoffset + '}')
    # Hand-written state machine over the PRONOM byte-sequence grammar.
    # States: start, bytes (hex pair), non-match ([!..]), bracket ([xx:yy]
    # range), paren ((aa|bb) alternation), curly ({n}/{n-m} gaps), specials
    # (*, +, ??).
    while True:
        if i == len(chars):
            break
        #print _convert_err_msg(state,chars[i],i,chars)
        if state == 'start':
            # Dispatch on the current character to pick the next state.
            if chars[i].isalnum():
                state = 'bytes'
            elif chars[i] == '[' and chars[i + 1] == '!':
                state = 'non-match'
            elif chars[i] == '[':
                state = 'bracket'
            elif chars[i] == '{':
                state = 'curly'
            elif chars[i] == '(':
                state = 'paren'
            elif chars[i] in '*+?':
                state = 'specials'
            else:
                raise Exception(_convert_err_msg('Illegal character in start', chars[i], i, chars))
        elif state == 'bytes':
            # Two hex digits -> one escaped literal byte.
            (byt, inc) = doByte(chars, i, littleendian)
            buf.write(byt)
            i += inc
            state = 'start'
        elif state == 'non-match':
            # [!xx..] -> negative group; emitted as '(!...)'.
            buf.write('(!')
            i += 2
            while True:
                if chars[i].isalnum():
                    (byt, inc) = doByte(chars, i, littleendian)
                    buf.write(byt)
                    i += inc
                elif chars[i] == ']':
                    break
                else:
                    raise Exception(_convert_err_msg('Illegal character in non-match', chars[i], i, chars))
            buf.write(')')
            i += 1
            state = 'start'
        elif state == 'bracket':
            # [xx:yy] -> character class covering the byte range xx-yy.
            try:
                buf.write('[')
                i += 1
                (byt, inc) = doByte(chars, i, littleendian)
                buf.write(byt)
                i += inc
                assert(chars[i] == ':')
                buf.write('-')
                i += 1
                (byt, inc) = doByte(chars, i, littleendian)
                buf.write(byt)
                i += inc
                assert(chars[i] == ']')
                buf.write(']')
                i += 1
            except Exception:
                print _convert_err_msg('Illegal character in bracket', chars[i], i, chars)
                raise
            # A '{' immediately after a bracket quantifies the bracketed item.
            if i < len(chars) and chars[i] == '{':
                state = 'curly-after-bracket'
            else:
                state = 'start'
        elif state == 'paren':
            # (aa|bb|...) -> non-capturing alternation of byte literals.
            buf.write('(?:')
            i += 1
            while True:
                if chars[i].isalnum():
                    (byt, inc) = doByte(chars, i, littleendian)
                    buf.write(byt)
                    i += inc
                elif chars[i] == '|':
                    buf.write('|')
                    i += 1
                elif chars[i] == ')':
                    break
                else:
                    raise Exception(_convert_err_msg('Illegal character in paren', chars[i], i, chars))
            buf.write(')')
            i += 1
            state = 'start'
        elif state in ['curly', 'curly-after-bracket']:
            # {nnnn} or {nnn-nnn} or {nnn-*}
            # {nnn} or {nnn,nnn} or {nnn,}
            # when there is a curly-after-bracket, then the {m,n} applies to the bracketed item
            # The above, while sensible, appears to be incorrect. A '.' is always needed.
            # for droid equiv behavior
            #if state == 'curly':
            buf.write('.')
            buf.write('{')
            i += 1 # skip the (
            while True:
                if chars[i].isalnum():
                    buf.write(chars[i])
                    i += 1
                elif chars[i] == '-':
                    # '-' separates min-max; regex uses ',' instead.
                    buf.write(',')
                    i += 1
                elif chars[i] == '*': # skip the *
                    i += 1
                elif chars[i] == '}':
                    break
                else:
                    raise Exception(_convert_err_msg('Illegal character in curly', chars[i], i, chars))
            buf.write('}')
            i += 1 # skip the )
            state = 'start'
        elif state == 'specials':
            # Wildcards: '*' -> '.*', '+' -> '.+', '??' -> '.?'.
            if chars[i] == '*':
                buf.write('.*')
                i += 1
            elif chars[i] == '+':
                buf.write('.+')
                i += 1
            elif chars[i] == '?':
                if chars[i + 1] != '?':
                    raise Exception(_convert_err_msg('Illegal character after ?', chars[i + 1], i + 1, chars))
                buf.write('.?')
                i += 2
            state = 'start'
        else:
            raise Exception('Illegal state {0}'.format(state))
    # Anchor at end-of-file, honouring any offset/maxoffset range.
    if 'EOF' in pos:
        if offset != '0':
            buf.write('.{' + offset)
            if maxoffset != None:
                buf.write(',' + maxoffset)
            buf.write('}')
        elif maxoffset != None:
            buf.write('.{0,' + maxoffset + '}')
        buf.write('\\Z')
    val = buf.getvalue()
    buf.close()
    return val
if __name__ == '__main__':
    # Command-line entry point: read the zipped PRONOM reports and emit
    # the fido formats XML.
    import sys
    from argparselocal import ArgumentParser
    arglist = sys.argv[1:]
    mydir = os.path.abspath(os.path.dirname(__file__))
    parser = ArgumentParser(description='Produce the fido format xml that is loaded at run-time')
    parser.add_argument('-input', default=os.path.join(mydir, 'conf', 'pronom-xml.zip'), help='input file, a zip containing Pronom xml files')
    parser.add_argument('-output', default=os.path.join(mydir, 'conf', 'formats.xml'), help='output file')
    parser.add_argument('-puid', default=None, help='a particular PUID record to extract')
    # PROCESS ARGUMENTS
    args = parser.parse_args(arglist)
    # print os.path.abspath(args.input), os.path.abspath(args.output)
    info = FormatInfo(args.input)
    info.load_pronom_xml(args.puid)
    info.save(args.output)
    # Summary count goes to stderr so stdout stays clean.
    print >> sys.stderr, 'FIDO: {0} formats'.format(len(info.formats))
| |
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import io
import random
import struct
import unittest
import common
import config
import net_crypto
import mle
import network_data
from ipaddress import ip_address
def any_address():
    # Random 16-bit value used as an MLE source address (packed as ">H").
    return random.getrandbits(16)


def any_receiver():
    # Random single-bit Mode flag.
    return random.getrandbits(1)


def any_secure():
    # Random single-bit Mode flag.
    return random.getrandbits(1)


def any_device_type():
    # Random single-bit Mode flag.
    return random.getrandbits(1)


def any_network_data():
    # Random single-bit Mode flag.
    return random.getrandbits(1)
# Maps every possible 4-bit MLE Mode octet to its expected decoded flag
# values; used by TestModeFactory to check ModeFactory.parse.
mode_map = {
    0x00: {"receiver": 0, "secure": 0, "device_type": 0, "network_data": 0},
    0x08: {"receiver": 1, "secure": 0, "device_type": 0, "network_data": 0},
    0x04: {"receiver": 0, "secure": 1, "device_type": 0, "network_data": 0},
    0x0C: {"receiver": 1, "secure": 1, "device_type": 0, "network_data": 0},
    0x02: {"receiver": 0, "secure": 0, "device_type": 1, "network_data": 0},
    0x0A: {"receiver": 1, "secure": 0, "device_type": 1, "network_data": 0},
    0x06: {"receiver": 0, "secure": 1, "device_type": 1, "network_data": 0},
    0x0E: {"receiver": 1, "secure": 1, "device_type": 1, "network_data": 0},
    0x01: {"receiver": 0, "secure": 0, "device_type": 0, "network_data": 1},
    0x09: {"receiver": 1, "secure": 0, "device_type": 0, "network_data": 1},
    0x05: {"receiver": 0, "secure": 1, "device_type": 0, "network_data": 1},
    0x0D: {"receiver": 1, "secure": 1, "device_type": 0, "network_data": 1},
    0x03: {"receiver": 0, "secure": 0, "device_type": 1, "network_data": 1},
    0x0B: {"receiver": 1, "secure": 0, "device_type": 1, "network_data": 1},
    0x07: {"receiver": 0, "secure": 1, "device_type": 1, "network_data": 1},
    0x0F: {"receiver": 1, "secure": 1, "device_type": 1, "network_data": 1}
}
def any_mode():
    # Random 4-bit Mode octet; always a valid key of mode_map.
    return random.getrandbits(4)


def any_timeout():
    # Random 32-bit timeout (packed as ">I").
    return random.getrandbits(32)


def any_challenge():
    # Random challenge of 4-8 bytes.
    length = random.randint(4, 8)
    return bytearray(random.getrandbits(8) for _ in range(length))


def any_response():
    # Random response of 4-8 bytes.
    length = random.randint(4, 8)
    return bytearray(random.getrandbits(8) for _ in range(length))


def any_link_layer_frame_counter():
    # Random 32-bit counter (packed as ">I").
    return random.getrandbits(32)


def any_mle_frame_counter():
    # Random 32-bit counter (packed as ">I").
    return random.getrandbits(32)


def any_output():
    # Random 2-bit link-quality "output" field.
    return random.getrandbits(2)


def any_input():
    # Random 2-bit link-quality "input" field.
    return random.getrandbits(2)


def any_route():
    # Random 4-bit route field.
    return random.getrandbits(4)


def any_id_sequence():
    # Random single bit.
    # NOTE(review): redefined below (L~30807) with 8 bits; the later
    # definition wins at import time, so this one is effectively dead.
    return random.getrandbits(1)


def any_router_id_mask():
    # Random 64-bit router-ID bitmask (packed as ">Q").
    return random.getrandbits(64)


def any_link_quality_and_route_data(length=None):
    # List of `length` random bytes (0-63 bytes when length is None).
    length = length if length is not None else random.randint(0, 63)
    return [random.getrandbits(8) for _ in range(length)]


def any_partition_id():
    # Random 32-bit partition id.
    return random.getrandbits(32)


def any_weighting():
    # Random byte.
    return random.getrandbits(8)


def any_data_version():
    # Random byte.
    return random.getrandbits(8)


def any_stable_data_version():
    # Random byte.
    return random.getrandbits(8)


def any_leader_router_id():
    # Random byte.
    return random.getrandbits(8)
# Maps the two high bits of a Scan Mask octet to the expected decoded
# router / end-device flags.
scan_mask_map = {
    0x00: {"router": 0, "end_device": 0},
    0x40: {"router": 0, "end_device": 1},
    0x80: {"router": 1, "end_device": 0},
    0xC0: {"router": 1, "end_device": 1},
}
def any_scan_mask_router():
    # Random single-bit scan-mask flag.
    return random.getrandbits(1)


def any_scan_mask_end_device():
    # Random single-bit scan-mask flag.
    return random.getrandbits(1)


def any_scan_mask():
    # Random scan-mask octet; only the two high bits are used (see
    # scan_mask_map), so always a valid key of that table.
    return (random.getrandbits(2) << 6)


def any_link_margin():
    # Random byte.
    return random.getrandbits(8)


def any_status():
    # Random byte.
    return random.getrandbits(8)


def any_version():
    # Random 16-bit version.
    return random.getrandbits(16)


def any_channel_page():
    # Random byte.
    return random.getrandbits(8)


def any_channel():
    # Random 16-bit channel value.
    return random.getrandbits(16)


def any_pan_id():
    # Random 16-bit PAN id.
    return random.getrandbits(16)


def any_timestamp_seconds():
    # Random 48-bit seconds field.
    return random.getrandbits(48)


def any_timestamp_ticks():
    # Random 15-bit ticks field.
    return random.getrandbits(15)


def any_u():
    # Random single bit.
    return random.getrandbits(1)


def any_pp():
    # Random 2-bit field.
    return random.getrandbits(2)


def any_link_quality_3():
    # Random byte.
    return random.getrandbits(8)


def any_link_quality_2():
    # Random byte.
    return random.getrandbits(8)


def any_link_quality_1():
    # Random byte.
    return random.getrandbits(8)


def any_leader_cost():
    # Random byte.
    return random.getrandbits(8)


def any_id_sequence():
    # Random byte.
    # NOTE(review): shadows the earlier 1-bit any_id_sequence definition;
    # this 8-bit version is the one actually used by the tests below.
    return random.getrandbits(8)


def any_active_routers():
    # Random byte.
    return random.getrandbits(8)


def any_sed_buffer_size():
    # Random 16-bit buffer size.
    return random.getrandbits(16)


def any_sed_datagram_count():
    # Random byte.
    return random.getrandbits(8)


def any_tlvs(length=None):
    # List of `length` random bytes (0-16 bytes when length is None).
    if length is None:
        length = random.randint(0, 16)
    return [random.getrandbits(8) for _ in range(length)]


def any_cid():
    # Random 4-bit context id.
    return random.getrandbits(4)


def any_iid():
    # Random 8-byte interface identifier.
    return bytearray([random.getrandbits(8) for _ in range(8)])


def any_ipv6_address():
    # Random 16-byte IPv6 address.
    return bytearray([random.getrandbits(8) for _ in range(16)])


def any_addresses():
    # One compressed and one full address entry.
    addresses = [
        mle.AddressCompressed(any_cid(), any_iid()),
        mle.AddressFull(any_ipv6_address())
    ]
    return addresses


def any_key_id_mode():
    # Random 2-bit key identifier mode (0-3).
    return random.getrandbits(2)


def any_security_level():
    # Random 3-bit security level.
    return random.getrandbits(3)


def any_frame_counter():
    # Random 32-bit frame counter.
    return random.getrandbits(32)
def any_key_id(key_id_mode):
    """Return a random Key ID sized for *key_id_mode*.

    Modes 0-3 map to lengths 0, 1, 5 and 9 bytes respectively.

    Raises:
        ValueError: for a mode outside 0-3 (the original code would have
            failed with an UnboundLocalError instead).
    """
    if key_id_mode == 0:
        length = 0
    elif key_id_mode == 1:
        length = 1
    elif key_id_mode == 2:
        length = 5
    elif key_id_mode == 3:
        length = 9
    else:
        raise ValueError("invalid key_id_mode: {}".format(key_id_mode))
    return bytearray([random.getrandbits(8) for _ in range(length)])
def any_eui64():
    # Random 8-byte EUI-64 identifier.
    return bytearray([random.getrandbits(8) for _ in range(8)])
class TestSourceAddress(unittest.TestCase):

    def test_should_return_address_value_when_address_property_is_called(self):
        """The address given to the constructor is exposed unchanged."""
        expected = any_address()
        self.assertEqual(expected, mle.SourceAddress(expected).address)
class TestSourceAddressFactory(unittest.TestCase):

    def test_should_create_SourceAddress_from_bytearray_when_parse_method_is_called(self):
        """Parsing a big-endian uint16 yields a SourceAddress with that value."""
        expected = any_address()
        parsed = mle.SourceAddressFactory().parse(
            io.BytesIO(struct.pack(">H", expected)), {})
        self.assertIsInstance(parsed, mle.SourceAddress)
        self.assertEqual(expected, parsed.address)
class TestMode(unittest.TestCase):
    """Each Mode property returns the matching constructor argument."""

    def test_should_return_receiver_value_when_receiver_property_is_called(self):
        expected = any_receiver()
        obj = mle.Mode(expected, any_secure(), any_device_type(), any_network_data())
        self.assertEqual(expected, obj.receiver)

    def test_should_return_secure_value_when_secure_property_is_called(self):
        expected = any_secure()
        obj = mle.Mode(any_receiver(), expected, any_device_type(), any_network_data())
        self.assertEqual(expected, obj.secure)

    def test_should_return_device_type_value_when_device_type_property_is_called(self):
        expected = any_device_type()
        obj = mle.Mode(any_receiver(), any_secure(), expected, any_network_data())
        self.assertEqual(expected, obj.device_type)

    def test_should_return_network_data_value_when_network_data_property_is_called(self):
        expected = any_network_data()
        obj = mle.Mode(any_receiver(), any_secure(), any_device_type(), expected)
        self.assertEqual(expected, obj.network_data)
class TestModeFactory(unittest.TestCase):

    def test_should_create_Mode_from_bytearray_when_parse_method_is_called(self):
        """Each flag decoded from the mode octet matches the mode_map table."""
        mode = any_mode()
        parsed = mle.ModeFactory().parse(io.BytesIO(bytearray([mode])), {})
        self.assertIsInstance(parsed, mle.Mode)
        expected = mode_map[mode]
        self.assertEqual(expected["receiver"], parsed.receiver)
        self.assertEqual(expected["secure"], parsed.secure)
        self.assertEqual(expected["device_type"], parsed.device_type)
        self.assertEqual(expected["network_data"], parsed.network_data)
class TestTimeout(unittest.TestCase):

    def test_should_return_timeout_value_when_timeout_property_is_called(self):
        """The timeout given to the constructor is exposed unchanged."""
        expected = any_timeout()
        self.assertEqual(expected, mle.Timeout(expected).timeout)
class TestTimeoutFactory(unittest.TestCase):

    def test_should_create_Timeout_from_bytearray_when_parse_method_is_called(self):
        """Parsing a big-endian uint32 yields a Timeout with that value."""
        expected = any_timeout()
        parsed = mle.TimeoutFactory().parse(
            io.BytesIO(struct.pack(">I", expected)), {})
        self.assertIsInstance(parsed, mle.Timeout)
        self.assertEqual(expected, parsed.timeout)
class TestChallenge(unittest.TestCase):

    def test_should_return_challenge_value_when_challenge_property_is_called(self):
        """The challenge bytes given to the constructor are exposed unchanged."""
        expected = any_challenge()
        self.assertEqual(expected, mle.Challenge(expected).challenge)
class TestChallengeFactory(unittest.TestCase):

    def test_should_create_Challenge_from_bytearray_when_parse_method_is_called(self):
        """Parsing raw bytes yields a Challenge wrapping those bytes."""
        expected = any_challenge()
        parsed = mle.ChallengeFactory().parse(io.BytesIO(expected), {})
        self.assertIsInstance(parsed, mle.Challenge)
        self.assertEqual(expected, parsed.challenge)
class TestResponse(unittest.TestCase):

    def test_should_return_response_value_when_response_property_is_called(self):
        """The response bytes given to the constructor are exposed unchanged."""
        expected = any_response()
        self.assertEqual(expected, mle.Response(expected).response)
class TestResponseFactory(unittest.TestCase):

    # NOTE(review): method name says "Challenge" (copy/paste from the class
    # above) but it exercises ResponseFactory; renaming would change the
    # test id, so the name is preserved.
    def test_should_create_Challenge_from_bytearray_when_parse_method_is_called(self):
        """Parsing raw bytes yields a Response wrapping those bytes."""
        expected = any_response()
        parsed = mle.ResponseFactory().parse(io.BytesIO(expected), {})
        self.assertIsInstance(parsed, mle.Response)
        self.assertEqual(expected, parsed.response)
class TestLinkLayerFrameCounter(unittest.TestCase):

    def test_should_return_frame_counter_value_when_frame_counter_property_is_called(self):
        """The counter given to the constructor is exposed unchanged."""
        expected = any_link_layer_frame_counter()
        self.assertEqual(expected, mle.LinkLayerFrameCounter(expected).frame_counter)
class TestLinkLayerFrameCounterFactory(unittest.TestCase):

    def test_should_create_LinkLayerFrameCounter_from_bytearray_when_parse_method_is_called(self):
        """Parsing a big-endian uint32 yields a LinkLayerFrameCounter."""
        expected = any_link_layer_frame_counter()
        parsed = mle.LinkLayerFrameCounterFactory().parse(
            io.BytesIO(struct.pack(">I", expected)), {})
        self.assertIsInstance(parsed, mle.LinkLayerFrameCounter)
        self.assertEqual(expected, parsed.frame_counter)
class TestMleFrameCounter(unittest.TestCase):

    def test_should_return_frame_counter_value_when_frame_counter_property_is_called(self):
        """The counter given to the constructor is exposed unchanged."""
        expected = any_mle_frame_counter()
        self.assertEqual(expected, mle.MleFrameCounter(expected).frame_counter)
class TestMleFrameCounterFactory(unittest.TestCase):

    def test_should_create_MleFrameCounter_from_bytearray_when_parse_method_is_called(self):
        """Parsing a big-endian uint32 yields an MleFrameCounter."""
        expected = any_mle_frame_counter()
        parsed = mle.MleFrameCounterFactory().parse(
            io.BytesIO(struct.pack(">I", expected)), {})
        self.assertIsInstance(parsed, mle.MleFrameCounter)
        self.assertEqual(expected, parsed.frame_counter)
class TestLinkQualityAndRouteData(unittest.TestCase):
    """Each LinkQualityAndRouteData property returns its ctor argument."""

    def test_should_return_output_value_when_output_property_is_called(self):
        expected = any_output()
        obj = mle.LinkQualityAndRouteData(expected, any_input(), any_route())
        self.assertEqual(expected, obj.output)

    def test_should_return_input_value_when_input_property_is_called(self):
        expected = any_input()
        obj = mle.LinkQualityAndRouteData(any_output(), expected, any_route())
        self.assertEqual(expected, obj.input)

    def test_should_return_route_value_when_route_property_is_called(self):
        expected = any_route()
        obj = mle.LinkQualityAndRouteData(any_output(), any_input(), expected)
        self.assertEqual(expected, obj.route)
class TestLinkQualityAndRouteDataFactory(unittest.TestCase):

    def test_should_create_LinkQualityAndRouteData_from_well_known_byte_when_parse_method_is_called(self):
        """0x66 decodes to output=1 (bits 7-6), input=2 (bits 5-4), route=6 (bits 3-0)."""
        parsed = mle.LinkQualityAndRouteDataFactory().parse(
            io.BytesIO(bytearray([0x66])), {})
        self.assertEqual(1, parsed.output)
        self.assertEqual(2, parsed.input)
        self.assertEqual(6, parsed.route)

    def test_should_create_LinkQualityAndRouteData_from_bytearray_when_parse_method_is_called(self):
        """output/input/route round-trip through the packed byte layout."""
        output, _input, route = any_output(), any_input(), any_route()
        packed = (output << 6) | (_input << 4) | route
        parsed = mle.LinkQualityAndRouteDataFactory().parse(
            io.BytesIO(bytearray([packed])), {})
        self.assertIsInstance(parsed, mle.LinkQualityAndRouteData)
        self.assertEqual(output, parsed.output)
        self.assertEqual(_input, parsed.input)
        self.assertEqual(route, parsed.route)
class TestRoute64(unittest.TestCase):
    """Each Route64 property returns the matching constructor argument."""

    def test_should_return_id_sequence_value_when_id_sequence_property_is_called(self):
        expected = any_id_sequence()
        obj = mle.Route64(expected, any_router_id_mask(), any_link_quality_and_route_data())
        self.assertEqual(expected, obj.id_sequence)

    def test_should_return_router_id_mask_value_when_router_id_mask_property_is_called(self):
        expected = any_router_id_mask()
        obj = mle.Route64(any_id_sequence(), expected, any_link_quality_and_route_data())
        self.assertEqual(expected, obj.router_id_mask)

    def test_should_return_link_quality_and_route_data_value_when_link_quality_and_route_data_property_is_called(self):
        expected = any_link_quality_and_route_data()
        obj = mle.Route64(any_id_sequence(), any_router_id_mask(), expected)
        self.assertEqual(expected, obj.link_quality_and_route_data)
class TestRoute64Factory(unittest.TestCase):

    def test_should_create_Route64_from_bytearray_when_parse_method_is_called(self):
        """Route64 wire form: id_sequence byte, 64-bit router mask, then one
        LQRD byte per router bit set in the mask."""

        class FakeLQRDFactory:
            # Stand-in that returns each raw LQRD byte unparsed.
            def parse(self, data, context):
                return ord(data.read(1))

        id_sequence = any_id_sequence()
        router_id_mask = any_router_id_mask()
        # One link-quality/route-data byte per set bit in the 64-bit mask.
        router_count = bin(router_id_mask).count('1')
        lqrd = any_link_quality_and_route_data(router_count)
        payload = (bytearray([id_sequence])
                   + struct.pack(">Q", router_id_mask)
                   + bytearray(lqrd))
        parsed = mle.Route64Factory(FakeLQRDFactory()).parse(io.BytesIO(payload), {})
        self.assertIsInstance(parsed, mle.Route64)
        self.assertEqual(id_sequence, parsed.id_sequence)
        self.assertEqual(router_id_mask, parsed.router_id_mask)
        self.assertEqual(list(lqrd), parsed.link_quality_and_route_data)
class TestAddress16(unittest.TestCase):
    """Unit tests for the mle.Address16 TLV."""

    def test_should_return_address_value_when_address_property_is_called(self):
        """The `address` property must echo the constructor argument."""
        # GIVEN a random short address wrapped in an Address16 TLV
        expected = any_address()
        tlv = mle.Address16(expected)
        # WHEN / THEN the property round-trips the value unchanged
        self.assertEqual(expected, tlv.address)
class TestAddress16Factory(unittest.TestCase):
    """Unit tests for mle.Address16Factory.parse."""

    def test_should_create_Address16_from_bytearray_when_parse_method_is_called(self):
        """Parsing a big-endian 16-bit value yields an Address16 carrying it."""
        # GIVEN a short address encoded as two big-endian bytes
        expected = any_address()
        stream = io.BytesIO(struct.pack(">H", expected))
        # WHEN parsing the stream
        parsed = mle.Address16Factory().parse(stream, dict())
        # THEN an Address16 with the original value is produced
        self.assertIsInstance(parsed, mle.Address16)
        self.assertEqual(expected, parsed.address)
class TestLeaderData(unittest.TestCase):
    """Unit tests for the mle.LeaderData TLV: every constructor argument is
    exposed unchanged through its matching read-only property."""

    @staticmethod
    def _leader_data_with(position, value):
        """Build a LeaderData whose positional argument `position` equals
        `value`; every other argument is freshly randomized."""
        generators = [any_partition_id, any_weighting, any_data_version,
                      any_stable_data_version, any_leader_router_id]
        args = [value if i == position else gen()
                for i, gen in enumerate(generators)]
        return mle.LeaderData(*args)

    def test_should_return_partition_id_value_when_partition_id_property_is_called(self):
        partition_id = any_partition_id()
        self.assertEqual(partition_id, self._leader_data_with(0, partition_id).partition_id)

    def test_should_return_weighting_value_when_weighting_property_is_called(self):
        weighting = any_weighting()
        self.assertEqual(weighting, self._leader_data_with(1, weighting).weighting)

    def test_should_return_data_version_value_when_data_version_property_is_called(self):
        data_version = any_data_version()
        self.assertEqual(data_version, self._leader_data_with(2, data_version).data_version)

    def test_should_return_stable_data_version_value_when_stable_data_version_property_is_called(self):
        stable_data_version = any_stable_data_version()
        self.assertEqual(stable_data_version, self._leader_data_with(3, stable_data_version).stable_data_version)

    def test_should_return_leader_router_id_value_when_leader_router_id_property_is_called(self):
        leader_router_id = any_leader_router_id()
        self.assertEqual(leader_router_id, self._leader_data_with(4, leader_router_id).leader_router_id)
class TestLeaderDataFactory(unittest.TestCase):
    """Unit tests for mle.LeaderDataFactory.parse."""

    # Fixed: this test was misnamed "...create_Address16..." (copy-paste
    # from the Address16 factory test); it actually builds a LeaderData.
    def test_should_create_LeaderData_from_bytearray_when_parse_method_is_called(self):
        # GIVEN random values for all five LeaderData fields
        partition_id = any_partition_id()
        weighting = any_weighting()
        data_version = any_data_version()
        stable_data_version = any_stable_data_version()
        leader_router_id = any_leader_router_id()
        factory = mle.LeaderDataFactory()
        # Wire layout: 32-bit big-endian partition id followed by four
        # single-byte fields.
        data = bytearray(struct.pack(">I", partition_id)) + \
            bytearray([weighting, data_version, stable_data_version, leader_router_id])
        # WHEN
        actual_leader_data = factory.parse(io.BytesIO(data), dict())
        # THEN every field round-trips through the parser
        self.assertTrue(isinstance(actual_leader_data, mle.LeaderData))
        self.assertEqual(partition_id, actual_leader_data.partition_id)
        self.assertEqual(weighting, actual_leader_data.weighting)
        self.assertEqual(data_version, actual_leader_data.data_version)
        self.assertEqual(stable_data_version, actual_leader_data.stable_data_version)
        self.assertEqual(leader_router_id, actual_leader_data.leader_router_id)
class TestNetworkData(unittest.TestCase):
    """Unit tests for the mle.NetworkData TLV."""

    def test_should_return_tlvs_value_when_tlvs_property_is_called(self):
        """The `tlvs` property must echo the constructor argument."""
        expected_tlvs = any_tlvs()
        tlv = mle.NetworkData(expected_tlvs)
        self.assertEqual(expected_tlvs, tlv.tlvs)
class TestNetworkDataFactory(unittest.TestCase):
    """Unit tests for mle.NetworkDataFactory.parse."""

    # Fixed: this test was misnamed "...create_TlvRequest..." (copy-paste
    # from the TlvRequest factory test); it actually builds a NetworkData.
    def test_should_create_NetworkData_from_bytearray_when_parse_method_is_called(self):
        # GIVEN
        class DummyNetworkTlvsFactory:
            # Stand-in sub-factory: returns the remaining payload as a plain
            # list of byte values so expectations stay byte-for-byte.
            def parse(self, data, context):
                return [b for b in bytearray(data.read())]
        tlvs = any_tlvs()
        factory = mle.NetworkDataFactory(DummyNetworkTlvsFactory())
        data = bytearray(tlvs)
        # WHEN
        actual_network_data = factory.parse(io.BytesIO(data), dict())
        # THEN
        self.assertTrue(isinstance(actual_network_data, mle.NetworkData))
        self.assertEqual(tlvs, actual_network_data.tlvs)
class TestTlvRequest(unittest.TestCase):
    """Unit tests for the mle.TlvRequest TLV."""

    def test_should_return_tlvs_value_when_tlvs_property_is_called(self):
        """The `tlvs` property must echo the constructor argument."""
        expected_tlvs = any_tlvs()
        tlv = mle.TlvRequest(expected_tlvs)
        self.assertEqual(expected_tlvs, tlv.tlvs)
class TestTlvRequestFactory(unittest.TestCase):
    """Unit tests for mle.TlvRequestFactory.parse."""

    def test_should_create_TlvRequest_from_bytearray_when_parse_method_is_called(self):
        """Parsing raw bytes yields a TlvRequest listing those TLV ids."""
        expected_tlvs = any_tlvs()
        stream = io.BytesIO(bytearray(expected_tlvs))
        parsed = mle.TlvRequestFactory().parse(stream, dict())
        self.assertIsInstance(parsed, mle.TlvRequest)
        self.assertEqual(expected_tlvs, parsed.tlvs)
class TestScanMask(unittest.TestCase):
    """Unit tests for the mle.ScanMask TLV properties."""

    def test_should_return_router_value_when_router_property_is_called(self):
        """The `router` flag must echo the constructor argument."""
        expected_router = any_scan_mask_router()
        tlv = mle.ScanMask(expected_router, any_scan_mask_end_device())
        self.assertEqual(expected_router, tlv.router)

    def test_should_return_end_device_value_when_end_device_property_is_called(self):
        """The `end_device` flag must echo the constructor argument."""
        expected_end_device = any_scan_mask_end_device()
        tlv = mle.ScanMask(any_scan_mask_router(), expected_end_device)
        self.assertEqual(expected_end_device, tlv.end_device)
class TestScanMaskFactory(unittest.TestCase):
    """Unit tests for mle.ScanMaskFactory.parse."""

    def test_should_create_ScanMask_from_bytearray_when_parse_method_is_called(self):
        # GIVEN a random one-byte scan mask
        scan_mask = any_scan_mask()
        factory = mle.ScanMaskFactory()
        data = bytearray([scan_mask])
        # WHEN
        actual_scan_mask = factory.parse(io.BytesIO(data), dict())
        # THEN the decoded flags match the module-level expectation table
        # (scan_mask_map maps each mask byte to its expected flag values).
        self.assertTrue(isinstance(actual_scan_mask, mle.ScanMask))
        self.assertEqual(scan_mask_map[scan_mask]["router"], actual_scan_mask.router)
        self.assertEqual(scan_mask_map[scan_mask]["end_device"], actual_scan_mask.end_device)
class TestConnectivity(unittest.TestCase):
    """Unit tests for the mle.Connectivity TLV: every constructor argument is
    exposed unchanged through its matching read-only property."""

    @staticmethod
    def _connectivity_with(position, value):
        """Build a Connectivity whose positional argument `position` equals
        `value`; every other argument is freshly randomized."""
        generators = [any_pp, any_link_quality_3, any_link_quality_2,
                      any_link_quality_1, any_leader_cost, any_id_sequence,
                      any_active_routers, any_sed_buffer_size,
                      any_sed_datagram_count]
        args = [value if i == position else gen()
                for i, gen in enumerate(generators)]
        return mle.Connectivity(*args)

    def test_should_return_pp_value_when_pp_property_is_called(self):
        pp = any_pp()
        self.assertEqual(pp, self._connectivity_with(0, pp).pp)

    def test_should_return_link_quality_3_value_when_link_quality_3_property_is_called(self):
        link_quality_3 = any_link_quality_3()
        self.assertEqual(link_quality_3, self._connectivity_with(1, link_quality_3).link_quality_3)

    def test_should_return_link_quality_2_value_when_link_quality_2_property_is_called(self):
        link_quality_2 = any_link_quality_2()
        self.assertEqual(link_quality_2, self._connectivity_with(2, link_quality_2).link_quality_2)

    def test_should_return_link_quality_1_value_when_link_quality_1_property_is_called(self):
        link_quality_1 = any_link_quality_1()
        self.assertEqual(link_quality_1, self._connectivity_with(3, link_quality_1).link_quality_1)

    def test_should_return_leader_cost_value_when_leader_cost_property_is_called(self):
        leader_cost = any_leader_cost()
        self.assertEqual(leader_cost, self._connectivity_with(4, leader_cost).leader_cost)

    def test_should_return_id_sequence_value_when_id_sequence_property_is_called(self):
        id_sequence = any_id_sequence()
        self.assertEqual(id_sequence, self._connectivity_with(5, id_sequence).id_sequence)

    def test_should_return_active_routers_value_when_active_routers_property_is_called(self):
        active_routers = any_active_routers()
        self.assertEqual(active_routers, self._connectivity_with(6, active_routers).active_routers)

    def test_should_return_sed_buffer_size_value_when_sed_buffer_size_property_is_called(self):
        sed_buffer_size = any_sed_buffer_size()
        self.assertEqual(sed_buffer_size, self._connectivity_with(7, sed_buffer_size).sed_buffer_size)

    def test_should_return_sed_datagram_count_value_when_sed_datagram_count_property_is_called(self):
        sed_datagram_count = any_sed_datagram_count()
        self.assertEqual(sed_datagram_count, self._connectivity_with(8, sed_datagram_count).sed_datagram_count)
class TestConnectivityFactory(unittest.TestCase):
    """Unit tests for mle.ConnectivityFactory.parse."""

    def test_should_create_Connectivity_from_bytearray_when_parse_method_is_called(self):
        # GIVEN random values for every Connectivity field
        pp = any_pp()
        link_quality_3 = any_link_quality_3()
        link_quality_2 = any_link_quality_2()
        link_quality_1 = any_link_quality_1()
        leader_cost = any_leader_cost()
        id_sequence = any_id_sequence()
        active_routers = any_active_routers()
        sed_buffer_size = any_sed_buffer_size()
        sed_datagram_count = any_sed_datagram_count()
        factory = mle.ConnectivityFactory()
        # Wire layout: seven single-byte fields, a 16-bit big-endian SED
        # buffer size, then a single-byte SED datagram count.
        data = bytearray([pp, link_quality_3, link_quality_2, link_quality_1, leader_cost, id_sequence,
                          active_routers]) + struct.pack(">H", sed_buffer_size) + bytearray([sed_datagram_count])
        # WHEN
        actual_connectivity = factory.parse(io.BytesIO(data), dict())
        # THEN every field round-trips through the parser
        self.assertTrue(isinstance(actual_connectivity, mle.Connectivity))
        self.assertEqual(pp, actual_connectivity.pp)
        self.assertEqual(link_quality_3, actual_connectivity.link_quality_3)
        self.assertEqual(link_quality_2, actual_connectivity.link_quality_2)
        self.assertEqual(link_quality_1, actual_connectivity.link_quality_1)
        self.assertEqual(leader_cost, actual_connectivity.leader_cost)
        self.assertEqual(id_sequence, actual_connectivity.id_sequence)
        self.assertEqual(active_routers, actual_connectivity.active_routers)
        self.assertEqual(sed_buffer_size, actual_connectivity.sed_buffer_size)
        self.assertEqual(sed_datagram_count, actual_connectivity.sed_datagram_count)

    def test_should_create_Connectivity_without_sed_data_when_parse_method_is_called(self):
        # GIVEN only the seven mandatory single-byte fields; the optional SED
        # fields are deliberately absent from the wire data.
        # (Fixed: removed unused sed_buffer_size / sed_datagram_count locals
        # that were generated but never placed in the payload.)
        pp = any_pp()
        link_quality_3 = any_link_quality_3()
        link_quality_2 = any_link_quality_2()
        link_quality_1 = any_link_quality_1()
        leader_cost = any_leader_cost()
        id_sequence = any_id_sequence()
        active_routers = any_active_routers()
        factory = mle.ConnectivityFactory()
        data = bytearray([pp, link_quality_3, link_quality_2, link_quality_1, leader_cost, id_sequence,
                          active_routers])
        # WHEN
        actual_connectivity = factory.parse(io.BytesIO(data), dict())
        # THEN the mandatory fields round-trip and the SED fields are None
        self.assertTrue(isinstance(actual_connectivity, mle.Connectivity))
        self.assertEqual(pp, actual_connectivity.pp)
        self.assertEqual(link_quality_3, actual_connectivity.link_quality_3)
        self.assertEqual(link_quality_2, actual_connectivity.link_quality_2)
        self.assertEqual(link_quality_1, actual_connectivity.link_quality_1)
        self.assertEqual(leader_cost, actual_connectivity.leader_cost)
        self.assertEqual(id_sequence, actual_connectivity.id_sequence)
        self.assertEqual(active_routers, actual_connectivity.active_routers)
        self.assertIsNone(actual_connectivity.sed_buffer_size)
        self.assertIsNone(actual_connectivity.sed_datagram_count)
class TestLinkMargin(unittest.TestCase):
    """Unit tests for the mle.LinkMargin TLV."""

    def test_should_return_link_margin_value_when_link_margin_property_is_called(self):
        """The `link_margin` property must echo the constructor argument."""
        expected = any_link_margin()
        tlv = mle.LinkMargin(expected)
        self.assertEqual(expected, tlv.link_margin)
class TestLinkMarginFactory(unittest.TestCase):
    """Unit tests for mle.LinkMarginFactory.parse."""

    def test_should_create_LinkMargin_from_bytearray_when_parse_method_is_called(self):
        """Parsing a single byte yields a LinkMargin carrying its value."""
        expected = any_link_margin()
        stream = io.BytesIO(bytearray([expected]))
        parsed = mle.LinkMarginFactory().parse(stream, dict())
        self.assertIsInstance(parsed, mle.LinkMargin)
        self.assertEqual(expected, parsed.link_margin)
class TestStatus(unittest.TestCase):
    """Unit tests for the mle.Status TLV."""

    def test_should_return_status_value_when_status_property_is_called(self):
        """The `status` property must echo the constructor argument."""
        expected = any_status()
        tlv = mle.Status(expected)
        self.assertEqual(expected, tlv.status)
class TestStatusFactory(unittest.TestCase):
    """Unit tests for mle.StatusFactory.parse."""

    def test_should_create_Status_from_bytearray_when_parse_method_is_called(self):
        """Parsing a single byte yields a Status carrying its value."""
        expected = any_status()
        stream = io.BytesIO(bytearray([expected]))
        parsed = mle.StatusFactory().parse(stream, dict())
        self.assertIsInstance(parsed, mle.Status)
        self.assertEqual(expected, parsed.status)
class TestVersion(unittest.TestCase):
    """Unit tests for the mle.Version TLV."""

    def test_should_return_version_value_when_version_property_is_called(self):
        """The `version` property must echo the constructor argument."""
        expected = any_version()
        tlv = mle.Version(expected)
        self.assertEqual(expected, tlv.version)
class TestVersionFactory(unittest.TestCase):
    """Unit tests for mle.VersionFactory.parse."""

    def test_should_create_Version_from_bytearray_when_parse_method_is_called(self):
        """Parsing a big-endian 16-bit value yields a Version carrying it."""
        expected = any_version()
        stream = io.BytesIO(struct.pack(">H", expected))
        parsed = mle.VersionFactory().parse(stream, dict())
        self.assertIsInstance(parsed, mle.Version)
        self.assertEqual(expected, parsed.version)
class TestAddressRegistrationFull(unittest.TestCase):
    """Unit tests for the mle.AddressFull TLV."""

    def test_should_return_ipv6_address_value_when_ipv6_address_property_is_called(self):
        """The `ipv6_address` property must echo the constructor argument."""
        expected = any_ipv6_address()
        tlv = mle.AddressFull(expected)
        self.assertEqual(expected, tlv.ipv6_address)
class TestAddressRegistrationFullFactory(unittest.TestCase):
    """Unit tests for mle.AddressFullFactory.parse."""

    def test_should_create_AddressFull_from_bytearray_when_parse_method_is_called(self):
        # GIVEN
        ipv6_address = any_ipv6_address()
        factory = mle.AddressFullFactory()
        # Leading 0x00 control byte precedes the full IPv6 address bytes.
        data = bytearray([0x00]) + bytearray(ipv6_address)
        # WHEN
        actual_addr_reg_full = factory.parse(io.BytesIO(data), dict())
        # THEN
        self.assertTrue(isinstance(actual_addr_reg_full, mle.AddressFull))
        self.assertEqual(ipv6_address, actual_addr_reg_full.ipv6_address)
class TestAddressRegistrationCompressed(unittest.TestCase):
    """Unit tests for the mle.AddressCompressed TLV properties."""

    def test_should_return_cid_value_when_cid_property_is_called(self):
        # GIVEN
        cid = any_cid()
        addr_reg_compressed_obj = mle.AddressCompressed(cid, any_iid())
        # WHEN
        actual_cid = addr_reg_compressed_obj.cid
        # THEN
        self.assertEqual(cid, actual_cid)

    # Fixed: this test was misnamed "...return_cid_value_when_iid_property..."
    # (copy-paste); it exercises the `iid` property.
    def test_should_return_iid_value_when_iid_property_is_called(self):
        # GIVEN
        iid = any_iid()
        addr_reg_compressed_obj = mle.AddressCompressed(any_cid(), iid)
        # WHEN
        actual_iid = addr_reg_compressed_obj.iid
        # THEN
        self.assertEqual(iid, actual_iid)
class TestAddressRegistrationCompressedFactory(unittest.TestCase):
    """Unit tests for mle.AddressCompressedFactory.parse."""

    def test_should_create_AddressRegistrationCompressed_from_bytearray_when_parse_method_is_called(self):
        # GIVEN
        cid = any_cid()
        iid = any_iid()
        factory = mle.AddressCompressedFactory()
        # Control byte: bit 7 set marks a compressed entry, low bits carry
        # the context id; the interface identifier bytes follow.
        data = bytearray([(1 << 7) | cid]) + iid
        # WHEN
        actual_addr_reg_compressed = factory.parse(io.BytesIO(data), dict())
        # THEN
        self.assertTrue(isinstance(actual_addr_reg_compressed, mle.AddressCompressed))
        self.assertEqual(cid, actual_addr_reg_compressed.cid)
        self.assertEqual(iid, actual_addr_reg_compressed.iid)
class TestAddressRegistration(unittest.TestCase):
    """Unit tests for the mle.AddressRegistration TLV."""

    def test_should_return_addresses_value_when_addresses_property_is_called(self):
        """The `addresses` property must echo the constructor argument."""
        expected_addresses = any_addresses()
        tlv = mle.AddressRegistration(expected_addresses)
        self.assertEqual(expected_addresses, tlv.addresses)
class TestAddressRegistrationFactory(unittest.TestCase):
    """Unit tests for mle.AddressRegistrationFactory.parse with a mixed
    compressed + full address payload."""

    def test_should_create_AddressRegistration_from_bytearray_when_parse_method_is_called(self):
        # GIVEN one compressed entry (control bit 7 set, cid in low bits,
        # then the iid bytes) followed by one full entry (0x00 control byte,
        # then the complete IPv6 address).
        cid = any_cid()
        iid = any_iid()
        ipv6_address = any_ipv6_address()
        addresses = [
            mle.AddressCompressed(cid, iid),
            mle.AddressFull(ipv6_address)
        ]
        factory = mle.AddressRegistrationFactory(mle.AddressCompressedFactory(),
                                                 mle.AddressFullFactory())
        data = bytearray([(1 << 7) | cid]) + iid + bytearray([0]) + ipv6_address
        # WHEN
        actual_addr_reg = factory.parse(io.BytesIO(data), dict())
        # THEN both entries are decoded in order with their fields intact
        self.assertTrue(isinstance(actual_addr_reg, mle.AddressRegistration))
        self.assertEqual(addresses[0].cid, actual_addr_reg.addresses[0].cid)
        self.assertEqual(addresses[0].iid, actual_addr_reg.addresses[0].iid)
        self.assertEqual(addresses[1].ipv6_address, actual_addr_reg.addresses[1].ipv6_address)
class TestChannel(unittest.TestCase):
    """Unit tests for the mle.Channel TLV properties."""

    def test_should_return_channel_page_value_when_channel_page_property_is_called(self):
        """The `channel_page` property must echo the constructor argument."""
        expected_page = any_channel_page()
        tlv = mle.Channel(expected_page, any_channel())
        self.assertEqual(expected_page, tlv.channel_page)

    def test_should_return_channel_value_when_channel_property_is_called(self):
        """The `channel` property must echo the constructor argument."""
        expected_channel = any_channel()
        tlv = mle.Channel(any_channel_page(), expected_channel)
        self.assertEqual(expected_channel, tlv.channel)
class TestChannelFactory(unittest.TestCase):
    """Unit tests for mle.ChannelFactory.parse."""

    def test_should_create_Channel_from_bytearray_when_parse_method_is_called(self):
        # GIVEN
        channel_page = any_channel_page()
        channel = any_channel()
        factory = mle.ChannelFactory()
        # Wire layout: 1-byte channel page, 16-bit big-endian channel number.
        data = bytearray([channel_page]) + struct.pack(">H", channel)
        # WHEN
        actual_channel = factory.parse(io.BytesIO(data), dict())
        # THEN
        self.assertTrue(isinstance(actual_channel, mle.Channel))
        self.assertEqual(channel_page, actual_channel.channel_page)
        self.assertEqual(channel, actual_channel.channel)
class TestPanId(unittest.TestCase):
    """Unit tests for the mle.PanId TLV."""

    def test_should_return_pan_id_value_when_pan_id_property_is_called(self):
        """The `pan_id` property must echo the constructor argument."""
        expected = any_pan_id()
        tlv = mle.PanId(expected)
        self.assertEqual(expected, tlv.pan_id)
class TestPanIdFactory(unittest.TestCase):
    """Unit tests for mle.PanIdFactory.parse."""

    def test_should_create_PanId_from_bytearray_when_parse_method_is_called(self):
        """Parsing a big-endian 16-bit value yields a PanId carrying it."""
        expected = any_pan_id()
        stream = io.BytesIO(struct.pack(">H", expected))
        parsed = mle.PanIdFactory().parse(stream, dict())
        self.assertIsInstance(parsed, mle.PanId)
        self.assertEqual(expected, parsed.pan_id)
class TestActiveTimestamp(unittest.TestCase):
    """Unit tests for the mle.ActiveTimestamp TLV properties."""

    def test_should_return_timestamp_seconds_value_when_timestamp_seconds_property_is_called(self):
        """The `timestamp_seconds` property must echo the constructor argument."""
        seconds = any_timestamp_seconds()
        tlv = mle.ActiveTimestamp(seconds, any_timestamp_ticks(), any_u())
        self.assertEqual(seconds, tlv.timestamp_seconds)

    def test_should_return_timestamp_ticks_value_when_timestamp_ticks_property_is_called(self):
        """The `timestamp_ticks` property must echo the constructor argument."""
        ticks = any_timestamp_ticks()
        tlv = mle.ActiveTimestamp(any_timestamp_seconds(), ticks, any_u())
        self.assertEqual(ticks, tlv.timestamp_ticks)

    def test_should_return_u_value_when_u_property_is_called(self):
        """The `u` flag must echo the constructor argument."""
        u_flag = any_u()
        tlv = mle.ActiveTimestamp(any_timestamp_seconds(), any_timestamp_ticks(), u_flag)
        self.assertEqual(u_flag, tlv.u)
class TestActiveTimestampFactory(unittest.TestCase):
    """Unit tests for mle.ActiveTimestampFactory.parse."""

    def test_should_create_ActiveTimestamp_from_bytearray_when_parse_method_is_called(self):
        # GIVEN
        timestamp_seconds = any_timestamp_seconds()
        timestamp_ticks = any_timestamp_ticks()
        u = any_u()
        factory = mle.ActiveTimestampFactory()
        # Wire layout: 48-bit seconds (top two bytes of the packed 64-bit
        # value are dropped), then 16 bits holding the ticks shifted left by
        # one with the U flag in the least-significant bit.
        data = struct.pack(">Q", timestamp_seconds)[2:] + struct.pack(">H", (timestamp_ticks << 1) | u)
        # WHEN
        active_timestamp = factory.parse(io.BytesIO(data), dict())
        # THEN
        self.assertTrue(isinstance(active_timestamp, mle.ActiveTimestamp))
        self.assertEqual(timestamp_seconds, active_timestamp.timestamp_seconds)
        self.assertEqual(timestamp_ticks, active_timestamp.timestamp_ticks)
        self.assertEqual(u, active_timestamp.u)
class TestPendingTimestamp(unittest.TestCase):
    """Unit tests for the mle.PendingTimestamp TLV properties."""

    def test_should_return_timestamp_seconds_value_when_timestamp_seconds_property_is_called(self):
        """The `timestamp_seconds` property must echo the constructor argument."""
        seconds = any_timestamp_seconds()
        tlv = mle.PendingTimestamp(seconds, any_timestamp_ticks(), any_u())
        self.assertEqual(seconds, tlv.timestamp_seconds)

    def test_should_return_timestamp_ticks_value_when_timestamp_ticks_property_is_called(self):
        """The `timestamp_ticks` property must echo the constructor argument."""
        ticks = any_timestamp_ticks()
        tlv = mle.PendingTimestamp(any_timestamp_seconds(), ticks, any_u())
        self.assertEqual(ticks, tlv.timestamp_ticks)

    def test_should_return_u_value_when_u_property_is_called(self):
        """The `u` flag must echo the constructor argument."""
        u_flag = any_u()
        tlv = mle.PendingTimestamp(any_timestamp_seconds(), any_timestamp_ticks(), u_flag)
        self.assertEqual(u_flag, tlv.u)
class TestPendingTimestampFactory(unittest.TestCase):
    """Unit tests for mle.PendingTimestampFactory.parse."""

    def test_should_create_PendingTimestamp_from_bytearray_when_parse_method_is_called(self):
        # GIVEN
        timestamp_seconds = any_timestamp_seconds()
        timestamp_ticks = any_timestamp_ticks()
        u = any_u()
        factory = mle.PendingTimestampFactory()
        # Same wire layout as ActiveTimestamp: 48-bit seconds followed by
        # 15-bit ticks with the U flag in the least-significant bit.
        data = struct.pack(">Q", timestamp_seconds)[2:] + struct.pack(">H", (timestamp_ticks << 1) | u)
        # WHEN
        pending_timestamp = factory.parse(io.BytesIO(data), dict())
        # THEN
        self.assertTrue(isinstance(pending_timestamp, mle.PendingTimestamp))
        self.assertEqual(timestamp_seconds, pending_timestamp.timestamp_seconds)
        self.assertEqual(timestamp_ticks, pending_timestamp.timestamp_ticks)
        self.assertEqual(u, pending_timestamp.u)
class TestMleCommandFactory(unittest.TestCase):
    """Unit tests for mle.MleCommandFactory.parse using a captured payload."""

    def test_should_create_MleCommand_from_bytearray_when_parse_method_is_called(self):
        # GIVEN a raw MLE command: a command-type byte (0x0b) followed by a
        # sequence of encoded TLVs.
        data = bytearray([0x0b, 0x04, 0x08, 0xa5, 0xf2, 0x9b, 0xde, 0xe3,
                          0xd8, 0xbe, 0xb9, 0x05, 0x04, 0x00, 0x00, 0x00,
                          0x00, 0x08, 0x04, 0x00, 0x00, 0x00, 0x01, 0x01,
                          0x01, 0x0d, 0x02, 0x04, 0x00, 0x00, 0x00, 0xf0,
                          0x12, 0x02, 0x00, 0x02, 0x13, 0x09, 0x80, 0x86,
                          0xa2, 0x1b, 0x81, 0x6d, 0xb8, 0xb5, 0xe8, 0x0d,
                          0x03, 0x0a, 0x0c, 0x09])
        factory = mle.MleCommandFactory(config.create_default_mle_tlvs_factories())
        # WHEN
        actual_mle_command = factory.parse(io.BytesIO(data), None)
        # THEN the command type and every decoded TLV match the raw payload
        self.assertTrue(isinstance(actual_mle_command, mle.MleCommand))
        self.assertEqual(11, actual_mle_command.type)
        self.assertEqual(mle.Response(bytearray([0xa5, 0xf2, 0x9b, 0xde, 0xe3, 0xd8, 0xbe, 0xb9])),
                         actual_mle_command.tlvs[0])
        self.assertEqual(mle.LinkLayerFrameCounter(0), actual_mle_command.tlvs[1])
        self.assertEqual(mle.MleFrameCounter(1), actual_mle_command.tlvs[2])
        self.assertEqual(mle.Mode(receiver=1, secure=1, device_type=0, network_data=1),
                         actual_mle_command.tlvs[3])
        self.assertEqual(mle.Timeout(240), actual_mle_command.tlvs[4])
        self.assertEqual(mle.Version(2), actual_mle_command.tlvs[5])
        self.assertEqual(mle.AddressRegistration(addresses=[
            mle.AddressCompressed(cid=0, iid=bytearray([0x86, 0xa2, 0x1b, 0x81, 0x6d, 0xb8, 0xb5, 0xe8]))]),
            actual_mle_command.tlvs[6])
        self.assertEqual(mle.TlvRequest(tlvs=[10, 12, 9]), actual_mle_command.tlvs[7])
class TestMleMessageFactory(unittest.TestCase):
    """Unit tests for the secured MLE message factory: decrypts captured
    frames with the default master key and checks the decoded command."""

    def test_should_create_MleMessageSecured_from_bytearray_when_parse_method_is_called(self):
        # GIVEN addressing metadata matching the capture (the MAC addresses
        # and IPv6 addresses feed the decryption nonce/key selection).
        message_info = common.MessageInfo()
        message_info.source_ipv6 = "fe80::10cf:d38b:3b61:5558"
        message_info.destination_ipv6 = "fe80::383e:9eed:7a01:36a5"
        message_info.source_mac_address = common.MacAddress.from_eui64(
            bytearray([0x12, 0xcf, 0xd3, 0x8b, 0x3b, 0x61, 0x55, 0x58]))
        message_info.destination_mac_address = common.MacAddress.from_eui64(
            bytearray([0x3a, 0x3e, 0x9e, 0xed, 0x7a, 0x01, 0x36, 0xa5]))
        # Captured secured MLE frame: security header, encrypted command
        # payload, and a trailing 4-byte MIC.
        data = bytearray([0x00, 0x15, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                          0x00, 0x01, 0x14, 0x03, 0xe3, 0x72, 0x50, 0x4f,
                          0x8c, 0x5c, 0x42, 0x81, 0x68, 0xe2, 0x11, 0xfc,
                          0xf5, 0x8c, 0x62, 0x8e, 0x83, 0x99, 0xe7, 0x26,
                          0x86, 0x34, 0x3b, 0xa7, 0x68, 0xc7, 0x93, 0xfb,
                          0x72, 0xd9, 0xcc, 0x13, 0x5e, 0x5b, 0x96, 0x0e,
                          0xf1, 0x80, 0x03, 0x55, 0x4f, 0x27, 0xc2, 0x96,
                          0xf4, 0x9c, 0x65, 0x82, 0x97, 0xcf, 0x97, 0x35,
                          0x89, 0xc2])
        factory = config.create_default_mle_message_factory(master_key=config.DEFAULT_MASTER_KEY)
        # WHEN
        actual_mle_message = factory.parse(io.BytesIO(data), message_info)
        # THEN the decrypted command and its TLVs match the known plaintext
        self.assertTrue(isinstance(actual_mle_message, mle.MleMessageSecured))
        self.assertEqual(11, actual_mle_message.command.type)
        self.assertEqual(mle.Response(bytearray([0xa5, 0xf2, 0x9b, 0xde, 0xe3, 0xd8, 0xbe, 0xb9])),
                         actual_mle_message.command.tlvs[0])
        self.assertEqual(mle.LinkLayerFrameCounter(0), actual_mle_message.command.tlvs[1])
        self.assertEqual(mle.MleFrameCounter(1), actual_mle_message.command.tlvs[2])
        self.assertEqual(mle.Mode(receiver=1, secure=1, device_type=0, network_data=1),
                         actual_mle_message.command.tlvs[3])
        self.assertEqual(mle.Timeout(240), actual_mle_message.command.tlvs[4])
        self.assertEqual(mle.Version(2), actual_mle_message.command.tlvs[5])
        self.assertEqual(mle.AddressRegistration(addresses=[
            mle.AddressCompressed(cid=0, iid=bytearray([0x86, 0xa2, 0x1b, 0x81, 0x6d, 0xb8, 0xb5, 0xe8]))]),
            actual_mle_message.command.tlvs[6])
        self.assertEqual(mle.TlvRequest(tlvs=[10, 12, 9]), actual_mle_message.command.tlvs[7])
        # The last four bytes of the frame are the message integrity code.
        self.assertEqual(bytearray(data[-4:]), actual_mle_message.mic)

    def test_should_create_MleMessageSecured_with_MLE_Data_Response_from_bytearray_when_parse_method_is_called(self):
        # GIVEN addressing metadata for a link-local multicast data response
        message_info = common.MessageInfo()
        message_info.source_ipv6 = "fe80::241c:b11b:7b62:caf1"
        message_info.destination_ipv6 = "ff02::1"
        message_info.source_mac_address = common.MacAddress.from_eui64(
            bytearray([0x26, 0x1c, 0xb1, 0x1b, 0x7b, 0x62, 0xca, 0xf1]))
        message_info.destination_mac_address = common.MacAddress.from_eui64(
            bytearray([0x3a, 0xba, 0xad, 0xca, 0xfe, 0xde, 0xff, 0xa5]))
        data = bytearray([0x00, 0x15, 0x15, 0x00, 0x00, 0x00, 0x00, 0x00,
                          0x00, 0x00, 0x01, 0xca, 0xd3, 0x45, 0xe2, 0x35,
                          0x1d, 0x00, 0x2d, 0x72, 0x71, 0xb1, 0x19, 0xaf,
                          0x8b, 0x05, 0xd9, 0x52, 0x74, 0xce, 0xe6, 0x36,
                          0x53, 0xeb, 0xc6, 0x25, 0x94, 0x01, 0x6d, 0x20,
                          0xdf, 0x30, 0x82, 0xf8, 0xbb, 0x34, 0x47, 0x42,
                          0x50, 0xe9, 0x41, 0xa7, 0x33, 0xa5])
        factory = config.create_default_mle_message_factory(master_key=config.DEFAULT_MASTER_KEY)
        # WHEN
        actual_mle_message = factory.parse(io.BytesIO(data), message_info)
        # THEN the decrypted command carries the expected source address,
        # leader data and network-data TLVs.
        self.assertTrue(isinstance(actual_mle_message, mle.MleMessageSecured))
        self.assertEqual(8, actual_mle_message.command.type)
        self.assertEqual(mle.SourceAddress(address=0x9400), actual_mle_message.command.tlvs[0])
        self.assertEqual(mle.LeaderData(
            partition_id=0x06d014ca,
            weighting=64,
            data_version=131,
            stable_data_version=168,
            leader_router_id=37
        ), actual_mle_message.command.tlvs[1])
        self.assertEqual(mle.NetworkData(tlvs=[
            network_data.Prefix(
                domain_id=0,
                prefix_length=64,
                prefix=bytearray([0x12, 0x34, 0x12, 0x34, 0x12, 0x34, 0x12, 0x34]),
                sub_tlvs=[
                    network_data.LowpanId(c=1, cid=1, context_length=64, stable=1),
                    network_data.BorderRouter(border_router_16=37888, prf=0, p=1,
                                              s=1, d=0, c=0, r=1, o=1, n=0, stable=1)
                ],
                stable=1
            )
        ]), actual_mle_message.command.tlvs[2])
        # The last four bytes of the frame are the message integrity code.
        self.assertEqual(bytearray(data[-4:]), actual_mle_message.mic)
# Run all MLE TLV tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| |
#!/usr/bin/env python3
# Tests check_format.py. This must be run in a context where the clang
# version and settings are compatible with the one in the Envoy
# docker. Normally this is run via check_format_test.sh, which
# executes it in under docker.
from __future__ import print_function
from run_command import run_command
import argparse
import logging
import os
import shutil
import sys
import tempfile
# Directory containing this script, and the tools root directory above it.
curr_dir = os.path.dirname(os.path.realpath(__file__))
tools = os.path.dirname(curr_dir)
# Golden input files used to exercise check_format.py.
src = os.path.join(tools, 'testdata', 'check_format')
# Command prefix invoking check_format.py under the current interpreter.
check_format = sys.executable + " " + os.path.join(curr_dir, 'check_format.py')
# NOTE(review): module-level error tally; appears to be accumulated by the
# driver code further below — confirm.
errors = 0
# Runs the 'check_format' operation, on the specified file, printing
# the command run and the status code as well as the stdout, and returning
# all of that to the caller.  (Typo fixed: "comamnd" -> "command".)
def run_check_format(operation, filename):
    """Invokes check_format.py with `operation` on `filename`; returns
    (command string, exit status, combined stdout+stderr)."""
    command = check_format + " " + operation + " " + filename
    status, stdout, stderr = run_command(command)
    return (command, status, stdout + stderr)
def get_input_file(filename, extra_input_files=None):
    """Copies a golden test file (plus any extra input files) from the
    testdata source tree into the current working directory.

    Args:
        filename: path of the primary input file, relative to the testdata dir.
        extra_input_files: optional iterable of additional relative paths.

    Returns:
        The primary filename, now valid relative to the working directory.
    """
    files_to_copy = [filename]
    if extra_input_files is not None:
        files_to_copy.extend(extra_input_files)
    for f in files_to_copy:
        infile = os.path.join(src, f)
        directory = os.path.dirname(f)
        # exist_ok makes this robust when the directory already exists
        # (the old isdir() pre-check was racy under concurrent runs).
        if directory:
            os.makedirs(directory, exist_ok=True)
        shutil.copyfile(infile, f)
    return filename
# Attempts to fix file, returning a 5-tuple: the command, input file name,
# output filename, the error status code, and captured stdout as an array
# of lines.  (Fixed: the comment previously said "4-tuple" and listed the
# fields in the wrong order.)
def fix_file_helper(filename, extra_input_files=None):
    command, status, stdout = run_check_format(
        "fix", get_input_file(filename, extra_input_files=extra_input_files))
    # infile points at the pristine copy in the testdata tree; filename is
    # the working-directory copy that check_format may have modified.
    infile = os.path.join(src, filename)
    return command, infile, filename, status, stdout
# Attempts to fix a file, returning the status code and the generated output.
# If the fix was successful, the diff is returned as a string-array. If the file
# was not fixable, the error-messages are returned as a string-array.
def fix_file_expecting_success(file, extra_input_files=None):
    """Runs check_format in fix mode and verifies the result matches the
    .gold reference file. Returns 0 on success, 1 on failure."""
    command, infile, outfile, status, stdout = fix_file_helper(
        file, extra_input_files=extra_input_files)
    if status != 0:
        print("FAILED: " + infile)
        emit_stdout_as_error(stdout)
        return 1

    # The fixer succeeded; the output must be identical to the golden file.
    diff_status, diff_out, diff_err = run_command('diff ' + outfile + ' ' + infile + '.gold')
    if diff_status == 0:
        return 0
    print("FAILED: " + infile)
    emit_stdout_as_error(diff_out + diff_err)
    return 1
def fix_file_expecting_no_change(file):
    """Runs check_format in fix mode and verifies the file is left untouched.
    Returns 0 when unchanged, 1 otherwise."""
    _, infile, outfile, status, _ = fix_file_helper(file)
    if status != 0:
        return 1
    diff_status, _, _ = run_command('diff ' + outfile + ' ' + infile)
    if diff_status != 0:
        logging.error(file + ': expected file to remain unchanged')
        return 1
    return 0
def emit_stdout_as_error(stdout):
    """Reports captured subprocess output lines as one logged error record."""
    joined_output = "\n".join(stdout)
    logging.error(joined_output)
def expect_error(filename, status, stdout, expected_substring):
    """Verify a command failed and that its output contains expected_substring.

    Returns 0 when the expected error text was found; 1 otherwise, with the
    mismatch logged for diagnosis.
    """
    if status == 0:
        logging.error("%s: Expected failure `%s`, but succeeded" % (filename, expected_substring))
        return 1
    if any(expected_substring in line for line in stdout):
        return 0
    logging.error("%s: Could not find '%s' in:\n" % (filename, expected_substring))
    emit_stdout_as_error(stdout)
    return 1
def fix_file_expecting_failure(filename, expected_substring):
    """Attempt a fix that should fail, checking for the expected error text."""
    _, _, _, status, stdout = fix_file_helper(filename)
    return expect_error(filename, status, stdout, expected_substring)
def check_file_expecting_error(filename, expected_substring, extra_input_files=None):
    """Run 'check' on filename, expecting it to fail with expected_substring."""
    input_file = get_input_file(filename, extra_input_files=extra_input_files)
    _, status, stdout = run_check_format("check", input_file)
    return expect_error(filename, status, stdout, expected_substring)
def check_and_fix_error(filename, expected_substring, extra_input_files=None):
    """Verify 'check' reports the error, then verify 'fix' repairs it.

    Returns the combined failure count of both phases.
    """
    failures = check_file_expecting_error(
        filename, expected_substring, extra_input_files=extra_input_files)
    failures += fix_file_expecting_success(filename, extra_input_files=extra_input_files)
    return failures
def check_tool_not_found_error():
    """Verify the error produced when clang-format is missing from PATH.

    Temporarily points PATH at directories that lack the tool. The test is
    skipped (returns 0) when CLANG_FORMAT names an executable file directly,
    since PATH lookup is irrelevant in that case. Returns the failure count.
    """
    # Temporarily change PATH to test the error about lack of external tools.
    old_path = os.environ["PATH"]
    os.environ["PATH"] = "/sbin:/usr/sbin"
    try:
        clang_format = os.getenv("CLANG_FORMAT", "clang-format-11")
        # If CLANG_FORMAT points directly to the binary, skip this test.
        if os.path.isfile(clang_format) and os.access(clang_format, os.X_OK):
            return 0
        return check_file_expecting_error(
            "no_namespace_envoy.cc", "Command %s not found." % clang_format)
    finally:
        # Restore PATH on every exit path, including exceptions; the original
        # code could leave PATH clobbered if the check raised.
        os.environ["PATH"] = old_path
def check_unfixable_error(filename, expected_substring):
    """Expect both 'check' and 'fix' to report expected_substring for filename."""
    failures = check_file_expecting_error(filename, expected_substring)
    failures += fix_file_expecting_failure(filename, expected_substring)
    return failures
def check_file_expecting_ok(filename):
    """Expect 'check' to pass on filename and 'fix' to leave it unchanged.

    Returns the failure count (the check's exit status plus any fix-diff
    failures).
    """
    # The command string returned by run_check_format is not needed here;
    # the original bound it to an unused local.
    _, status, stdout = run_check_format("check", get_input_file(filename))
    if status != 0:
        logging.error("Expected %s to have no errors; status=%d, output:\n" % (filename, status))
        emit_stdout_as_error(stdout)
    return status + fix_file_expecting_no_change(filename)
def run_checks():
    """Run every check/fix scenario against the test inputs.

    Returns the total number of failures across all scenarios (0 on success).
    """
    errors = 0
    # The following error is the error about unavailability of external tools.
    errors += check_tool_not_found_error()
    # The following errors can be detected but not fixed automatically.
    errors += check_unfixable_error(
        "no_namespace_envoy.cc", "Unable to find Envoy namespace or NOLINT(namespace-envoy)")
    errors += check_unfixable_error("mutex.cc", "Don't use <mutex> or <condition_variable*>")
    errors += check_unfixable_error(
        "condition_variable.cc", "Don't use <mutex> or <condition_variable*>")
    errors += check_unfixable_error(
        "condition_variable_any.cc", "Don't use <mutex> or <condition_variable*>")
    # The original ran this exact check twice on consecutive lines; the
    # duplicate was removed as redundant.
    errors += check_unfixable_error("shared_mutex.cc", "shared_mutex")
    real_time_inject_error = (
        "Don't reference real-world time sources; use TimeSystem::advanceTime(Wait|Async)")
    errors += check_unfixable_error("real_time_source.cc", real_time_inject_error)
    errors += check_unfixable_error("real_time_system.cc", real_time_inject_error)
    errors += check_unfixable_error(
        "duration_value.cc",
        "Don't use ambiguous duration(value), use an explicit duration type, e.g. Event::TimeSystem::Milliseconds(value)"
    )
    errors += check_unfixable_error("system_clock.cc", real_time_inject_error)
    errors += check_unfixable_error("steady_clock.cc", real_time_inject_error)
    errors += check_unfixable_error(
        "unpack_to.cc", "Don't use UnpackTo() directly, use MessageUtil::unpackTo() instead")
    errors += check_unfixable_error(
        "condvar_wait_for.cc", "Don't use CondVar::waitFor(); use TimeSystem::waitFor() instead.")
    errors += check_unfixable_error("sleep.cc", real_time_inject_error)
    errors += check_unfixable_error("std_atomic_free_functions.cc", "std::atomic_*")
    errors += check_unfixable_error("std_get_time.cc", "std::get_time")
    # NOTE(review): no_namespace_envoy.cc is already checked near the top of
    # this function; this repeat looks redundant but is kept to avoid changing
    # behavior -- confirm and deduplicate.
    errors += check_unfixable_error(
        "no_namespace_envoy.cc", "Unable to find Envoy namespace or NOLINT(namespace-envoy)")
    errors += check_unfixable_error("bazel_tools.BUILD", "unexpected @bazel_tools reference")
    errors += check_unfixable_error(
        "proto.BUILD", "unexpected direct external dependency on protobuf")
    errors += check_unfixable_error(
        "proto_deps.cc", "unexpected direct dependency on google.protobuf")
    errors += check_unfixable_error("attribute_packed.cc", "Don't use __attribute__((packed))")
    errors += check_unfixable_error(
        "designated_initializers.cc", "Don't use designated initializers")
    errors += check_unfixable_error("elvis_operator.cc", "Don't use the '?:' operator")
    errors += check_unfixable_error(
        "testing_test.cc", "Don't use 'using testing::Test;, elaborate the type instead")
    errors += check_unfixable_error(
        "serialize_as_string.cc",
        "Don't use MessageLite::SerializeAsString for generating deterministic serialization")
    errors += check_unfixable_error(
        "counter_from_string.cc",
        "Don't lookup stats by name at runtime; use StatName saved during construction")
    errors += check_unfixable_error(
        "gauge_from_string.cc",
        "Don't lookup stats by name at runtime; use StatName saved during construction")
    errors += check_unfixable_error(
        "histogram_from_string.cc",
        "Don't lookup stats by name at runtime; use StatName saved during construction")
    errors += check_unfixable_error(
        "regex.cc", "Don't use std::regex in code that handles untrusted input. Use RegexMatcher")
    errors += check_unfixable_error(
        "grpc_init.cc",
        "Don't call grpc_init() or grpc_shutdown() directly, instantiate Grpc::GoogleGrpcContext. "
        + "See #8282")
    errors += check_unfixable_error(
        "grpc_shutdown.cc",
        "Don't call grpc_init() or grpc_shutdown() directly, instantiate Grpc::GoogleGrpcContext. "
        + "See #8282")
    errors += check_unfixable_error(
        "source/raw_try.cc",
        "Don't use raw try, use TRY_ASSERT_MAIN_THREAD if on the main thread otherwise don't use exceptions."
    )
    errors += check_unfixable_error("clang_format_double_off.cc", "clang-format nested off")
    errors += check_unfixable_error("clang_format_trailing_off.cc", "clang-format remains off")
    errors += check_unfixable_error("clang_format_double_on.cc", "clang-format nested on")
    errors += fix_file_expecting_failure(
        "api/missing_package.proto",
        "Unable to find package name for proto file: ./api/missing_package.proto")
    errors += check_unfixable_error(
        "proto_enum_mangling.cc", "Don't use mangled Protobuf names for enum constants")
    errors += check_unfixable_error(
        "test_naming.cc", "Test names should be CamelCase, starting with a capital letter")
    errors += check_unfixable_error("mock_method_n.cc", "use MOCK_METHOD() instead")
    errors += check_unfixable_error("for_each_n.cc", "use an alternative for loop instead")
    errors += check_unfixable_error(
        "test/register_factory.cc",
        "Don't use Registry::RegisterFactory or REGISTER_FACTORY in tests, use "
        "Registry::InjectFactory instead.")
    errors += check_unfixable_error(
        "strerror.cc", "Don't use strerror; use Envoy::errorDetails instead")
    errors += check_unfixable_error(
        "std_unordered_map.cc", "Don't use std::unordered_map; use absl::flat_hash_map instead "
        + "or absl::node_hash_map if pointer stability of keys/values is required")
    errors += check_unfixable_error(
        "std_unordered_set.cc", "Don't use std::unordered_set; use absl::flat_hash_set instead "
        + "or absl::node_hash_set if pointer stability of keys/values is required")
    errors += check_unfixable_error("std_any.cc", "Don't use std::any; use absl::any instead")
    errors += check_unfixable_error(
        "std_get_if.cc", "Don't use std::get_if; use absl::get_if instead")
    errors += check_unfixable_error(
        "std_holds_alternative.cc",
        "Don't use std::holds_alternative; use absl::holds_alternative instead")
    errors += check_unfixable_error(
        "std_make_optional.cc", "Don't use std::make_optional; use absl::make_optional instead")
    errors += check_unfixable_error(
        "std_monostate.cc", "Don't use std::monostate; use absl::monostate instead")
    errors += check_unfixable_error(
        "std_optional.cc", "Don't use std::optional; use absl::optional instead")
    errors += check_unfixable_error(
        "std_string_view.cc",
        "Don't use std::string_view or toStdStringView; use absl::string_view instead")
    errors += check_unfixable_error(
        "std_variant.cc", "Don't use std::variant; use absl::variant instead")
    errors += check_unfixable_error("std_visit.cc", "Don't use std::visit; use absl::visit instead")
    errors += check_unfixable_error(
        "throw.cc", "Don't introduce throws into exception-free files, use error statuses instead.")
    errors += check_unfixable_error("pgv_string.proto", "min_bytes is DEPRECATED, Use min_len.")
    errors += check_file_expecting_ok("commented_throw.cc")
    errors += check_unfixable_error(
        "repository_url.bzl", "Only repository_locations.bzl may contains URL references")
    errors += check_unfixable_error(
        "repository_urls.bzl", "Only repository_locations.bzl may contains URL references")
    # The following files have errors that can be automatically fixed.
    errors += check_and_fix_error(
        "over_enthusiastic_spaces.cc", "./over_enthusiastic_spaces.cc:3: over-enthusiastic spaces")
    errors += check_and_fix_error(
        "extra_enthusiastic_spaces.cc",
        "./extra_enthusiastic_spaces.cc:3: over-enthusiastic spaces")
    errors += check_and_fix_error(
        "angle_bracket_include.cc", "envoy includes should not have angle brackets")
    errors += check_and_fix_error("proto_style.cc", "incorrect protobuf type reference")
    errors += check_and_fix_error("long_line.cc", "clang-format check failed")
    errors += check_and_fix_error("header_order.cc", "header_order.py check failed")
    errors += check_and_fix_error(
        "clang_format_on.cc", "./clang_format_on.cc:7: over-enthusiastic spaces")
    # Validate that a missing license is added.
    errors += check_and_fix_error("license.BUILD", "envoy_build_fixer check failed")
    # Validate that an incorrect license is replaced and reordered.
    errors += check_and_fix_error("update_license.BUILD", "envoy_build_fixer check failed")
    # Validate that envoy_package() is added where there is an envoy_* rule occurring.
    errors += check_and_fix_error("add_envoy_package.BUILD", "envoy_build_fixer check failed")
    # Validate that we don't add envoy_package() when no envoy_* rule.
    errors += check_file_expecting_ok("skip_envoy_package.BUILD")
    # Validate that we clean up gratuitous blank lines.
    errors += check_and_fix_error("canonical_spacing.BUILD", "envoy_build_fixer check failed")
    # Validate that unused loads are removed.
    errors += check_and_fix_error("remove_unused_loads.BUILD", "envoy_build_fixer check failed")
    # Validate that API proto package deps are computed automagically.
    errors += check_and_fix_error(
        "canonical_api_deps.BUILD",
        "envoy_build_fixer check failed",
        extra_input_files=[
            "canonical_api_deps.cc", "canonical_api_deps.h", "canonical_api_deps.other.cc"
        ])
    errors += check_and_fix_error("bad_envoy_build_sys_ref.BUILD", "Superfluous '@envoy//' prefix")
    errors += check_and_fix_error("proto_format.proto", "clang-format check failed")
    errors += check_and_fix_error(
        "cpp_std.cc",
        "term absl::make_unique< should be replaced with standard library term std::make_unique<")
    errors += check_and_fix_error(
        "code_conventions.cc", "term .Times(1); should be replaced with preferred term ;")
    errors += check_file_expecting_ok("real_time_source_override.cc")
    errors += check_file_expecting_ok("duration_value_zero.cc")
    errors += check_file_expecting_ok("time_system_wait_for.cc")
    errors += check_file_expecting_ok("clang_format_off.cc")
    return errors
if __name__ == "__main__":
    # CLI: only a --log flag controlling the logging verbosity threshold.
    parser = argparse.ArgumentParser(description='tester for check_format.py.')
    parser.add_argument('--log', choices=['INFO', 'WARN', 'ERROR'], default='INFO')
    args = parser.parse_args()
    # Message-only format keeps test output readable.
    logging.basicConfig(format='%(message)s', level=args.log)
    # Now create a temp directory to copy the input files, so we can fix them
    # without actually fixing our testdata. This requires chdiring to the temp
    # directory, so it's annoying to comingle check-tests and fix-tests.
    with tempfile.TemporaryDirectory() as tmp:
        os.chdir(tmp)
        errors = run_checks()
        if errors != 0:
            logging.error("%d FAILURES" % errors)
            # NOTE(review): `exit` is the site-module helper; sys.exit(1) is
            # the conventional form -- confirm sys is imported before changing.
            exit(1)
        # Logged at WARNING so "PASS" remains visible even with --log WARN.
        logging.warning("PASS")
| |
from django.contrib.contenttypes.models import ContentType
from django_dynamic_fixture import G, N
from django.test import TestCase
from entity.sync import sync_entities
from entity.signal_handlers import turn_off_syncing, turn_on_syncing
from entity.models import (
Entity, EntityKind, EntityRelationship, EntityGroup, EntityGroupMembership, get_entities_by_kind
)
from entity.tests.models import Account, Team, TeamGroup, Competitor
from entity.tests.utils import EntityTestCase
class EntityKindManagerTest(EntityTestCase):
    """
    Tests the active and all entity kind managers.
    """
    def test_only_active(self):
        """The default ``objects`` manager returns only active entity kinds."""
        G(EntityKind, is_active=False)
        active_ek = G(EntityKind, is_active=True)
        # assertEquals is a deprecated alias of assertEqual (removed in Python 3.12).
        self.assertEqual([active_ek], list(EntityKind.objects.all()))

    def test_all_objects(self):
        """``all_objects`` returns both active and inactive entity kinds."""
        inactive_ek = G(EntityKind, is_active=False)
        active_ek = G(EntityKind, is_active=True)
        self.assertEqual(set([active_ek, inactive_ek]), set(EntityKind.all_objects.all()))
class EntityKindTest(EntityTestCase):
    """
    Tests the EntityKind model.
    """
    def test_unicode(self):
        """The string representation of an EntityKind is its display name."""
        ek = N(EntityKind, display_name='hello')
        # assertEquals is a deprecated alias of assertEqual (removed in Python 3.12).
        self.assertEqual(u'{0}'.format(ek), u'hello')

    def test_regular_delete(self):
        """
        Regular deletion should deactivate the entity kind.
        """
        ek = G(EntityKind, is_active=True)
        ek.delete()
        self.assertFalse(ek.is_active)
class TestActiveEntityManager(EntityTestCase):
    def test_filters_active_by_default(self):
        """
        Tests that active entities are returned by default when accessing Entity.objects
        """
        e = G(Entity, is_active=True)
        G(Entity, is_active=False)
        # assertEquals is a deprecated alias of assertEqual (removed in Python 3.12).
        self.assertEqual([e], list(Entity.objects.all()))
class TestAllEntityManager(EntityTestCase):
"""
Tests custom function in the AllEntityManager class.
"""
def setUp(self):
super(TestAllEntityManager, self).setUp()
self.account_type = ContentType.objects.get_for_model(Account)
self.account_kind = G(EntityKind, name='tests.account')
self.team_type = ContentType.objects.get_for_model(Team)
self.team_kind = G(EntityKind, name='tests.team')
self.team_group_type = ContentType.objects.get_for_model(TeamGroup)
self.team_group_kind = G(EntityKind, name='tests.teamgroup')
self.competitor_type = ContentType.objects.get_for_model(Competitor)
self.competitor_kind = G(EntityKind, name='tests.competitor')
def test_manager_cache_relationships(self):
"""
Tests a retrieval of cache relationships on the manager and verifies it results in the smallest amount of
queries
"""
team = Team.objects.create()
for i in range(5):
Account.objects.create(team=team)
# Five queries should happen here - one for all entities, two for EntityRelationships,
# and two more for entities in the relationships
with self.assertNumQueries(5):
entities = Entity.objects.cache_relationships()
for entity in entities:
self.assertTrue(len(list(entity.get_super_entities())) >= 0)
self.assertTrue(len(list(entity.get_sub_entities())) >= 0)
self.assertEquals(entities.count(), 6)
def test_manager_cache_relationships_only_sub(self):
"""
Tests a retrieval of cache relationships on the manager and verifies it results in the smallest amount of
queries when only caching sub entities.
"""
team = Team.objects.create()
for i in range(5):
Account.objects.create(team=team)
# Five queries should happen here - one for all entities, two for EntityRelationships,
# and two more for entities in the relationships
with self.assertNumQueries(3):
entities = Entity.objects.cache_relationships(cache_super=False)
for entity in entities:
self.assertTrue(len(list(entity.get_sub_entities())) >= 0)
self.assertEquals(entities.count(), 6)
def test_manager_cache_relationships_only_super(self):
"""
Tests a retrieval of cache relationships on the manager and verifies it results in the smallest amount of
queries when only caching super entities.
"""
team = Team.objects.create()
for i in range(5):
Account.objects.create(team=team)
# Five queries should happen here - one for all entities, two for EntityRelationships,
# and two more for entities in the relationships
with self.assertNumQueries(3):
entities = Entity.objects.cache_relationships(cache_sub=False)
for entity in entities:
self.assertTrue(len(list(entity.get_super_entities())) >= 0)
self.assertEquals(entities.count(), 6)
def test_manager_cache_relationships_none(self):
"""
Tests a retrieval of cache relationships on the manager and verifies it results in the smallest amount of
queries when super and sub are set to false
"""
team = Team.objects.create()
for i in range(5):
Account.objects.create(team=team)
# Five queries should happen here - one for all entities, two for EntityRelationships,
# and two more for entities in the relationships
with self.assertNumQueries(1):
entities = Entity.objects.cache_relationships(cache_sub=False, cache_super=False)
self.assertTrue(len(entities) > 0)
self.assertEquals(entities.count(), 6)
def test_queryset_cache_relationships(self):
"""
Tests a retrieval of cache relationships on the queryset and verifies it results in the smallest amount of
queries
"""
team = Team.objects.create()
for i in range(5):
Account.objects.create(team=team)
entity_ids = [i.id for i in Entity.objects.all()]
# Five queries should happen here - 1 for the Entity filter, two for EntityRelationships, and two more
# for entities in those relationships
with self.assertNumQueries(5):
entities = Entity.objects.filter(id__in=entity_ids).cache_relationships()
for entity in entities:
self.assertTrue(len(list(entity.get_super_entities())) >= 0)
self.assertTrue(len(list(entity.get_sub_entities())) >= 0)
self.assertEquals(entities.count(), 6)
def test_cache_subset(self):
"""
Tests that caching still operates the same on an entity subset call.
"""
team = Team.objects.create()
team_entity = Entity.objects.get_for_obj(team)
for i in range(5):
Account.objects.create(team=team)
# Five queries should happen here - 1 for the Entity filter, two for EntityRelationships, and one more
# for entities in those relationships (since no sub relationships exist)
with self.assertNumQueries(4):
entities = Entity.objects.is_sub_to_all(team_entity).cache_relationships()
for entity in entities:
self.assertTrue(len(list(entity.get_super_entities())) == 1)
self.assertTrue(len(list(entity.get_sub_entities())) == 0)
self.assertEquals(len(entities), 5)
def test_get_for_obj(self):
"""
Test retrieving an entity associated with an object.
"""
# Create an account with no team
account = Account.objects.create()
# Get its resulting entity
entity = Entity.objects.get(entity_type=ContentType.objects.get_for_model(account), entity_id=account.id)
self.assertEquals(entity, Entity.objects.get_for_obj(account))
def test_filter_manager_active(self):
"""
Test filtering active entities directly from the manager.
"""
# Create an active and inactive account
account = Account.objects.create()
Account.objects.create(is_active=False)
# Get its resulting entity
entity = Entity.objects.get_for_obj(account)
self.assertEquals([entity], list(Entity.objects.active()))
def test_filter_manager_inactive(self):
"""
Test filtering inactive entities directly from the manager.
"""
# Create an active and inactive account
account = Account.objects.create()
account = Account.objects.create(is_active=False)
# Get its resulting entity
entity = Entity.all_objects.get_for_obj(account)
self.assertEquals([entity], list(Entity.all_objects.inactive()))
def test_filter_queryset_active(self):
"""
Test filtering active entities from a queryset.
"""
# Create an active and inactive account
active_entity = Entity.objects.get_for_obj(Account.objects.create())
inactive_entity = Entity.all_objects.get_for_obj(Account.objects.create(is_active=False))
self.assertEquals(
[active_entity], list(Entity.objects.filter(id__in=[active_entity.id, inactive_entity.id]).active()))
def test_filter_queryset_inactive(self):
"""
Test filtering inactive entities from a queryset.
"""
# Create an active and inactive account
active_entity = Entity.objects.get_for_obj(Account.objects.create())
inactive_entity = Entity.all_objects.get_for_obj(Account.objects.create(is_active=False))
self.assertEquals(
[inactive_entity],
list(Entity.all_objects.filter(id__in=[active_entity.id, inactive_entity.id]).inactive()))
def test_filter_manager_is_kind_none(self):
"""
Tests filtering by entity kind when no kind is given.
"""
team = Team.objects.create()
team_entity = Entity.objects.get_for_obj(team)
account_entities = [
Entity.objects.get_for_obj(Account.objects.create(team=team))
for i in range(5)
]
self.assertEquals(set([team_entity] + account_entities), set(Entity.objects.is_any_kind()))
def test_filter_manager_one_kind(self):
"""
Tests filtering by entity kind when one kind is given.
"""
team = Team.objects.create()
team_entity = Entity.objects.get_for_obj(team)
account_entities = set(
Entity.objects.get_for_obj(Account.objects.create(team=team))
for i in range(5)
)
self.assertEquals([team_entity], list(Entity.objects.is_any_kind(self.team_kind)))
self.assertEquals(account_entities, set(Entity.objects.is_any_kind(self.account_kind)))
def test_filter_manager_two_kinds(self):
"""
Tests filtering by entity kind when two kinds are given.
"""
team = Team.objects.create()
team_entity = Entity.objects.get_for_obj(team)
account_entities = set(
Entity.objects.get_for_obj(Account.objects.create(team=team))
for i in range(5)
)
self.assertEquals(
account_entities.union([team_entity]), set(Entity.objects.is_any_kind(self.account_kind, self.team_kind)))
    def test_is_sub_to_all_kinds_none(self):
        """With no kinds given, is_sub_to_all_kinds returns every entity."""
        # set up teams, groups and competitors
        team = Team.objects.create()
        group = TeamGroup.objects.create(name='group')
        competitor = Competitor.objects.create(name='competitor')
        # set up players with different combinations of team, group and competitor
        Account.objects.create(email='team_only', team=team)
        Account.objects.create(email='team_competitor', team=team, competitor=competitor)
        Account.objects.create(email='team_group', team=team, team_group=group)
        Account.objects.create(email='group_only', team_group=group)
        Account.objects.create(email='group_competitor', team_group=group, competitor=competitor)
        Account.objects.create(email='competitor_only', competitor=competitor)
        # Expected: the three super entities plus all six accounts.
        sorted_names = sorted([e.display_name for e in Entity.objects.is_sub_to_all_kinds()])
        expected_names = [
            u'competitor', u'competitor_only', u'group', u'group_competitor',
            u'group_only', u'team', u'team_competitor', u'team_group', u'team_only'
        ]
        self.assertEqual(sorted_names, expected_names)
    def test_is_sub_to_all_kinds_double(self):
        """Only entities that are sub to BOTH given kinds are returned."""
        # set up teams, groups and competitors
        team = Team.objects.create()
        group = TeamGroup.objects.create(name='group')
        competitor = Competitor.objects.create(name='competitor')
        # set up players with different combinations of team, group and competitor
        Account.objects.create(email='team_only', team=team)
        Account.objects.create(email='team_competitor', team=team, competitor=competitor)
        Account.objects.create(email='team_group', team=team, team_group=group)
        Account.objects.create(email='group_only', team_group=group)
        Account.objects.create(email='group_competitor', team_group=group, competitor=competitor)
        Account.objects.create(email='competitor_only', competitor=competitor)
        # get kind model(s)
        team_kind = EntityKind.objects.get(name='tests.team')
        group_kind = EntityKind.objects.get(name='tests.teamgroup')
        # Only 'team_group' has both a team and a team group super entity.
        sorted_names = sorted([e.display_name for e in Entity.objects.is_sub_to_all_kinds(group_kind, team_kind)])
        expected_names = [u'team_group']
        self.assertEqual(sorted_names, expected_names)
    def test_is_sub_to_all_kinds_single(self):
        """With one kind given, every entity sub to that kind is returned."""
        # set up teams, groups and competitors
        team = Team.objects.create()
        group = TeamGroup.objects.create(name='group')
        competitor = Competitor.objects.create(name='competitor')
        # set up players with different combinations of team, group and competitor
        Account.objects.create(email='team_only', team=team)
        Account.objects.create(email='team_competitor', team=team, competitor=competitor)
        Account.objects.create(email='team_group', team=team, team_group=group)
        Account.objects.create(email='group_only', team_group=group)
        Account.objects.create(email='group_competitor', team_group=group, competitor=competitor)
        Account.objects.create(email='competitor_only', competitor=competitor)
        # get kind model(s)
        group_kind = EntityKind.objects.get(name='tests.teamgroup')
        # The three accounts created with a team_group are the matches.
        sorted_names = sorted([e.display_name for e in Entity.objects.is_sub_to_all_kinds(group_kind)])
        expected_names = [u'group_competitor', u'group_only', u'team_group']
        self.assertEqual(sorted_names, expected_names)
    def test_is_sub_to_any_kind_none(self):
        """With no kinds given, is_sub_to_any_kind returns every entity."""
        # set up teams, groups and competitors
        team = Team.objects.create()
        group = TeamGroup.objects.create(name='group')
        competitor = Competitor.objects.create(name='competitor')
        # set up players with different combinations of team, group and competitor
        Account.objects.create(email='team_only', team=team)
        Account.objects.create(email='team_competitor', team=team, competitor=competitor)
        Account.objects.create(email='team_group', team=team, team_group=group)
        Account.objects.create(email='group_only', team_group=group)
        Account.objects.create(email='group_competitor', team_group=group, competitor=competitor)
        Account.objects.create(email='competitor_only', competitor=competitor)
        # Expected: the three super entities plus all six accounts.
        sorted_names = sorted([e.display_name for e in Entity.objects.is_sub_to_any_kind()])
        expected_names = [
            u'competitor', u'competitor_only', u'group', u'group_competitor',
            u'group_only', u'team', u'team_competitor', u'team_group', u'team_only'
        ]
        self.assertEqual(sorted_names, expected_names)
    def test_is_sub_to_any_kind_single(self):
        """With one kind given, every entity sub to that kind is returned."""
        # set up teams, groups and competitors
        team = Team.objects.create()
        group = TeamGroup.objects.create(name='group')
        competitor = Competitor.objects.create(name='competitor')
        # set up players with different combinations of team, group and competitor
        Account.objects.create(email='team_only', team=team)
        Account.objects.create(email='team_competitor', team=team, competitor=competitor)
        Account.objects.create(email='team_group', team=team, team_group=group)
        Account.objects.create(email='group_only', team_group=group)
        Account.objects.create(email='group_competitor', team_group=group, competitor=competitor)
        Account.objects.create(email='competitor_only', competitor=competitor)
        # get kind model(s)
        group_kind = EntityKind.objects.get(name='tests.teamgroup')
        # The three accounts created with a team_group are the matches.
        sorted_names = sorted([e.display_name for e in Entity.objects.is_sub_to_any_kind(group_kind)])
        expected_names = [u'group_competitor', u'group_only', u'team_group']
        self.assertEqual(sorted_names, expected_names)
    def test_is_sub_to_any_kind_double(self):
        """Entities sub to EITHER of the two given kinds are returned."""
        # set up teams, groups and competitors
        team = Team.objects.create()
        group = TeamGroup.objects.create(name='group')
        competitor = Competitor.objects.create(name='competitor')
        # set up players with different combinations of team, group and competitor
        Account.objects.create(email='team_only', team=team)
        Account.objects.create(email='team_competitor', team=team, competitor=competitor)
        Account.objects.create(email='team_group', team=team, team_group=group)
        Account.objects.create(email='group_only', team_group=group)
        Account.objects.create(email='group_competitor', team_group=group, competitor=competitor)
        Account.objects.create(email='competitor_only', competitor=competitor)
        # get kind model(s)
        team_kind = EntityKind.objects.get(name='tests.team')
        group_kind = EntityKind.objects.get(name='tests.teamgroup')
        # Everything except 'competitor_only' (and the super entities themselves) matches.
        sorted_names = sorted([e.display_name for e in Entity.objects.is_sub_to_any_kind(team_kind, group_kind)])
        expected_names = [u'group_competitor', u'group_only', u'team_competitor', u'team_group', u'team_only']
        self.assertEqual(sorted_names, expected_names)
def test_filter_queryset_two_kinds(self):
"""
Tests filtering by entity kind when two kinds are given on a queryset.
"""
team = Team.objects.create()
team_entity = Entity.objects.get_for_obj(team)
account_entities = set(
Entity.objects.get_for_obj(Account.objects.create(team=team))
for i in range(5)
)
self.assertEquals(
account_entities.union([team_entity]),
set(
Entity.objects.filter(id__in=(i.id for i in account_entities.union([team_entity]))).is_any_kind(
self.account_kind, self.team_kind)
))
def test_filter_manager_is_not_kind_two(self):
"""
Tests filtering by entity kind when two kinds are given.
"""
team = Team.objects.create()
for i in range(5):
Account.objects.create(team=team)
self.assertEquals([], list(Entity.objects.is_not_any_kind(self.team_kind, self.account_kind)))
def test_filter_manager_is_not_kind_one(self):
"""
Tests filtering by entity kind when one kind is given.
"""
team = Team.objects.create()
team_entity = Entity.objects.get_for_obj(team)
account_entities = set(
Entity.objects.get_for_obj(Account.objects.create(team=team))
for i in range(5)
)
self.assertEquals([team_entity], list(Entity.objects.is_not_any_kind(self.account_kind)))
self.assertEquals(account_entities, set(Entity.objects.is_not_any_kind(self.team_kind)))
def test_filter_manager_is_not_kind_none(self):
"""
Tests filtering by entity kind when no kinds are given.
"""
team = Team.objects.create()
team_entity = Entity.objects.get_for_obj(team)
account_entities = set(
Entity.objects.get_for_obj(Account.objects.create(team=team))
for i in range(5)
)
self.assertEquals(
account_entities.union([team_entity]), set(Entity.objects.is_not_any_kind()))
def test_filter_queryset_two_is_not_kinds(self):
"""
Tests filtering by entity type when two kinds are given on a queryset.
"""
team = Team.objects.create()
team_entity = Entity.objects.get_for_obj(team)
account_entities = set(
Entity.objects.get_for_obj(Account.objects.create(team=team))
for i in range(5)
)
self.assertEquals(
[],
list(Entity.objects.filter(id__in=(i.id for i in account_entities.union([team_entity]))).is_not_any_kind(
self.account_kind, self.team_kind))
)
def test_is_sub_to_all_none(self):
"""
Tests the base case of is_sub_to_all on no super entities.
"""
# Create test accounts that have three types of super entities
team = Team.objects.create()
team2 = Team.objects.create()
team_group = TeamGroup.objects.create()
competitor = Competitor.objects.create()
# Create accounts that have four super entities
for i in range(5):
Account.objects.create(competitor=competitor, team=team, team2=team2, team_group=team_group)
# Create accounts that have no super entities
for i in range(5):
Entity.objects.get_for_obj(Account.objects.create())
self.assertEquals(
set(Entity.objects.all()), set(Entity.objects.is_sub_to_all()))
def test_is_sub_to_all_none_is_any_kind(self):
"""
Tests the base case of is_sub_to_all on no super entities with a kind specified.
"""
# Create test accounts that have three types of super entities
team = Team.objects.create()
team2 = Team.objects.create()
team_group = TeamGroup.objects.create()
competitor = Competitor.objects.create()
# Create accounts that have four super entities
for i in range(5):
Account.objects.create(competitor=competitor, team=team, team2=team2, team_group=team_group)
# Create accounts that have no super entities
for i in range(5):
Entity.objects.get_for_obj(Account.objects.create())
self.assertEquals(
set(Entity.objects.filter(entity_type=self.account_type)),
set(Entity.objects.is_sub_to_all().is_any_kind(self.account_kind)))
def test_is_sub_to_all_manager(self):
"""
Tests the is_sub_to_all for an entity directly from the entity manager.
"""
# Create test accounts that have three types of super entities
team = Team.objects.create()
team_entity = Entity.objects.get_for_obj(team)
team2 = Team.objects.create()
team2_entity = Entity.objects.get_for_obj(team2)
team_group = TeamGroup.objects.create()
team_group_entity = Entity.objects.get_for_obj(team_group)
competitor = Competitor.objects.create()
competitor_entity = Entity.objects.get_for_obj(competitor)
# Create accounts that have four super entities
entities_4se = set(
Entity.objects.get_for_obj(
Account.objects.create(competitor=competitor, team=team, team2=team2, team_group=team_group))
for i in range(5)
)
# Create test accounts that have two super entities
entities_2se1 = set(
Entity.objects.get_for_obj(Account.objects.create(competitor=competitor, team_group=team_group))
for i in range(5)
)
entities_2se2 = set(
Entity.objects.get_for_obj(Account.objects.create(competitor=competitor, team=team)) for i in range(5)
)
# Create test accounts that have one super entity
entities_1se = set(Entity.objects.get_for_obj(Account.objects.create(team=team)) for i in range(5))
# Test various subset results
self.assertEquals(
entities_4se, set(Entity.objects.is_sub_to_all(
team_entity, team2_entity, team_group_entity, competitor_entity)))
self.assertEquals(
entities_1se | entities_2se2 | entities_4se, set(Entity.objects.is_sub_to_all(team_entity)))
self.assertEquals(
entities_4se | entities_2se2, set(Entity.objects.is_sub_to_all(team_entity, competitor_entity)))
self.assertEquals(
entities_4se | entities_2se1, set(Entity.objects.is_sub_to_all(team_group_entity)))
self.assertEquals(
entities_4se | entities_2se1 | entities_2se2,
set(Entity.objects.is_sub_to_all(competitor_entity)))
def test_is_sub_to_all_queryset(self):
"""
Tests the subset of super entity types for an entity from a queryset.
"""
# Create test accounts that have three types of super entities
team = Team.objects.create()
team_entity = Entity.objects.get_for_obj(team)
team2 = Team.objects.create()
team2_entity = Entity.objects.get_for_obj(team2)
team_group = TeamGroup.objects.create()
team_group_entity = Entity.objects.get_for_obj(team_group)
competitor = Competitor.objects.create()
competitor_entity = Entity.objects.get_for_obj(competitor)
# Create accounts that have four super entities
entities_4se = list(
Entity.objects.get_for_obj(
Account.objects.create(competitor=competitor, team=team, team2=team2, team_group=team_group))
for i in range(5)
)
# Test subset results
self.assertEquals(
set(entities_4se).difference([entities_4se[0]]),
set(Entity.objects.exclude(id=entities_4se[0].id).is_sub_to_all(
team_entity, team2_entity, team_group_entity, competitor_entity)))
def test_is_sub_to_all_queryset_num_queries(self):
"""
Tests that is_sub_to_all only results in one query.
"""
# Create test accounts that have three types of super entities
team = Team.objects.create()
team_entity = Entity.objects.get_for_obj(team)
team2 = Team.objects.create()
team2_entity = Entity.objects.get_for_obj(team2)
team_group = TeamGroup.objects.create()
team_group_entity = Entity.objects.get_for_obj(team_group)
competitor = Competitor.objects.create()
competitor_entity = Entity.objects.get_for_obj(competitor)
# Create accounts that have four super entities
entities_4se = list(
Entity.objects.get_for_obj(
Account.objects.create(competitor=competitor, team=team, team2=team2, team_group=team_group))
for i in range(5)
)
with self.assertNumQueries(1):
entities = set(Entity.objects.exclude(id=entities_4se[0].id).is_sub_to_all(
team_entity, team2_entity, team_group_entity, competitor_entity))
# Test subset results
self.assertEquals(set(entities_4se).difference([entities_4se[0]]), entities)
def test_is_sub_to_any_none(self):
"""
Tests the base case of is_sub_to_any on no super entities.
"""
# Create test accounts that have three types of super entities
team = Team.objects.create()
team2 = Team.objects.create()
team_group = TeamGroup.objects.create()
competitor = Competitor.objects.create()
# Create accounts that have four super entities
for i in range(5):
Account.objects.create(competitor=competitor, team=team, team2=team2, team_group=team_group)
# Create accounts that have no super entities
for i in range(5):
Entity.objects.get_for_obj(Account.objects.create())
self.assertEquals(
set(Entity.objects.all()), set(Entity.objects.is_sub_to_any()))
def test_is_sub_to_any_queryset(self):
"""
Tests the is_sub_to_any for an entity from a queryset.
"""
# Create test accounts that have three types of super entities
team = Team.objects.create()
team_entity = Entity.objects.get_for_obj(team)
team2 = Team.objects.create()
team2_entity = Entity.objects.get_for_obj(team2)
team_group = TeamGroup.objects.create()
team_group_entity = Entity.objects.get_for_obj(team_group)
competitor = Competitor.objects.create()
competitor_entity = Entity.objects.get_for_obj(competitor)
# Create accounts that have four super entities
entities_4se = list(
Entity.objects.get_for_obj(
Account.objects.create(competitor=competitor, team=team, team2=team2, team_group=team_group))
for i in range(5)
)
# Test subset results
self.assertEquals(
set(entities_4se).difference([entities_4se[0]]),
set(Entity.objects.exclude(id=entities_4se[0].id).is_sub_to_any(
team_entity, team2_entity, team_group_entity, competitor_entity)))
def test_is_sub_to_any_limited_results(self):
"""
Tests the is_sub_to_any for an entity from a queryset where the is_sub_to_any returns less than all
of the entities created.
"""
# Create test accounts that have three types of super entities
team = Team.objects.create()
team_entity = Entity.objects.get_for_obj(team)
team2 = Team.objects.create()
# Create accounts that have super entites of team and team2
entities_w_team = list(
Entity.objects.get_for_obj(Account.objects.create(team=team))
for i in range(5)
)
for i in range(5):
Account.objects.create(team=team2)
# Test subset results
self.assertEquals(set(entities_w_team), set(Entity.objects.is_sub_to_any(team_entity)))
class TestEntityModel(EntityTestCase):
    """
    Tests custom functionality in the Entity model.
    """
    def test_get_super_entities_none(self):
        """
        Tests that super entities are retrieved for mirrored entities that
        have no super entities
        """
        # Create an account with no team
        account = Account.objects.create()
        # Get its resulting entity
        entity = Entity.objects.get_for_obj(account)
        # Get its super entities. It should be an empty list.
        # assertEqual replaces the deprecated assertEquals alias throughout.
        self.assertEqual(list(entity.get_super_entities()), [])
    def test_get_super_entities_one(self):
        """
        Tests retrieval of super entities when an entity has exactly one.
        """
        # Create a team and an account with the team as a super entity.
        team = Team.objects.create()
        account = Account.objects.create(team=team)
        # Get the entity of the account and the team
        account_entity = Entity.objects.get_for_obj(account)
        team_entity = Entity.objects.get_for_obj(team)
        # Verify that the super entities of the account is the team
        self.assertEqual(list(account_entity.get_super_entities()), [team_entity])
    def test_get_sub_entities_none(self):
        """
        Tests retrieval of sub entities when an entity has none.
        """
        # Create a team
        team = Team.objects.create()
        # Get the entity of the team
        team_entity = Entity.objects.get_for_obj(team)
        # Verify that the sub entities of the team is an empty list.
        # Bug fix: this previously asserted get_super_entities(), which is
        # already covered elsewhere and left get_sub_entities untested here.
        self.assertEqual(list(team_entity.get_sub_entities()), [])
    def test_get_sub_entities_one(self):
        """
        Tests retrieval of sub entities when an entity has exactly one.
        """
        # Create a team and an account with the team as a super entity.
        team = Team.objects.create()
        account = Account.objects.create(team=team)
        # Get the entity of the account and the team
        account_entity = Entity.objects.get_for_obj(account)
        team_entity = Entity.objects.get_for_obj(team)
        # Verify that the sub entities of the team is the account
        self.assertEqual(list(team_entity.get_sub_entities()), [account_entity])
    def test_unicode(self):
        """
        Tests that the unicode method returns the display name of the entity.
        """
        account = Account.objects.create(email='hi')
        entity = Entity.objects.get_for_obj(account)
        entity_unicode = entity.__str__()
        self.assertEqual(entity_unicode, 'hi')
class EntityGroupAllEntitiesTest(EntityTestCase):
    """
    Tests EntityGroup.all_entities() over individual memberships,
    (super entity, sub kind) memberships, and kind-only memberships.
    """
    def setUp(self):
        super(EntityGroupAllEntitiesTest, self).setUp()
        # Create three super entities, each with two sub
        # entities. There are two entity kinds. Super entity 1 has two
        # sub entities of the first kind, super entity 2 has subs of
        # two different kinds, and super entity three has two of the
        # second kind.
        self.kind1, self.kind2 = G(EntityKind), G(EntityKind)
        self.super_entities = [G(Entity) for _ in range(3)]
        self.sub_entities = [
            G(Entity, entity_kind=k)
            for k in [self.kind1] * 3 + [self.kind2] * 3
        ]
        for i, sub in enumerate(self.sub_entities):
            # sub entities i and i+1 hang off super entity i // 2
            sup = self.super_entities[i // 2]
            G(EntityRelationship, sub_entity=sub, super_entity=sup)
        self.group = G(EntityGroup)
    def test_individual_entities_returned(self):
        # A membership with no sub_entity_kind means "this exact entity".
        e = self.super_entities[0]
        G(EntityGroupMembership, entity_group=self.group, entity=e, sub_entity_kind=None)
        result = list(self.group.all_entities().order_by('id'))
        expected = [e]
        self.assertEqual(result, expected)
    def test_sub_entity_group_entities_returned(self):
        # A membership with a kind expands to the super entity's subs of that kind.
        e = self.super_entities[0]
        G(
            EntityGroupMembership, entity_group=self.group,
            entity=e, sub_entity_kind=self.kind1)
        result = list(self.group.all_entities().order_by('id'))
        expected = self.sub_entities[0:2]
        self.assertEqual(result, expected)
    def test_sub_entity_group_entities_filters_by_kind(self):
        # Super entity 1 has one sub of each kind; only the kind1 sub qualifies.
        e = self.super_entities[1]
        G(EntityGroupMembership, entity_group=self.group,
            entity=e, sub_entity_kind=self.kind1)
        result = list(self.group.all_entities().order_by('id'))
        expected = [self.sub_entities[2]]
        self.assertEqual(result, expected)
    def test_combined_returned(self):
        # Individual and kind-filtered memberships are unioned together.
        e = self.super_entities[1]
        G(EntityGroupMembership, entity_group=self.group,
            entity=e, sub_entity_kind=self.kind1)
        G(EntityGroupMembership, entity_group=self.group,
            entity=e, sub_entity_kind=None)
        result = list(self.group.all_entities().order_by('id'))
        expected = [e, self.sub_entities[2]]
        self.assertEqual(result, expected)
    def test_all_entities_of_a_kind_returned(self):
        """
        When a group has a sub_entity_kind but a null entity, it should return all entities of that kind
        """
        G(EntityGroupMembership, entity_group=self.group, sub_entity_kind=self.kind1)
        result = list(self.group.all_entities().order_by('id'))
        expected = [self.sub_entities[0], self.sub_entities[1], self.sub_entities[2]]
        self.assertEqual(result, expected)
    def test_filters_groups(self):
        # Memberships belonging to other groups must not leak in.
        other_group = G(EntityGroup)
        e = self.super_entities[1]
        G(EntityGroupMembership, entity_group=self.group,
            entity=e, sub_entity_kind=self.kind1)
        G(EntityGroupMembership, entity_group=self.group,
            entity=e, sub_entity_kind=None)
        G(EntityGroupMembership, entity_group=other_group,
            entity=e, sub_entity_kind=None)
        result = self.group.all_entities().count()
        expected = 2
        self.assertEqual(result, expected)
    def test_number_of_queries(self):
        # all_entities() should resolve any mix of memberships in 3 queries.
        e1 = self.super_entities[1]
        e2 = self.super_entities[2]
        sub1 = self.sub_entities[0]
        # Individual memberships
        G(EntityGroupMembership, entity_group=self.group,
            entity=e1, sub_entity_kind=None)
        G(EntityGroupMembership, entity_group=self.group,
            entity=sub1, sub_entity_kind=None)
        # Group memberships
        G(EntityGroupMembership, entity_group=self.group,
            entity=e1, sub_entity_kind=self.kind1)
        G(EntityGroupMembership, entity_group=self.group,
            entity=e2, sub_entity_kind=self.kind2)
        with self.assertNumQueries(3):
            list(self.group.all_entities())
class EntityGroupAddEntityTest(EntityTestCase):
    """Tests for EntityGroup.add_entity."""
    def test_adds_entity(self):
        """Adding one entity creates a single membership and returns it."""
        entity_group = G(EntityGroup)
        entity = G(Entity)
        returned_membership = entity_group.add_entity(entity)
        membership_count = EntityGroupMembership.objects.filter(entity_group=entity_group).count()
        self.assertEqual(membership_count, 1)
        self.assertIsInstance(returned_membership, EntityGroupMembership)
class EntityGroupBulkAddEntitiesTest(EntityTestCase):
    """Tests for EntityGroup.bulk_add_entities."""
    def test_bulk_adds(self):
        """Bulk-adding (entity, kind) pairs creates one membership per pair."""
        entity_group = G(EntityGroup)
        first_entity, second_entity = G(Entity), G(Entity)
        kind = G(EntityKind)
        memberships = entity_group.bulk_add_entities([(first_entity, kind), (second_entity, None)])
        membership_count = EntityGroupMembership.objects.filter(entity_group=entity_group).count()
        self.assertEqual(membership_count, 2)
        self.assertIsInstance(memberships[0], EntityGroupMembership)
    def test_bulk_add_none_entity(self):
        """A None entity (kind-only membership) is accepted in bulk adds."""
        entity_group = EntityGroup.objects.create()
        entity = G(Entity)
        kind = G(EntityKind)
        entity_group.bulk_add_entities([(None, kind), (entity, kind), (entity, None)])
        membership_count = EntityGroupMembership.objects.filter(entity_group=entity_group).count()
        self.assertEqual(membership_count, 3)
class EntityGroupRemoveEntityTest(EntityTestCase):
    """Tests for EntityGroup.remove_entity."""
    def setUp(self):
        # Call the parent setUp so EntityTestCase fixtures are initialized,
        # matching the sibling test classes (this was previously omitted).
        super(EntityGroupRemoveEntityTest, self).setUp()
        self.group = G(EntityGroup)
        self.e1, self.e2 = G(Entity), G(Entity)
        self.k = G(EntityKind)
        to_add = [(self.e1, self.k), (self.e2, None)]
        self.group.bulk_add_entities(to_add)
    def test_removes_only_selected_no_entity_kind(self):
        """Removing a (entity, None) membership leaves the kinded one intact."""
        self.group.remove_entity(self.e2)
        member = EntityGroupMembership.objects.get(entity_group=self.group)
        expected = self.k
        self.assertEqual(member.sub_entity_kind, expected)
    def test_removes_only_selected_with_entity_kind(self):
        """Removing a (entity, kind) membership leaves the kindless one intact."""
        self.group.remove_entity(self.e1, self.k)
        member = EntityGroupMembership.objects.get(entity_group=self.group)
        expected = None
        self.assertEqual(member.sub_entity_kind, expected)
class EntityGroupBulkRemoveEntitiesTest(EntityTestCase):
    """Tests for EntityGroup.bulk_remove_entities."""
    def setUp(self):
        # Call the parent setUp so EntityTestCase fixtures are initialized,
        # matching the sibling test classes (this was previously omitted).
        super(EntityGroupBulkRemoveEntitiesTest, self).setUp()
        self.group = G(EntityGroup)
        self.e1, self.e2, self.e3 = G(Entity), G(Entity), G(Entity)
        self.k = G(EntityKind)
        to_add = [(self.e1, self.k), (self.e2, None), (self.e3, self.k), (self.e3, None)]
        self.group.bulk_add_entities(to_add)
    def test_only_removes_selected(self):
        """Only the listed (entity, kind) pairs are removed; the rest remain."""
        self.group.bulk_remove_entities([(self.e3, self.k), (self.e2, None)])
        count = EntityGroupMembership.objects.filter(entity_group=self.group).count()
        expected = 2
        self.assertEqual(count, expected)
    def test_num_queries(self):
        """Bulk removal of any number of pairs costs exactly two queries."""
        with self.assertNumQueries(2):
            self.group.bulk_remove_entities([(self.e3, self.k), (self.e2, None)])
class EntityGroupBulkOverwriteEntitiesTest(EntityTestCase):
    """Tests for EntityGroup.bulk_overwrite."""
    def setUp(self):
        # Call the parent setUp so EntityTestCase fixtures are initialized,
        # matching the sibling test classes (this was previously omitted).
        super(EntityGroupBulkOverwriteEntitiesTest, self).setUp()
        self.group = G(EntityGroup)
        self.e1, self.e2, self.e3 = G(Entity), G(Entity), G(Entity)
        self.k = G(EntityKind)
        to_add = [(self.e1, self.k), (self.e2, None), (self.e3, self.k), (self.e3, None)]
        self.group.bulk_add_entities(to_add)
    def test_overwrites(self):
        """bulk_overwrite replaces all existing memberships with the new pairs."""
        to_overwrite = [(self.e1, None), (self.e2, self.k)]
        self.group.bulk_overwrite(to_overwrite)
        count = EntityGroupMembership.objects.filter(entity_group=self.group).count()
        new_members = EntityGroupMembership.objects.values_list('entity', 'sub_entity_kind')
        expected = 2
        self.assertEqual(count, expected)
        self.assertIn((self.e1.id, None), new_members)
    def test_bulk_overwrite_none_entity(self):
        """A None entity (kind-only membership) is accepted in overwrites."""
        group = EntityGroup.objects.create()
        group.bulk_overwrite([(None, self.k)])
        count = EntityGroupMembership.objects.filter(entity_group=group).count()
        self.assertEqual(count, 1)
class EntityGroupTest(TestCase):
    """
    Tests EntityGroup.get_all_entities with the membership/kind caches and
    with active/inactive entity filtering.
    """
    def test_get_all_entities(self):
        """
        Tests that get_all_entities resolves every group shape (individual,
        super+kind, kind-only) from prefetched caches in a fixed query count.
        """
        turn_off_syncing()
        account_type = ContentType.objects.get_for_model(Account)
        team_type = ContentType.objects.get_for_model(Team)
        # Set up teams
        teams = Team.objects.bulk_create([
            Team()
            for i in range(0, 3)
        ])
        # Set up accounts
        Account.objects.bulk_create([
            Account(team=teams[i % 3])
            for i in range(0, 20)
        ])
        # Turn on syncing and do a sync
        turn_on_syncing()
        sync_entities()
        account_entities = list(Entity.all_objects.filter(entity_type=account_type).order_by('entity_id'))
        account_kind = account_entities[0].entity_kind
        team_entities = list(Entity.all_objects.filter(entity_type=team_type).order_by('entity_id'))
        team_kind = team_entities[0].entity_kind
        # Create groups
        EntityGroup.objects.bulk_create([
            EntityGroup()
            for i in range(0, 6)
        ])
        # Refresh for django 1.9 because bulk create does not return ids
        entity_groups = list(EntityGroup.objects.order_by('id'))
        # Set up individual entity groups
        entity_groups[0].bulk_add_entities([
            [account_entities[0], None],
            [account_entities[1], None],
            [account_entities[2], None],
            [account_entities[3], None],
        ])
        entity_groups[1].bulk_add_entities([
            [account_entities[2], None],
            [account_entities[3], None],
            [account_entities[4], None],
            [account_entities[5], None],
        ])
        # Set up sub entity kind of super entity groups
        entity_groups[2].bulk_add_entities([
            [team_entities[0], account_kind],
            [team_entities[1], account_kind],
        ])
        entity_groups[3].bulk_add_entities([
            [team_entities[0], account_kind],
        ])
        # Set up sub entity kind groups
        # This group has two copies of the same set of all accounts as well as all teams
        entity_groups[4].bulk_add_entities([
            [None, account_kind],
            [None, account_kind],
            [None, team_kind],
        ])
        # This group has the same copy of all accounts
        entity_groups[5].bulk_add_entities([
            [None, account_kind],
        ])
        # With pre-built caches, resolving all six groups costs no extra queries
        # beyond the 3 used to build them.
        with self.assertNumQueries(3):
            membership_cache = EntityGroup.objects.get_membership_cache()
            entities_by_kind = get_entities_by_kind(membership_cache=membership_cache)
            for entity_group in entity_groups:
                entity_group.get_all_entities(membership_cache, entities_by_kind)
        with self.assertNumQueries(3):
            get_entities_by_kind()
        # Make sure to hit the no group cache case
        self.assertEqual(entity_groups[0].get_all_entities(membership_cache={1000: []}), set())
    def test_get_all_entities_active(self):
        """
        Makes sure only active entity ids are returned
        """
        turn_off_syncing()
        account_type = ContentType.objects.get_for_model(Account)
        team_type = ContentType.objects.get_for_model(Team)
        # Set up teams
        teams = Team.objects.bulk_create([
            Team()
            for i in range(0, 3)
        ])
        # Set up accounts
        accounts = Account.objects.bulk_create([
            Account(team=teams[i % 3])
            for i in range(0, 20)
        ])
        # Deactivate one account before syncing so its entity mirrors inactive.
        accounts[0].is_active = False
        accounts[0].save()
        # Turn on syncing and do a sync
        turn_on_syncing()
        sync_entities()
        account_entities = list(Entity.all_objects.filter(entity_type=account_type).order_by('entity_id'))
        # Create groups
        EntityGroup.objects.bulk_create([
            EntityGroup()
            for i in range(0, 6)
        ])
        # Refresh for django 1.9 because bulk create does not return ids
        entity_groups = list(EntityGroup.objects.order_by('id'))
        # Set up individual entity groups
        entity_groups[0].bulk_add_entities([
            [account_entities[0], None],
            [account_entities[1], None],
            [account_entities[2], None],
            [account_entities[3], None],
        ])
        # Group 0 consists of an inactive account (0) and 3 actives (1-3)
        self.assertEqual(len(entity_groups[0].get_all_entities()), 3)
        self.assertEqual(len(entity_groups[0].get_all_entities(is_active=False)), 1)
        self.assertEqual(len(entity_groups[0].get_all_entities(is_active=None)), 4)
        # Check the same thing for an entity group defined by a super entity and sub entity kind
        team_entities = list(Entity.all_objects.filter(entity_type=team_type).order_by('entity_id'))
        entity_groups[1].bulk_add_entities([
            [team_entities[0], account_entities[0].entity_kind]
        ])
        # Team 0 has accounts 0, 3, 6, 9, 12, 15, 18
        # account 0 is inactive
        self.assertEqual(len(entity_groups[1].get_all_entities()), 6)
        self.assertEqual(len(entity_groups[1].get_all_entities(is_active=False)), 1)
        self.assertEqual(len(entity_groups[1].get_all_entities(is_active=None)), 7)
        entity_ids = list(entity_groups[1].get_all_entities())
        # Make 3 more of them inactive
        entity_ids = [
            entity_id
            for entity_id in entity_ids[0:3]
        ]
        Entity.objects.filter(id__in=entity_ids).update(is_active=False)
        self.assertEqual(len(entity_groups[1].get_all_entities()), 3)
        self.assertEqual(len(entity_groups[1].get_all_entities(is_active=False)), 4)
        self.assertEqual(len(entity_groups[1].get_all_entities(is_active=None)), 7)
        # Set a super to inactive and make sure the active subs are ignored
        team_entities = list(Entity.all_objects.filter(entity_type=team_type).order_by('entity_id'))
        entity_groups[2].bulk_add_entities([
            [team_entities[1], account_entities[0].entity_kind]
        ])
        Entity.objects.filter(id=team_entities[1].id).update(is_active=False)
        # Nothing should be returned because the super is inactive which makes the group invalid
        self.assertEqual(len(entity_groups[2].get_all_entities()), 0)
        self.assertEqual(len(entity_groups[2].get_all_entities(is_active=False)), 0)
        self.assertEqual(len(entity_groups[2].get_all_entities(is_active=None)), 7)
| |
#!/usr/bin/env python
## FIXME: move these ones to the config file
ZMQ_ADDR='ipc:///tmp/eq/ether_inspector'
import colorama
from scapy.all import *
import pyearthquake
from pyearthquake.inspector.ether import *
from pyearthquake.signal.signal import *
from pyearthquake.signal.event import *
from pyearthquake.signal.action import *
import hexdump as hd # hexdump conflict with scapy.all.hexdump
import zktraffic # tested with b3e9dd0 (Jun 4 2015)
import zktraffic.base.client_message
import zktraffic.base.server_message
import zktraffic.zab.quorum_packet
import zktraffic.fle.message
LOG = pyearthquake.LOG.getChild(__name__)
# terrible table (TODO: move to config.json)
# Maps inspected ZooKeeper node IP addresses to Earthquake process ids.
pid_table = {
    '192.168.42.1': 'zk1',
    '192.168.42.2': 'zk2',
    '192.168.42.3': 'zk3',
}
class Util(object):
    """Helpers shared by the scapy dissector classes below.

    Kept as classmethods on a plain object because scapy dislikes
    inheritance from its own Packet class.
    """
    ## scapy hates inheritance from scapy.all.Packet class
    @classmethod
    def ip2pid(cls, ip):
        """Map an IP address to an Earthquake process id via pid_table."""
        # TODO: move to pyearthquake.util
        try:
            return pid_table[ip]
        except KeyError:
            return '_unknown_process_%s' % (ip)
    @classmethod
    def make_zktraffic_packet(cls, klazz, s, zk_klazz):
        """Parse raw payload `s` into a zktraffic message of type `zk_klazz`."""
        tcp, ip = klazz.underlayer, klazz.underlayer.underlayer
        zt = zk_klazz.from_payload(s, '%s:%d' % (ip.src, tcp.sport), '%s:%d' % (ip.dst, tcp.dport), time.time())
        return zt
    @classmethod
    def _make_message_from_zt(cls, klazz, zt):
        """Flatten a zktraffic object into a plain dict for PacketEvent."""
        msg = { 'class_group': klazz.name, 'class': zt.__class__.__name__ }
        ignore_keys = ('timestr', 'src', 'dst', 'length', 'session_id', 'client_id', 'txn_time', 'txn_zxid', 'timeout', 'timestamp', 'ip', 'port', 'session', 'client') # because client port may differ
        def gen():
            # Pick up public scalar attributes, preferring the human-readable
            # '<name>_literal' variant when zktraffic provides one.
            for k in dir(zt):
                v = getattr(zt, k)
                cond = (isinstance(v, int) or isinstance(v, basestring)) and \
                    not k.isupper() and not k.startswith('_') and not '_literal' in k \
                    and not k == 'type'
                if cond:
                    alt_k = '%s_literal' % k
                    if hasattr(zt, alt_k):
                        v = getattr(zt, alt_k)
                    yield k, v
        for k, v in gen():
            if k in ignore_keys: continue
            if k == 'zxid':
                # A ZooKeeper zxid is 64 bits: epoch in the high 32 bits,
                # counter in the low 32 bits. The low mask was previously
                # 0xFFFF, which truncated the counter to 16 bits.
                msg['zxid_hi'] = v >> 32
                msg['zxid_low'] = v & 0xFFFFFFFF
                continue
            msg[k] = v
        return msg
    @classmethod
    def make_zktraffic_event(cls, klazz, zt):
        """Build a PacketEvent from a dissected zktraffic message."""
        tcp, ip = klazz.underlayer, klazz.underlayer.underlayer
        src_process, dst_process = cls.ip2pid(ip.src), cls.ip2pid(ip.dst)
        message = cls._make_message_from_zt(klazz, zt)
        event = PacketEvent.from_message(src_process, dst_process, message)
        return event
    # The ZooKeeper "four letter word" admin commands we recognize.
    FOUR_LETTER_WORDS = ('dump', 'envi', 'kill', 'reqs', 'ruok', 'srst', 'stat')
    @classmethod
    def make_four_letter_event(cls, klazz, four_letter, reply=None):
        """Build a PacketEvent for a four-letter admin command (or its reply)."""
        tcp, ip = klazz.underlayer, klazz.underlayer.underlayer
        src_process, dst_process = cls.ip2pid(ip.src), cls.ip2pid(ip.dst)
        message = {'class_group': 'ZkFourLetterPacket', 'class': four_letter}
        if reply: message['reply'] = reply
        event = PacketEvent.from_message(src_process, dst_process, message)
        return event
    @classmethod
    def make_summary(cls, klazz):
        """One-line summary of a dissected packet (its event repr)."""
        s = str(klazz.event)
        return s
    @classmethod
    def print_dissection_error(cls, inst, s, e):
        """Log a dissection failure with src/dst context and a hexdump of `s`."""
        tcp, ip = inst.underlayer, inst.underlayer.underlayer
        src, dst = intern("%s:%d" % (ip.src, tcp.sport)), intern("%s:%d" % (ip.dst, tcp.dport))
        LOG.error('Error while dissecting %s (%s->%s)', inst.name, src, dst)
        LOG.exception(e)
        try:
            LOG.error('Hexdump (%d bytes)', len(s))
            for line in hd.hexdump(s, result='generator'):
                LOG.error('%s', line)
        except:
            LOG.error('Error while hexdumping', exc_info=True)
class ZkQuorumPacket(Packet):
    """Dissector for ZooKeeper Zab quorum traffic (leader<->follower, port 2888)."""
    name = 'ZkQuorumPacket'
    longname = 'ZooKeeper Zab Quorum Packet'
    def do_dissect_payload(self, s):
        try:
            self.zt = Util.make_zktraffic_packet(self, s, zktraffic.zab.quorum_packet.QuorumPacket)
            self.event = Util.make_zktraffic_event(self, self.zt)
            self.add_payload(Raw(s))
        except Exception as e:
            Util.print_dissection_error(self, s, e)
            # bare raise re-raises with the original traceback intact
            # (`raise e` would reset it under Python 2)
            raise
    def mysummary(self):
        return Util.make_summary(self)
class ZkFLEPacket(Packet):
    """Dissector for ZooKeeper Fast Leader Election traffic (port 3888)."""
    name = 'ZkFLEPacket'
    longname = 'ZooKeeper Fast Leader Election Packet'
    def do_dissect_payload(self, s):
        try:
            self.zt = Util.make_zktraffic_packet(self, s, zktraffic.fle.message.Message)
            self.event = Util.make_zktraffic_event(self, self.zt)
            self.add_payload(Raw(s))
        except Exception as e:
            Util.print_dissection_error(self, s, e)
            # bare raise re-raises with the original traceback intact
            # (`raise e` would reset it under Python 2)
            raise
    def mysummary(self):
        return Util.make_summary(self)
# Shared dissection state for ZkPacket (see its side-effect warning):
# per-client map of in-flight request xids, used to pair server replies.
_requests_xids = defaultdict(dict)
_four_letter_mode = defaultdict(dict) # key: client addr, val: four letter
class ZkPacket(Packet):
    """
    port 2181 packet
    ATTENTION! this dissector has side-effects
    (it records in-flight xids and four-letter mode in module-level dicts)
    """
    name = 'ZkPacket'
    longname = 'ZooKeeper C/S Packet'
    def _get_four_letter_mode(self, client):
        """Return the pending four-letter command for `client`, or None."""
        global _four_letter_mode
        if client in _four_letter_mode:
            return _four_letter_mode[client]
        else:
            return None
    def _set_four_letter_mode(self, client, four_letter=None):
        """Record (or, with None, clear) the pending four-letter command."""
        global _four_letter_mode
        if four_letter:
            _four_letter_mode[client] = four_letter
        else:
            del _four_letter_mode[client]
    def do_dissect_payload(self, s):
        try:
            ## this complicated logic is based on zktraffic.base.sniffer.Sniffer
            global _requests_xids
            tcp, ip = self.underlayer, self.underlayer.underlayer
            src, dst = intern("%s:%d" % (ip.src, tcp.sport)), intern("%s:%d" % (ip.dst, tcp.dport))
            assert tcp.dport == 2181 or tcp.sport == 2181
            if tcp.dport == 2181:
                # client -> server direction
                client, server = src, dst
                if s.startswith(Util.FOUR_LETTER_WORDS):
                    # remember the command so the reply can be matched
                    self._set_four_letter_mode(client, s[0:4])
                    self.event = Util.make_four_letter_event(self, s[0:4])
                else:
                    self.zt = zktraffic.base.client_message.ClientMessage.from_payload(s, client, server)
                    if not self.zt.is_ping and not self.zt.is_close:
                        # remember the opcode so the reply xid can be decoded
                        _requests_xids[self.zt.client][self.zt.xid] = self.zt.opcode
                    self.event = Util.make_zktraffic_event(self, self.zt)
            if tcp.sport == 2181:
                # server -> client direction
                client, server = dst, src
                four_letter = self._get_four_letter_mode(client)
                if four_letter:
                    self.event = Util.make_four_letter_event(self, four_letter, reply=s)
                    self._set_four_letter_mode(client, None)
                else:
                    requests_xids = _requests_xids.get(client, {})
                    self.zt = zktraffic.base.server_message.ServerMessage.from_payload(s, client, server, requests_xids)
                    self.event = Util.make_zktraffic_event(self, self.zt)
            self.add_payload(Raw(s))
        except Exception as e:
            Util.print_dissection_error(self, s, e)
            # bare raise re-raises with the original traceback intact
            # (`raise e` would reset it under Python 2)
            raise
    def mysummary(self):
        return Util.make_summary(self)
IGNORE_PING = False  # when True, Zab Ping packets are dropped instead of mapped to events
class ZkInspector(EtherInspectorBase):
    """Ethernet inspector that registers the three ZooKeeper dissectors and
    converts captured packets into Earthquake events."""
    def __init__(self):
        super(ZkInspector, self).__init__(zmq_addr=ZMQ_ADDR)
        # Register dissectors on the well-known ZooKeeper ports.
        self.regist_layer_on_tcp(ZkQuorumPacket, 2888)
        self.regist_layer_on_tcp(ZkFLEPacket, 3888)
        self.regist_layer_on_tcp(ZkPacket, 2181)
    def _print_packet_as(self, pkt, klazz, color):
        # Colorized debug line so the three packet families are easy to tell apart.
        LOG.debug(color + 'Received packet, event=%s' + colorama.Style.RESET_ALL,
                  pkt[klazz].mysummary())
    def _map_ZkQuorumPacket_to_event(self, pkt):
        event = pkt[ZkQuorumPacket].event
        msg = event.option['message']
        # Optionally drop quorum heartbeats to cut down noise.
        if IGNORE_PING and (msg['class_group'] == 'ZkQuorumPacket' and msg['class'] == 'Ping'):
            return None
        self._print_packet_as(pkt, ZkQuorumPacket, colorama.Back.WHITE + colorama.Fore.BLACK)
        return event
    def _map_ZkFLEPacket_to_event(self, pkt):
        event = pkt[ZkFLEPacket].event
        self._print_packet_as(pkt, ZkFLEPacket, colorama.Back.CYAN + colorama.Fore.BLACK)
        return event
    def _map_ZkPacket_to_event(self, pkt):
        event = pkt[ZkPacket].event
        msg = event.option['message']
        self._print_packet_as(pkt, ZkPacket, colorama.Back.BLUE + colorama.Fore.WHITE)
        # Four-letter admin commands are logged but not turned into events.
        if msg['class_group'] == 'ZkFourLetterPacket':
            return None
        return event
    def map_packet_to_event(self, pkt):
        """
        return None if this packet is NOT interesting at all.
        """
        if pkt.haslayer(ZkQuorumPacket):
            return self._map_ZkQuorumPacket_to_event(pkt)
        elif pkt.haslayer(ZkFLEPacket):
            return self._map_ZkFLEPacket_to_event(pkt)
        elif pkt.haslayer(ZkPacket):
            return self._map_ZkPacket_to_event(pkt)
        else:
            # LOG.debug('%s unknown packet: %s', self.__class__.__name__, pkt.mysummary())
            return None
if __name__ == '__main__':
    # Run the inspector standalone; it connects to the orchestrator at ZMQ_ADDR.
    d = ZkInspector()
    d.start()
| |
#-----------------------------------------------------------
# Threaded, Gevent and Prefork Servers
#-----------------------------------------------------------
import datetime
import errno
import logging
import os
import os.path
import platform
import psutil
import random
if os.name == 'posix':
import resource
else:
resource = None
import select
import signal
import socket
import subprocess
import sys
import threading
import time
import unittest2
import werkzeug.serving
try:
import fcntl
except ImportError:
pass
try:
from setproctitle import setproctitle
except ImportError:
setproctitle = lambda x: None
import openerp
from openerp.modules.registry import RegistryManager
from openerp.release import nt_service_name
import openerp.tools.config as config
from openerp.tools.misc import stripped_sys_argv, dumpstacks
_logger = logging.getLogger(__name__)
SLEEP_INTERVAL = 60 # 1 min
#----------------------------------------------------------
# Werkzeug WSGI servers patched
#----------------------------------------------------------
class LoggingBaseWSGIServerMixIn(object):
    """Mix-in logging unexpected request-handling errors, while silently
    ignoring broken pipes caused by clients disconnecting mid-response."""
    def handle_error(self, request, client_address):
        e = sys.exc_info()[1]
        # isinstance() also matches subclasses of socket.error, unlike the
        # previous exact-type comparison against sys.exc_info()[0].
        if isinstance(e, socket.error) and e.errno == errno.EPIPE:
            # broken pipe, ignore error
            return
        _logger.exception('Exception happened during processing of request from %s', client_address)
class BaseWSGIServerNoBind(LoggingBaseWSGIServerMixIn, werkzeug.serving.BaseWSGIServer):
    """ werkzeug Base WSGI Server patched to skip socket binding. PreforkServer
    use this class, sets the socket and calls the process_request() manually
    """
    def __init__(self, app):
        # Host/port "1"/"1" are dummy values: the real listen socket is
        # injected by PreforkServer, never bound here.
        werkzeug.serving.BaseWSGIServer.__init__(self, "1", "1", app)
    def server_bind(self):
        # we don't bind because we use the listen socket of PreforkServer#socket
        # instead we close the socket
        if self.socket:
            self.socket.close()
    def server_activate(self):
        # don't listen as we use PreforkServer#socket
        pass
class RequestHandler(werkzeug.serving.WSGIRequestHandler):
    """WSGI request handler that names its worker thread for diagnostics."""
    def setup(self):
        # flag the current thread as handling a http request
        super(RequestHandler, self).setup()
        me = threading.currentThread()
        # A recognizable thread name makes dumpstacks() output readable.
        me.name = 'openerp.service.http.request.%s' % (me.ident,)
# _reexec() should set LISTEN_* to avoid connection refused during reload time. It
# should also work with systemd socket activation. This is currently untested
# and not yet used.
class ThreadedWSGIServerReloadable(LoggingBaseWSGIServerMixIn, werkzeug.serving.ThreadedWSGIServer):
    """ werkzeug Threaded WSGI Server patched to allow reusing a listen socket
    given by the environment, this is used by autoreload to keep the listen
    socket open when a reload happens.
    """
    def __init__(self, host, port, app):
        super(ThreadedWSGIServerReloadable, self).__init__(host, port, app,
                                                           handler=RequestHandler)
    def server_bind(self):
        # Reuse an inherited listen fd (LISTEN_FDS/LISTEN_PID convention)
        # instead of binding a new socket, so reloads don't drop connections.
        # NOTE(review): in the systemd protocol LISTEN_FDS is a *count* of fds
        # starting at fd 3, not an fd number — passing it to fromfd() looks
        # suspect; confirm (the header above says this path is untested).
        envfd = os.environ.get('LISTEN_FDS')
        if envfd and os.environ.get('LISTEN_PID') == str(os.getpid()):
            self.reload_socket = True
            self.socket = socket.fromfd(int(envfd), socket.AF_INET, socket.SOCK_STREAM)
            # should we os.close(int(envfd)) ? it seem python duplicate the fd.
        else:
            self.reload_socket = False
            super(ThreadedWSGIServerReloadable, self).server_bind()
    def server_activate(self):
        # Only start listening when we bound the socket ourselves.
        if not self.reload_socket:
            super(ThreadedWSGIServerReloadable, self).server_activate()
#----------------------------------------------------------
# AutoReload watcher
#----------------------------------------------------------
class AutoReload(object):
    """Watches the addons paths with pyinotify and triggers a server restart
    when XML data files change or when Python files change and still compile."""
    def __init__(self, server):
        self.server = server
        self.files = {}      # pathname -> 1 for files touched since last scan
        self.modules = {}    # module name -> 1 for modules with xml changes
        import pyinotify
        class EventHandler(pyinotify.ProcessEvent):
            def __init__(self, autoreload):
                self.autoreload = autoreload
            def process_IN_CREATE(self, event):
                _logger.debug('File created: %s', event.pathname)
                self.autoreload.files[event.pathname] = 1
            def process_IN_MODIFY(self, event):
                _logger.debug('File modified: %s', event.pathname)
                self.autoreload.files[event.pathname] = 1
        self.wm = pyinotify.WatchManager()
        self.handler = EventHandler(self)
        self.notifier = pyinotify.Notifier(self.wm, self.handler, timeout=0)
        mask = pyinotify.IN_MODIFY | pyinotify.IN_CREATE  # IN_MOVED_FROM, IN_MOVED_TO ?
        for path in openerp.modules.module.ad_paths:
            _logger.info('Watching addons folder %s', path)
            self.wm.add_watch(path, mask, rec=True)
    def process_data(self, files):
        """Restart if any touched .xml file belongs to an addons module."""
        xml_files = [i for i in files if i.endswith('.xml')]
        for i in xml_files:
            for path in openerp.modules.module.ad_paths:
                if i.startswith(path):
                    # find out which addons path the file belongs to
                    # and extract its module name
                    right = i[len(path) + 1:].split('/')
                    if len(right) < 2:
                        continue
                    module = right[0]
                    self.modules[module] = 1
        if self.modules:
            _logger.info('autoreload: xml change detected, autoreload activated')
            restart()
    def process_python(self, files):
        """Restart if touched .py files all compile; otherwise log the errors."""
        # process python changes
        py_files = [i for i in files if i.endswith('.py')]
        py_errors = []
        # TODO keep python errors until they are ok
        if py_files:
            for i in py_files:
                try:
                    # use a context manager so the file handle is closed
                    # deterministically (it was previously leaked)
                    with open(i, 'rb') as source_file:
                        source = source_file.read() + '\n'
                    compile(source, i, 'exec')
                except SyntaxError:
                    py_errors.append(i)
            if py_errors:
                _logger.info('autoreload: python code change detected, errors found')
                for i in py_errors:
                    _logger.info('autoreload: SyntaxError %s', i)
            else:
                _logger.info('autoreload: python code updated, autoreload activated')
                restart()
    def check_thread(self):
        # Check if some files have been touched in the addons path.
        # If true, check if the touched file belongs to an installed module
        # in any of the database used in the registry manager.
        while 1:
            while self.notifier.check_events(1000):
                self.notifier.read_events()
                self.notifier.process_events()
            l = list(self.files.keys())
            self.files.clear()
            self.process_data(l)
            self.process_python(l)
    def run(self):
        t = threading.Thread(target=self.check_thread)
        t.daemon = True  # daemon attribute instead of deprecated setDaemon()
        t.start()
        _logger.info('AutoReload watcher running')
#----------------------------------------------------------
# Servers: Threaded, Gevented and Prefork
#----------------------------------------------------------
class CommonServer(object):
    """Common base for the Threaded, Gevent and Prefork servers: stores the
    WSGI application, the bind interface/port from the configuration and the
    pid of the creating process."""
    def __init__(self, app):
        # TODO Change the xmlrpc_* options to http_*
        self.app = app
        # config
        self.interface = config['xmlrpc_interface'] or '0.0.0.0'
        self.port = config['xmlrpc_port']
        # runtime
        self.pid = os.getpid()

    def close_socket(self, sock):
        """ Closes a socket instance cleanly

        :param sock: the network socket to close
        :type sock: socket.socket
        """
        try:
            sock.shutdown(socket.SHUT_RDWR)
        except socket.error, e:
            # On OSX, socket shutdowns both sides if any side closes it
            # causing an error 57 'Socket is not connected' on shutdown
            # of the other side (or something), see
            # http://bugs.python.org/issue4397
            # note: stdlib fixed test, not behavior
            # -> swallow ENOTCONN only on Darwin/Windows, re-raise elsewhere
            if e.errno != errno.ENOTCONN or platform.system() not in ['Darwin', 'Windows']:
                raise
        sock.close()
class ThreadedServer(CommonServer):
    """Single-process server: a threaded werkzeug HTTP daemon plus a pool of
    cron polling threads.  Graceful shutdown is driven by counting received
    SIGINT/SIGTERM signals; a second signal forces an immediate exit."""
    def __init__(self, app):
        super(ThreadedServer, self).__init__(app)
        self.main_thread_id = threading.currentThread().ident
        # Variable keeping track of the number of calls to the signal handler defined
        # below. This variable is monitored by ``quit_on_signals()``.
        self.quit_signals_received = 0

        #self.socket = None
        self.httpd = None

    def signal_handler(self, sig, frame):
        if sig in [signal.SIGINT, signal.SIGTERM]:
            # shutdown on kill -INT or -TERM
            self.quit_signals_received += 1
            if self.quit_signals_received > 1:
                # logging.shutdown was already called at this point.
                sys.stderr.write("Forced shutdown.\n")
                os._exit(0)
        elif sig == signal.SIGHUP:
            # restart on kill -HUP
            openerp.phoenix = True
            self.quit_signals_received += 1

    def cron_thread(self, number):
        # Endless polling loop: the per-thread offset on the sleep interval
        # de-synchronizes the cron threads; each pass drains as many ready
        # jobs as _acquire_job() will hand out for every ready registry.
        while True:
            time.sleep(SLEEP_INTERVAL + number)  # Steve Reich timing style
            registries = openerp.modules.registry.RegistryManager.registries
            _logger.debug('cron%d polling for jobs', number)
            for db_name, registry in registries.items():
                while True and registry.ready:
                    acquired = openerp.addons.base.ir.ir_cron.ir_cron._acquire_job(db_name)
                    if not acquired:
                        break

    def cron_spawn(self):
        """ Start the above runner function in a daemon thread.

        The thread is a typical daemon thread: it will never quit and must be
        terminated when the main process exits - with no consequence (the processing
        threads it spawns are not marked daemon).
        """
        # Force call to strptime just before starting the cron thread
        # to prevent time.strptime AttributeError within the thread.
        # See: http://bugs.python.org/issue7980
        datetime.datetime.strptime('2012-01-01', '%Y-%m-%d')
        for i in range(openerp.tools.config['max_cron_threads']):
            # NOTE(review): target closes over the loop variable ``i`` (late
            # binding); each thread is started before the next iteration so
            # this works in practice, but binding ``i`` as a default argument
            # would be safer — confirm before relying on it.
            def target():
                self.cron_thread(i)
            t = threading.Thread(target=target, name="openerp.service.cron.cron%d" % i)
            t.setDaemon(True)
            t.start()
            _logger.debug("cron%d started!" % i)

    def http_thread(self):
        def app(e, s):
            return self.app(e, s)
        self.httpd = ThreadedWSGIServerReloadable(self.interface, self.port, app)
        self.httpd.serve_forever()

    def http_spawn(self):
        t = threading.Thread(target=self.http_thread, name="openerp.service.httpd")
        t.setDaemon(True)
        t.start()
        _logger.info('HTTP service (werkzeug) running on %s:%s', self.interface, self.port)

    def start(self, stop=False):
        """Install signal handlers and spawn the HTTP/cron threads.

        :param stop: "--stop-after-init" mode; cron is skipped and HTTP is
                     only started when the test machinery needs it.
        """
        _logger.debug("Setting signal handlers")
        if os.name == 'posix':
            signal.signal(signal.SIGINT, self.signal_handler)
            signal.signal(signal.SIGTERM, self.signal_handler)
            signal.signal(signal.SIGCHLD, self.signal_handler)
            signal.signal(signal.SIGHUP, self.signal_handler)
            signal.signal(signal.SIGQUIT, dumpstacks)
        elif os.name == 'nt':
            import win32api
            win32api.SetConsoleCtrlHandler(lambda sig: self.signal_handler(sig, None), 1)

        test_mode = config['test_enable'] or config['test_file']
        if not stop or test_mode:
            # some tests need the http deamon to be available...
            self.http_spawn()

        if not stop:
            # only relevant if we are not in "--stop-after-init" mode
            self.cron_spawn()

    def stop(self):
        """ Shutdown the WSGI server. Wait for non deamon threads.
        """
        _logger.info("Initiating shutdown")
        _logger.info("Hit CTRL-C again or send a second signal to force the shutdown.")
        if self.httpd:
            self.httpd.shutdown()
            self.close_socket(self.httpd.socket)

        # Manually join() all threads before calling sys.exit() to allow a second signal
        # to trigger _force_quit() in case some non-daemon threads won't exit cleanly.
        # threading.Thread.join() should not mask signals (at least in python 2.5).
        me = threading.currentThread()
        _logger.debug('current thread: %r', me)
        for thread in threading.enumerate():
            _logger.debug('process %r (%r)', thread, thread.isDaemon())
            if thread != me and not thread.isDaemon() and thread.ident != self.main_thread_id:
                while thread.isAlive():
                    _logger.debug('join and sleep')
                    # Need a busyloop here as thread.join() masks signals
                    # and would prevent the forced shutdown.
                    thread.join(0.05)
                    time.sleep(0.05)

        _logger.debug('--')
        openerp.modules.registry.RegistryManager.delete_all()
        logging.shutdown()

    def run(self, preload=None, stop=False):
        """ Start the http server and the cron thread then wait for a signal.

        The first SIGINT or SIGTERM signal will initiate a graceful shutdown while
        a second one if any will force an immediate exit.
        """
        self.start(stop=stop)

        rc = preload_registries(preload)

        if stop:
            self.stop()
            return rc

        # Wait for a first signal to be handled. (time.sleep will be interrupted
        # by the signal handler.) The try/except is for the win32 case.
        try:
            while self.quit_signals_received == 0:
                time.sleep(60)
        except KeyboardInterrupt:
            pass

        self.stop()

    def reload(self):
        # Trigger the SIGHUP branch of signal_handler() in this same process.
        os.kill(self.pid, signal.SIGHUP)
class GeventServer(CommonServer):
    """Evented (gevent) server used for the longpolling endpoint; binds on
    ``longpolling_port`` instead of the regular HTTP port."""
    def __init__(self, app):
        super(GeventServer, self).__init__(app)
        self.port = config['longpolling_port']
        self.httpd = None

    def watch_parent(self, beat=4):
        """Poll every *beat* seconds and self-terminate when the parent
        process changes (i.e. the spawning server died)."""
        import gevent
        ppid = os.getppid()
        while True:
            if ppid != os.getppid():
                pid = os.getpid()
                _logger.info("LongPolling (%s) Parent changed", pid)
                # suicide !!
                os.kill(pid, signal.SIGTERM)
                return
            gevent.sleep(beat)

    def start(self):
        import gevent
        from gevent.wsgi import WSGIServer

        if os.name == 'posix':
            signal.signal(signal.SIGQUIT, dumpstacks)

        gevent.spawn(self.watch_parent)
        self.httpd = WSGIServer((self.interface, self.port), self.app)
        _logger.info('Evented Service (longpolling) running on %s:%s', self.interface, self.port)
        self.httpd.serve_forever()

    def stop(self):
        import gevent
        self.httpd.stop()
        gevent.shutdown()

    def run(self, preload, stop):
        # serve_forever() blocks inside start(); stop() runs once it returns.
        self.start()
        self.stop()
class PreforkServer(CommonServer):
    """ Multiprocessing inspired by (g)unicorn.
    PreforkServer (aka Multicorn) currently uses accept(2) as dispatching
    method between workers but we plan to replace it by a more intelligent
    dispatcher to will parse the first HTTP request line.
    """
    def __init__(self, app):
        # config
        self.address = (config['xmlrpc_interface'] or '0.0.0.0', config['xmlrpc_port'])
        self.population = config['workers']
        self.timeout = config['limit_time_real']
        self.limit_request = config['limit_request']
        # working vars
        self.beat = 4
        self.app = app
        self.pid = os.getpid()
        self.socket = None
        self.workers_http = {}
        self.workers_cron = {}
        self.workers = {}
        self.generation = 0
        self.queue = []
        self.long_polling_pid = None

    def pipe_new(self):
        """Return a new (read_fd, write_fd) pipe, non-blocking and
        close-on-exec on both ends."""
        pipe = os.pipe()
        for fd in pipe:
            # non_blocking
            flags = fcntl.fcntl(fd, fcntl.F_GETFL) | os.O_NONBLOCK
            fcntl.fcntl(fd, fcntl.F_SETFL, flags)
            # close_on_exec
            flags = fcntl.fcntl(fd, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
            fcntl.fcntl(fd, fcntl.F_SETFD, flags)
        return pipe

    def pipe_ping(self, pipe):
        # One-byte write used both as watchdog heartbeat and select() wakeup;
        # a full (EAGAIN) or interrupted (EINTR) pipe is silently ignored.
        try:
            os.write(pipe[1], '.')
        except IOError, e:
            if e.errno not in [errno.EAGAIN, errno.EINTR]:
                raise

    def signal_handler(self, sig, frame):
        # Queue the signal for synchronous handling in process_signals();
        # SIGCHLD is never dropped so zombies always get reaped.
        if len(self.queue) < 5 or sig == signal.SIGCHLD:
            self.queue.append(sig)
            self.pipe_ping(self.pipe)
        else:
            _logger.warn("Dropping signal: %s", sig)

    def worker_spawn(self, klass, workers_registry):
        """Fork a new *klass* worker; the parent registers and returns it,
        the child runs the worker loop and exits."""
        self.generation += 1
        worker = klass(self)
        pid = os.fork()
        if pid != 0:
            worker.pid = pid
            self.workers[pid] = worker
            workers_registry[pid] = worker
            return worker
        else:
            worker.run()
            sys.exit(0)

    def long_polling_spawn(self):
        # Launch the gevent longpolling server as a sibling of this script.
        nargs = stripped_sys_argv()
        cmd = nargs[0]
        cmd = os.path.join(os.path.dirname(cmd), "openerp-gevent")
        nargs[0] = cmd
        popen = subprocess.Popen([sys.executable] + nargs)
        self.long_polling_pid = popen.pid

    def worker_pop(self, pid):
        """Forget a terminated worker and release its watchdog pipe fds."""
        if pid in self.workers:
            _logger.debug("Worker (%s) unregistered", pid)
            try:
                self.workers_http.pop(pid, None)
                self.workers_cron.pop(pid, None)
                u = self.workers.pop(pid)
                u.close()
            except OSError:
                return

    def worker_kill(self, pid, sig):
        try:
            os.kill(pid, sig)
        except OSError, e:
            # ESRCH: the worker is already gone; just unregister it.
            if e.errno == errno.ESRCH:
                self.worker_pop(pid)

    def process_signals(self):
        """Drain the signal queue filled by signal_handler()."""
        while len(self.queue):
            sig = self.queue.pop(0)
            if sig in [signal.SIGINT, signal.SIGTERM]:
                raise KeyboardInterrupt
            elif sig == signal.SIGHUP:
                # restart on kill -HUP
                openerp.phoenix = True
                raise KeyboardInterrupt
            elif sig == signal.SIGQUIT:
                # dump stacks on kill -3
                # NOTE(review): no dumpstacks method is defined on this class
                # in this file; the module-level dumpstacks() used elsewhere
                # was presumably intended — verify.
                self.dumpstacks()
            elif sig == signal.SIGTTIN:
                # increase number of workers
                self.population += 1
            elif sig == signal.SIGTTOU:
                # decrease number of workers
                self.population -= 1

    def process_zombie(self):
        # reap dead workers
        while 1:
            try:
                wpid, status = os.waitpid(-1, os.WNOHANG)
                if not wpid:
                    break
                # exit status 3 is reserved for unrecoverable worker errors
                if (status >> 8) == 3:
                    msg = "Critial worker error (%s)"
                    _logger.critical(msg, wpid)
                    raise Exception(msg % wpid)
                self.worker_pop(wpid)
            except OSError, e:
                if e.errno == errno.ECHILD:
                    break
                raise

    def process_timeout(self):
        # SIGKILL any worker whose watchdog heartbeat is older than its timeout.
        now = time.time()
        for (pid, worker) in self.workers.items():
            if worker.watchdog_timeout is not None and \
                    (now - worker.watchdog_time) >= worker.watchdog_timeout:
                _logger.error("Worker (%s) timeout", pid)
                self.worker_kill(pid, signal.SIGKILL)

    def process_spawn(self):
        # Top up the HTTP/cron worker pools and the longpolling subprocess.
        while len(self.workers_http) < self.population:
            self.worker_spawn(WorkerHTTP, self.workers_http)
        while len(self.workers_cron) < config['max_cron_threads']:
            self.worker_spawn(WorkerCron, self.workers_cron)
        if not self.long_polling_pid:
            self.long_polling_spawn()

    def sleep(self):
        """Block up to self.beat seconds on the watchdog/wakeup pipes and
        refresh the heartbeat timestamp of every worker that pinged."""
        try:
            # map of fd -> worker
            fds = dict([(w.watchdog_pipe[0], w) for k, w in self.workers.items()])
            fd_in = fds.keys() + [self.pipe[0]]
            # check for ping or internal wakeups
            ready = select.select(fd_in, [], [], self.beat)
            # update worker watchdogs
            for fd in ready[0]:
                if fd in fds:
                    fds[fd].watchdog_time = time.time()
                try:
                    # empty pipe
                    while os.read(fd, 1):
                        pass
                except OSError, e:
                    if e.errno not in [errno.EAGAIN]:
                        raise
        except select.error, e:
            if e[0] not in [errno.EINTR]:
                raise

    def start(self):
        # wakeup pipe, python doesnt throw EINTR when a syscall is interrupted
        # by a signal simulating a pseudo SA_RESTART. We write to a pipe in the
        # signal handler to overcome this behaviour
        self.pipe = self.pipe_new()
        # set signal handlers
        signal.signal(signal.SIGINT, self.signal_handler)
        signal.signal(signal.SIGTERM, self.signal_handler)
        signal.signal(signal.SIGHUP, self.signal_handler)
        signal.signal(signal.SIGCHLD, self.signal_handler)
        signal.signal(signal.SIGTTIN, self.signal_handler)
        signal.signal(signal.SIGTTOU, self.signal_handler)
        signal.signal(signal.SIGQUIT, dumpstacks)

        # listen to socket: the listen socket is shared by all forked workers,
        # which dispatch by competing on accept(2)
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.setblocking(0)
        self.socket.bind(self.address)
        self.socket.listen(8 * self.population)

    def stop(self, graceful=True):
        """Terminate the longpolling subprocess and all workers; *graceful*
        gives workers up to self.timeout seconds to exit on SIGTERM."""
        if self.long_polling_pid is not None:
            # FIXME make longpolling process handle SIGTERM correctly
            self.worker_kill(self.long_polling_pid, signal.SIGKILL)
            self.long_polling_pid = None
        if graceful:
            _logger.info("Stopping gracefully")
            limit = time.time() + self.timeout
            for pid in self.workers.keys():
                self.worker_kill(pid, signal.SIGTERM)
            while self.workers and time.time() < limit:
                self.process_zombie()
                time.sleep(0.1)
        else:
            _logger.info("Stopping forcefully")
        # NOTE(review): the forceful branch also sends SIGTERM (not SIGKILL);
        # confirm whether that is intentional.
        for pid in self.workers.keys():
            self.worker_kill(pid, signal.SIGTERM)
        self.socket.close()

    def run(self, preload, stop):
        self.start()

        rc = preload_registries(preload)

        if stop:
            self.stop()
            return rc

        # Empty the cursor pool, we dont want them to be shared among forked workers.
        openerp.sql_db.close_all()

        _logger.debug("Multiprocess starting")
        while 1:
            try:
                #_logger.debug("Multiprocess beat (%s)",time.time())
                self.process_signals()
                self.process_zombie()
                self.process_timeout()
                self.process_spawn()
                self.sleep()
            except KeyboardInterrupt:
                _logger.debug("Multiprocess clean stop")
                self.stop()
                break
            except Exception, e:
                _logger.exception(e)
                self.stop(False)
                return -1
class Worker(object):
""" Workers """
def __init__(self, multi):
self.multi = multi
self.watchdog_time = time.time()
self.watchdog_pipe = multi.pipe_new()
# Can be set to None if no watchdog is desired.
self.watchdog_timeout = multi.timeout
self.ppid = os.getpid()
self.pid = None
self.alive = True
# should we rename into lifetime ?
self.request_max = multi.limit_request
self.request_count = 0
def setproctitle(self, title=""):
setproctitle('openerp: %s %s %s' % (self.__class__.__name__, self.pid, title))
def close(self):
os.close(self.watchdog_pipe[0])
os.close(self.watchdog_pipe[1])
def signal_handler(self, sig, frame):
self.alive = False
def sleep(self):
try:
select.select([self.multi.socket], [], [], self.multi.beat)
except select.error, e:
if e[0] not in [errno.EINTR]:
raise
def process_limit(self):
if resource is None:
return
# If our parent changed sucide
if self.ppid != os.getppid():
_logger.info("Worker (%s) Parent changed", self.pid)
self.alive = False
# check for lifetime
if self.request_count >= self.request_max:
_logger.info("Worker (%d) max request (%s) reached.", self.pid, self.request_count)
self.alive = False
# Reset the worker if it consumes too much memory (e.g. caused by a memory leak).
rss, vms = psutil.Process(os.getpid()).get_memory_info()
if vms > config['limit_memory_soft']:
_logger.info('Worker (%d) virtual memory limit (%s) reached.', self.pid, vms)
self.alive = False # Commit suicide after the request.
# VMS and RLIMIT_AS are the same thing: virtual memory, a.k.a. address space
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
resource.setrlimit(resource.RLIMIT_AS, (config['limit_memory_hard'], hard))
# SIGXCPU (exceeded CPU time) signal handler will raise an exception.
r = resource.getrusage(resource.RUSAGE_SELF)
cpu_time = r.ru_utime + r.ru_stime
def time_expired(n, stack):
_logger.info('Worker (%d) CPU time limit (%s) reached.', config['limit_time_cpu'])
# We dont suicide in such case
raise Exception('CPU time limit exceeded.')
signal.signal(signal.SIGXCPU, time_expired)
soft, hard = resource.getrlimit(resource.RLIMIT_CPU)
resource.setrlimit(resource.RLIMIT_CPU, (cpu_time + config['limit_time_cpu'], hard))
def process_work(self):
pass
def start(self):
self.pid = os.getpid()
self.setproctitle()
_logger.info("Worker %s (%s) alive", self.__class__.__name__, self.pid)
# Reseed the random number generator
random.seed()
# Prevent fd inherientence close_on_exec
flags = fcntl.fcntl(self.multi.socket, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
fcntl.fcntl(self.multi.socket, fcntl.F_SETFD, flags)
# reset blocking status
self.multi.socket.setblocking(0)
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
def stop(self):
pass
def run(self):
try:
self.start()
while self.alive:
self.process_limit()
self.multi.pipe_ping(self.watchdog_pipe)
self.sleep()
self.process_work()
_logger.info("Worker (%s) exiting. request_count: %s.", self.pid, self.request_count)
self.stop()
except Exception:
_logger.exception("Worker (%s) Exception occured, exiting..." % self.pid)
# should we use 3 to abort everything ?
sys.exit(1)
class WorkerHTTP(Worker):
    """ HTTP Request workers """
    def process_request(self, client, addr):
        """Serve one accepted connection through the monkey-patched
        BaseWSGIServerNoBind instance."""
        client.setblocking(1)
        client.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        # Prevent fd inheritance close_on_exec
        flags = fcntl.fcntl(client, fcntl.F_GETFD) | fcntl.FD_CLOEXEC
        fcntl.fcntl(client, fcntl.F_SETFD, flags)
        # do request using BaseWSGIServerNoBind monkey patched with socket
        self.server.socket = client
        # tolerate broken pipe when the http client closes the socket before
        # receiving the full reply
        try:
            self.server.process_request(client, addr)
        except IOError, e:
            if e.errno != errno.EPIPE:
                raise
        self.request_count += 1

    def process_work(self):
        # Compete with the sibling workers on accept(2); the listen socket is
        # non-blocking so a lost race surfaces as EAGAIN and is ignored.
        try:
            client, addr = self.multi.socket.accept()
            self.process_request(client, addr)
        except socket.error, e:
            if e[0] not in (errno.EAGAIN, errno.ECONNABORTED):
                raise

    def start(self):
        Worker.start(self)
        self.server = BaseWSGIServerNoBind(self.multi.app)
class WorkerCron(Worker):
    """ Cron workers

    Each call to process_work() handles the pending cron jobs of exactly one
    database; ``db_index`` walks round-robin over the configured databases
    and the worker only really sleeps once a full cycle has completed.
    """
    def __init__(self, multi):
        super(WorkerCron, self).__init__(multi)
        # process_work() below process a single database per call.
        # The variable db_index is keeping track of the next database to
        # process.
        self.db_index = 0

    def sleep(self):
        # Really sleep once all the databases have been processed.
        if self.db_index == 0:
            interval = SLEEP_INTERVAL + self.pid % 10    # chorus effect
            time.sleep(interval)

    def _db_list(self):
        """Return the list of database names to poll: the explicit config
        list when given, otherwise every existing database."""
        if config['db_name']:
            db_names = config['db_name'].split(',')
        else:
            db_names = openerp.service.db.exp_list(True)
        return db_names

    def process_work(self):
        rpc_request = logging.getLogger('openerp.netsvc.rpc.request')
        rpc_request_flag = rpc_request.isEnabledFor(logging.DEBUG)
        _logger.debug("WorkerCron (%s) polling for jobs", self.pid)
        db_names = self._db_list()
        if len(db_names):
            self.db_index = (self.db_index + 1) % len(db_names)
            db_name = db_names[self.db_index]
            self.setproctitle(db_name)
            if rpc_request_flag:
                start_time = time.time()
                start_rss, start_vms = psutil.Process(os.getpid()).get_memory_info()

            import openerp.addons.base as base
            base.ir.ir_cron.ir_cron._acquire_job(db_name)
            openerp.modules.registry.RegistryManager.delete(db_name)

            # dont keep cursors in multi database mode
            if len(db_names) > 1:
                openerp.sql_db.close_db(db_name)
            if rpc_request_flag:
                run_time = time.time() - start_time
                end_rss, end_vms = psutil.Process(os.getpid()).get_memory_info()
                vms_diff = (end_vms - start_vms) / 1024
                logline = '%s time:%.3fs mem: %sk -> %sk (diff: %sk)' % \
                    (db_name, run_time, start_vms / 1024, end_vms / 1024, vms_diff)
                _logger.debug("WorkerCron (%s) %s", self.pid, logline)

            self.request_count += 1
            if self.request_count >= self.request_max and self.request_max < len(db_names):
                # BUGFIX: corrected "dabatases" typo in the log message.
                _logger.error("There are more databases to process than allowed "
                              "by the `limit_request` configuration variable: %s more.",
                              len(db_names) - self.request_max)
        else:
            self.db_index = 0

    def start(self):
        os.nice(10)     # mommy always told me to be nice with others...
        Worker.start(self)
        # cron workers never accept HTTP connections; drop the listen socket
        self.multi.socket.close()
#----------------------------------------------------------
# start/stop public api
#----------------------------------------------------------

# Module-level singleton holding the running server instance (ThreadedServer,
# GeventServer or PreforkServer); assigned by start() and used by restart().
server = None
def load_server_wide_modules():
    """Import every module listed in ``openerp.conf.server_wide_modules``.

    Failures are logged (with a hint for the common missing-`web` case) but
    never propagated, so one broken module does not abort startup.
    """
    for module_name in openerp.conf.server_wide_modules:
        try:
            openerp.modules.module.load_openerp_module(module_name)
        except Exception:
            hint = ''
            if module_name == 'web':
                hint = """
The `web` module is provided by the addons found in the `openerp-web` project.
Maybe you forgot to add those addons in your addons_path configuration."""
            _logger.exception('Failed to load server-wide module `%s`.%s', module_name, hint)
def _reexec(updated_modules=None):
    """reexecute openerp-server process with (nearly) the same arguments

    :param updated_modules: optional list of module names; when given they
        are appended as a ``-u`` option so the new process updates them.

    Never returns on POSIX (``os.execv`` replaces the process image).
    """
    if openerp.tools.osutil.is_running_as_nt_service():
        subprocess.call('net stop {0} && net start {0}'.format(nt_service_name), shell=True)
    exe = os.path.basename(sys.executable)
    args = stripped_sys_argv()
    # BUGFIX: guard against updated_modules being None (its default): the
    # original unconditionally did ','.join(updated_modules), which raised
    # TypeError whenever _reexec() was called without arguments (as
    # restart() does on Windows).
    if updated_modules:
        args += ["-u", ','.join(updated_modules)]
    if not args or args[0] != exe:
        args.insert(0, exe)
    os.execv(sys.executable, args)
def load_test_file_yml(registry, test_file):
    """Run a YAML test file against *registry*, committing or rolling back
    the cursor according to the ``test_commit`` configuration flag."""
    with registry.cursor() as cr:
        # BUGFIX: open the test file with a context manager so the handle is
        # closed even if the import raises (the original passed the py2
        # file() builtin and leaked the descriptor).
        with open(test_file) as f:
            openerp.tools.convert_yaml_import(cr, 'base', f, 'test', {}, 'init')
        if config['test_commit']:
            _logger.info('test %s has been commited', test_file)
            cr.commit()
        else:
            _logger.info('test %s has been rollbacked', test_file)
            cr.rollback()
def load_test_file_py(registry, test_file):
    """Run the unittest2 tests of the already-imported module whose source
    file is *test_file*, reporting results into the registry's assertion
    report."""
    # Locate python module based on its filename and run the tests
    test_path, _ = os.path.splitext(os.path.abspath(test_file))
    for mod_name, mod_mod in sys.modules.items():
        if mod_mod:
            mod_path, _ = os.path.splitext(getattr(mod_mod, '__file__', ''))
            if test_path == mod_path:
                suite = unittest2.TestSuite()
                for t in unittest2.TestLoader().loadTestsFromModule(mod_mod):
                    suite.addTest(t)
                _logger.log(logging.INFO, 'running tests %s.', mod_mod.__name__)
                stream = openerp.modules.module.TestStream()
                result = unittest2.TextTestRunner(verbosity=2, stream=stream).run(suite)
                success = result.wasSuccessful()
                # older registries may lack report_result; feature-detect it
                if hasattr(registry._assertion_report, 'report_result'):
                    registry._assertion_report.report_result(success)
                if not success:
                    _logger.error('%s: at least one error occurred in a test', test_file)
def preload_registries(dbnames):
    """ Preload a registries, possibly run a test file.

    :param dbnames: iterable of database names to initialize (may be None)
    :return: 0 on success, the number of databases with test failures, or
             -1 when a database failed to initialize (remaining databases
             are then skipped).
    """
    # TODO: move all config checks to args dont check tools.config here
    config = openerp.tools.config
    test_file = config['test_file']
    dbnames = dbnames or []
    rc = 0
    for dbname in dbnames:
        try:
            update_module = config['init'] or config['update']
            registry = RegistryManager.new(dbname, update_module=update_module)
            # run test_file if provided
            if test_file:
                _logger.info('loading test file %s', test_file)
                if test_file.endswith('yml'):
                    load_test_file_yml(registry, test_file)
                elif test_file.endswith('py'):
                    load_test_file_py(registry, test_file)
            if registry._assertion_report.failures:
                rc += 1
        except Exception:
            _logger.critical('Failed to initialize database `%s`.', dbname, exc_info=True)
            # abort immediately: the remaining databases are not attempted
            return -1
    return rc
def start(preload=None, stop=False):
    """ Start the openerp http server and cron processor.

    Chooses the server flavor from the runtime configuration (evented >
    prefork > threaded), optionally attaches the AutoReload watcher, runs
    the server, and re-execs the whole process when a reload was requested
    (``openerp.phoenix``).

    :param preload: database names passed through to the server's run()
    :param stop: "--stop-after-init" mode
    :return: the server's exit code (0 when falsy)
    """
    global server
    load_server_wide_modules()
    if openerp.evented:
        server = GeventServer(openerp.service.wsgi_server.application)
    elif config['workers']:
        server = PreforkServer(openerp.service.wsgi_server.application)
    else:
        server = ThreadedServer(openerp.service.wsgi_server.application)

    if config['auto_reload']:
        autoreload = AutoReload(server)
        autoreload.run()

    rc = server.run(preload, stop)

    # like the legend of the phoenix, all ends with beginnings
    if getattr(openerp, 'phoenix', False):
        modules = []
        if config['auto_reload']:
            modules = autoreload.modules.keys()
        _reexec(modules)

    return rc if rc else 0
def restart():
    """Ask the running server to restart itself."""
    if os.name != 'nt':
        # POSIX: the server's SIGHUP handler flips openerp.phoenix, which
        # makes start() re-exec the process after shutdown.
        os.kill(server.pid, signal.SIGHUP)
    else:
        # run in a thread to let the current thread return response to the caller.
        restarter = threading.Thread(target=_reexec)
        restarter.start()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| |
"""
This module provides the security predicates used in decorating various models.
"""
import logging
from collections import defaultdict
from pylons import c, request
from webob import exc
from itertools import chain
from ming.utils import LazyProperty
from allura.lib.utils import TruthyCallable
log = logging.getLogger(__name__)
class Credentials(object):
    '''
    Role graph logic & caching

    Caches RoleCache objects per (user_id, project_id) in ``self.users`` and
    per project_id in ``self.projects``; entries are loaded lazily from the
    ProjectRole collection.
    '''

    def __init__(self):
        self.clear()

    @classmethod
    def get(cls):
        'get the global Credentials instance'
        import allura
        return allura.credentials

    def clear(self):
        'clear cache'
        self.users = {}
        self.projects = {}

    def clear_user(self, user_id, project_id=None):
        # drop one user's cached roles for a single project (or the
        # project-independent entry when project_id is None)
        self.users.pop((user_id, project_id), None)

    def load_user_roles(self, user_id, *project_ids):
        '''Load the credentials with all user roles for a set of projects'''
        from allura import model as M
        # Don't reload roles
        project_ids = [ pid for pid in project_ids if self.users.get((user_id, pid)) is None ]
        if not project_ids: return
        if user_id is None:
            # anonymous visitor: only the *anonymous pseudo-role applies
            q = M.ProjectRole.query.find(
                dict(
                    project_id={'$in': project_ids},
                    name='*anonymous'))
        else:
            # authenticated user: both pseudo-roles plus the user's own roles
            q0 = M.ProjectRole.query.find(
                dict(project_id={'$in': list(project_ids)},
                     name={'$in':['*anonymous', '*authenticated']}))
            q1 = M.ProjectRole.query.find(
                dict(project_id={'$in': list(project_ids)},user_id=user_id))
            q = chain(q0, q1)
        roles_by_project = dict((pid, []) for pid in project_ids)
        for role in q:
            roles_by_project[role.project_id].append(role)
        for pid, roles in roles_by_project.iteritems():
            self.users[user_id, pid] = RoleCache(self, roles)

    def load_project_roles(self, *project_ids):
        '''Load the credentials with all user roles for a set of projects'''
        from allura import model as M
        # Don't reload roles
        project_ids = [ pid for pid in project_ids if self.projects.get(pid) is None ]
        if not project_ids: return
        q = M.ProjectRole.query.find(dict(
            project_id={'$in': project_ids}))
        roles_by_project = dict((pid, []) for pid in project_ids)
        for role in q:
            roles_by_project[role.project_id].append(role)
        for pid, roles in roles_by_project.iteritems():
            self.projects[pid] = RoleCache(self, roles)

    def project_roles(self, project_id):
        '''
        :returns: a RoleCache of ProjectRoles for project_id
        '''
        roles = self.projects.get(project_id)
        if roles is None:
            self.load_project_roles(project_id)
            roles = self.projects[project_id]
        return roles

    def user_roles(self, user_id, project_id=None):
        '''
        :returns: a RoleCache of ProjectRoles for given user_id and project_id, *anonymous and *authenticated checked as appropriate
        '''
        from allura import model as M
        roles = self.users.get((user_id, project_id))
        if roles is None:
            if project_id is None:
                # no project context: all of the user's roles across projects
                if user_id is None:
                    q = []
                else:
                    q = M.ProjectRole.query.find(dict(user_id=user_id))
                roles = RoleCache(self, q)
            else:
                self.load_user_roles(user_id, project_id)
                roles = self.users.get((user_id, project_id))
            self.users[user_id, project_id] = roles
        return roles

    def user_has_any_role(self, user_id, project_id, role_ids):
        # membership test against the transitive closure of the user's roles
        user_roles = self.user_roles(user_id=user_id, project_id=project_id)
        return bool(set(role_ids) & user_roles.reaching_ids_set)

    def users_with_named_role(self, project_id, name):
        """ returns in sorted order """
        roles = self.project_roles(project_id)
        return sorted(RoleCache(self, roles.find(name=name)).users_that_reach, key=lambda u:u.username)

    def userids_with_named_role(self, project_id, name):
        roles = self.project_roles(project_id)
        return RoleCache(self, roles.find(name=name)).userids_that_reach
class RoleCache(object):
    """Lazy cached view over an iterable of ProjectRole documents.

    Wraps a query (or generator) ``q`` and exposes filtering plus
    lazily-computed closures over the role graph; each LazyProperty is
    evaluated at most once per instance.
    """
    def __init__(self, cred, q):
        self.cred = cred
        self.q = q

    def find(self, **kw):
        # Filter by attribute equality; a value may also be a callable
        # predicate applied to the attribute.
        tests = kw.items()
        def _iter():
            for r in self:
                for k,v in tests:
                    val = getattr(r, k)
                    if callable(v):
                        if not v(val): break
                    elif v != val: break
                else:
                    yield r
        return RoleCache(self.cred, _iter())

    def get(self, **kw):
        # first match or None
        for x in self.find(**kw): return x
        return None

    def __iter__(self):
        return self.index.itervalues()

    def __len__(self):
        return len(self.index)

    @LazyProperty
    def index(self):
        # materializes self.q exactly once, keyed by role _id
        return dict((r._id, r) for r in self.q)

    @LazyProperty
    def named(self):
        # only real (named) roles, excluding *anonymous/*authenticated etc.
        return RoleCache(self.cred, (
            r for r in self
            if r.name and not r.name.startswith('*')))

    @LazyProperty
    def reverse_index(self):
        # role _id -> list of roles that include it as a subrole
        rev_index = defaultdict(list)
        for r in self:
            for rr_id in r.roles:
                rev_index[rr_id].append(r)
        return rev_index

    @LazyProperty
    def roles_that_reach(self):
        # breadth-first walk of the reverse role graph: every role that
        # (transitively) includes one of the roles in this cache
        def _iter():
            visited = set()
            to_visit = list(self)
            while to_visit:
                r = to_visit.pop(0)
                if r in visited: continue
                visited.add(r)
                yield r
                pr_rindex = self.cred.project_roles(r.project_id).reverse_index
                to_visit += pr_rindex[r._id]
        return RoleCache(self.cred, _iter())

    @LazyProperty
    def users_that_reach(self):
        return [
            r.user for r in self.roles_that_reach if r.user ]

    @LazyProperty
    def userids_that_reach(self):
        return [
            r.user_id for r in self.roles_that_reach ]

    @LazyProperty
    def reaching_roles(self):
        # forward walk: these roles plus every subrole they grant
        def _iter():
            to_visit = self.index.items()
            visited = set()
            while to_visit:
                (rid, role) = to_visit.pop()
                if rid in visited: continue
                yield role
                pr_index = self.cred.project_roles(role.project_id).index
                if rid in pr_index:
                    for i in pr_index[rid].roles:
                        if i in pr_index:
                            to_visit.append((i, pr_index[i]))
        return RoleCache(self.cred, _iter())

    @LazyProperty
    def reaching_ids(self):
        return [ r._id for r in self.reaching_roles ]

    @LazyProperty
    def reaching_ids_set(self):
        return set(self.reaching_ids)
def has_access(obj, permission, user=None, project=None):
    '''Return whether the given user has the permission name on the given object.

    - First, all the roles for a user in the given project context are computed.

    - Next, for each role, the given object's ACL is examined linearly. If an ACE
      is found which matches the permission and user, and that ACE ALLOWs access,
      then the function returns True and access is permitted. If the ACE DENYs
      access, then that role is removed from further consideration.

    - If the obj is not a Neighborhood and the given user has the 'admin'
      permission on the current neighborhood, then the function returns True and
      access is allowed.

    - If the obj is not a Project and the given user has the 'admin'
      permission on the current project, then the function returns True and
      access is allowed.

    - If none of the ACEs on the object ALLOW access, and there are no more roles
      to be considered, then the function returns False and access is denied.

    - Processing continues using the remaining roles and the
      obj.parent_security_context(). If the parent_security_context is None, then
      the function returns False and access is denied.

    The effect of this processing is that if *any* role for the user is ALLOWed
    access via a linear traversal of the ACLs, then access is allowed. All of the
    users roles must either be explicitly DENYed or processing terminate with no
    matches to DENY access to the resource.

    Returns a TruthyCallable predicate (evaluate it, or use it in a boolean
    context) rather than a plain bool.
    '''
    from allura import model as M
    def predicate(obj=obj, user=user, project=project, roles=None):
        if obj is None:
            return False
        if roles is None:
            # first (non-recursive) call: resolve user, project and the
            # transitive closure of the user's role ids
            if user is None: user = c.user
            assert user, 'c.user should always be at least M.User.anonymous()'
            cred = Credentials.get()
            if project is None:
                if isinstance(obj, M.Neighborhood):
                    project = obj.neighborhood_project
                    if project is None:
                        log.error('Neighborhood project missing for %s', obj)
                        return False
                elif isinstance(obj, M.Project):
                    project = obj.root_project
                else:
                    project = c.project.root_project
            roles = cred.user_roles(user_id=user._id, project_id=project._id).reaching_ids
        chainable_roles = []
        for rid in roles:
            for ace in obj.acl:
                if M.ACE.match(ace, rid, permission):
                    if ace.access == M.ACE.ALLOW:
                        # access is allowed
                        # log.info('%s: True', txt)
                        return True
                    else:
                        # access is denied for this role
                        break
            else:
                # access neither allowed or denied, may chain to parent context
                chainable_roles.append(rid)
        parent = obj.parent_security_context()
        if parent and chainable_roles:
            # recurse into the parent context with only the undecided roles
            result = has_access(parent, permission, user=user, project=project)(
                roles=tuple(chainable_roles))
        elif not isinstance(obj, M.Neighborhood):
            # fall back to neighborhood/project admin override
            result = has_access(project.neighborhood, 'admin', user=user)()
            if not (result or isinstance(obj, M.Project)):
                result = has_access(project, 'admin', user=user)()
        else:
            result = False
        # log.info('%s: %s', txt, result)
        return result
    return TruthyCallable(predicate)
def require(predicate, message=None):
    '''Abort the current request unless ``predicate`` evaluates truthy.

    Example: require(has_access(c.app, 'read'))

    :param callable predicate: truth function to call
    :param str message: message to show upon failure
    :raises: HTTPForbidden (authenticated user) or HTTPUnauthorized (anonymous)
    '''
    from allura import model as M
    if predicate():
        return
    # Fall back to the generic permission-denied text when no message given.
    message = message or """You don't have permission to do that.
You must ask a project administrator for rights to perform this task.
Please click the back button to return to the previous page."""
    if c.user == M.User.anonymous():
        # Anonymous users get a 401 so they are prompted to log in.
        raise exc.HTTPUnauthorized()
    request.environ['error_message'] = message
    raise exc.HTTPForbidden(detail=message)
def require_access(obj, permission, **kwargs):
    '''Require ``permission`` on ``obj`` for the current request.

    :raises: HTTPForbidden if ``obj`` is None or access is denied,
        HTTPUnauthorized for anonymous users without access
    '''
    if obj is None:
        # Nothing to check permissions against -- refuse outright.
        raise exc.HTTPForbidden(detail="Could not verify permissions for this page.")
    predicate = has_access(obj, permission, **kwargs)
    return require(predicate, message='%s access required' % permission.capitalize())
def require_authenticated():
    '''
    :raises: HTTPUnauthorized if current user is anonymous
    '''
    from allura import model as M
    anonymous = M.User.anonymous()
    if c.user == anonymous:
        raise exc.HTTPUnauthorized()
def simple_grant(acl, role_id, permission):
    '''Append an ALLOW ACE for (role_id, permission) to ``acl`` in place.

    Idempotent: if any ACE for that role/permission pair already exists
    (allow or deny), the list is left untouched.
    '''
    from allura.model.types import ACE
    already_present = any(
        entry.role_id == role_id and entry.permission == permission
        for entry in acl)
    if not already_present:
        acl.append(ACE.allow(role_id, permission))
def simple_revoke(acl, role_id, permission):
    '''Remove every ACE matching (role_id, permission) from ``acl`` in place.

    Iterates indices from the end so that ``pop`` never shifts an
    index that has yet to be examined.
    '''
    for i in reversed(range(len(acl))):
        entry = acl[i]
        if entry.role_id == role_id and entry.permission == permission:
            acl.pop(i)
| |
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from unittest import TestCase
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn import hmm
from sklearn import mixture
from sklearn.utils.extmath import logsumexp
from sklearn.utils import check_random_state
from nose import SkipTest
# Module-level RNG with a fixed seed, shared by the normalize tests below,
# so runs are reproducible.
rng = np.random.RandomState(0)
# Surface numerical warnings (e.g. log-space under/overflow) instead of
# silently ignoring them.
np.seterr(all='warn')
class TestBaseHMM(TestCase):
    """Tests for the private ``hmm._BaseHMM`` machinery (forward/backward,
    Viterbi, posteriors) using a stub subclass with canned emission
    log-probabilities."""
    def setUp(self):
        # Fixed seed so the randomized GMM-consistency tests are reproducible.
        self.prng = np.random.RandomState(9)
    class StubHMM(hmm._BaseHMM):
        """Minimal concrete HMM: per-frame emission log-likelihoods are
        supplied directly through the ``framelogprob`` attribute instead of
        being computed from the observations."""
        def _compute_log_likelihood(self, X):
            # X is ignored entirely; return the canned log-likelihoods.
            return self.framelogprob
        def _generate_sample_from_state(self):
            pass
        def _init(self):
            pass
    def setup_example_hmm(self):
        # Example from http://en.wikipedia.org/wiki/Forward-backward_algorithm
        h = self.StubHMM(2)
        h.transmat_ = [[0.7, 0.3], [0.3, 0.7]]
        h.startprob_ = [0.5, 0.5]
        framelogprob = np.log([[0.9, 0.2],
                               [0.9, 0.2],
                               [0.1, 0.8],
                               [0.9, 0.2],
                               [0.9, 0.2]])
        # Add dummy observations to stub.
        h.framelogprob = framelogprob
        return h, framelogprob
    def test_init(self):
        # Constructor kwargs use the attribute names minus the trailing
        # underscore (e.g. 'transmat_' -> transmat=...).
        h, framelogprob = self.setup_example_hmm()
        for params in [('transmat_',), ('startprob_', 'transmat_')]:
            d = dict((x[:-1], getattr(h, x)) for x in params)
            h2 = self.StubHMM(h.n_components, **d)
            self.assertEqual(h.n_components, h2.n_components)
            for p in params:
                assert_array_almost_equal(getattr(h, p), getattr(h2, p))
    def test_set_startprob(self):
        # The startprob_ property setter should accept and store a valid
        # probability vector unchanged.
        h, framelogprob = self.setup_example_hmm()
        startprob = np.array([0.0, 1.0])
        h.startprob_ = startprob
        assert np.allclose(startprob, h.startprob_)
    def test_set_transmat(self):
        # The transmat_ property setter should accept and store a valid
        # row-stochastic matrix unchanged.
        h, framelogprob = self.setup_example_hmm()
        transmat = np.array([[0.8, 0.2], [0.0, 1.0]])
        h.transmat_ = transmat
        assert np.allclose(transmat, h.transmat_)
    def test_do_forward_pass(self):
        h, framelogprob = self.setup_example_hmm()
        logprob, fwdlattice = h._do_forward_pass(framelogprob)
        # Reference values from the Wikipedia forward-backward example.
        reflogprob = -3.3725
        self.assertAlmostEqual(logprob, reflogprob, places=4)
        reffwdlattice = np.array([[0.4500, 0.1000],
                                  [0.3105, 0.0410],
                                  [0.0230, 0.0975],
                                  [0.0408, 0.0150],
                                  [0.0298, 0.0046]])
        assert_array_almost_equal(np.exp(fwdlattice), reffwdlattice, 4)
    def test_do_backward_pass(self):
        h, framelogprob = self.setup_example_hmm()
        bwdlattice = h._do_backward_pass(framelogprob)
        # Reference values from the Wikipedia forward-backward example.
        refbwdlattice = np.array([[0.0661, 0.0455],
                                  [0.0906, 0.1503],
                                  [0.4593, 0.2437],
                                  [0.6900, 0.4100],
                                  [1.0000, 1.0000]])
        assert_array_almost_equal(np.exp(bwdlattice), refbwdlattice, 4)
    def test_do_viterbi_pass(self):
        h, framelogprob = self.setup_example_hmm()
        logprob, state_sequence = h._do_viterbi_pass(framelogprob)
        refstate_sequence = [0, 0, 1, 0, 0]
        assert_array_equal(state_sequence, refstate_sequence)
        reflogprob = -4.4590
        self.assertAlmostEqual(logprob, reflogprob, places=4)
    def test_eval(self):
        h, framelogprob = self.setup_example_hmm()
        nobs = len(framelogprob)
        # The stub ignores the observations, so an empty list suffices here.
        logprob, posteriors = h.eval([])
        # Posteriors must be a proper distribution at every frame.
        assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
        reflogprob = -3.3725
        self.assertAlmostEqual(logprob, reflogprob, places=4)
        refposteriors = np.array([[0.8673, 0.1327],
                                  [0.8204, 0.1796],
                                  [0.3075, 0.6925],
                                  [0.8204, 0.1796],
                                  [0.8673, 0.1327]])
        assert_array_almost_equal(posteriors, refposteriors, decimal=4)
    def test_hmm_eval_consistent_with_gmm(self):
        n_components = 8
        nobs = 10
        h = self.StubHMM(n_components)
        # Add dummy observations to stub.
        framelogprob = np.log(self.prng.rand(nobs, n_components))
        h.framelogprob = framelogprob
        # If startprob and transmat are uniform across all states (the
        # default), the transitions are uninformative - the model
        # reduces to a GMM with uniform mixing weights (in terms of
        # posteriors, not likelihoods).
        logprob, hmmposteriors = h.eval([])
        assert_array_almost_equal(hmmposteriors.sum(axis=1), np.ones(nobs))
        norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
        gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
        assert_array_almost_equal(hmmposteriors, gmmposteriors)
    def test_hmm_decode_consistent_with_gmm(self):
        n_components = 8
        nobs = 10
        h = self.StubHMM(n_components)
        # Add dummy observations to stub.
        framelogprob = np.log(self.prng.rand(nobs, n_components))
        h.framelogprob = framelogprob
        # If startprob and transmat are uniform across all states (the
        # default), the transitions are uninformative - the model
        # reduces to a GMM with uniform mixing weights (in terms of
        # posteriors, not likelihoods).
        viterbi_ll, state_sequence = h.decode([])
        norm = logsumexp(framelogprob, axis=1)[:, np.newaxis]
        gmmposteriors = np.exp(framelogprob - np.tile(norm, (1, n_components)))
        gmmstate_sequence = gmmposteriors.argmax(axis=1)
        assert_array_equal(state_sequence, gmmstate_sequence)
    def test_base_hmm_attributes(self):
        # Valid values must round-trip through the property setters; invalid
        # shapes or non-normalized distributions must raise ValueError.
        n_components = 20
        startprob = self.prng.rand(n_components)
        startprob = startprob / startprob.sum()
        transmat = self.prng.rand(n_components, n_components)
        transmat /= np.tile(transmat.sum(axis=1)
                            [:, np.newaxis], (1, n_components))
        h = self.StubHMM(n_components)
        self.assertEqual(h.n_components, n_components)
        h.startprob_ = startprob
        assert_array_almost_equal(h.startprob_, startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          2 * startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          np.zeros((n_components - 2, 2)))
        h.transmat_ = transmat
        assert_array_almost_equal(h.transmat_, transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          2 * transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          np.zeros((n_components - 2, n_components)))
def train_hmm_and_keep_track_of_log_likelihood(hmm, obs, n_iter=1, **kwargs):
    """Fit ``hmm`` one EM iteration at a time and record the total
    log-likelihood of ``obs`` after each iteration.

    Note: the ``hmm`` parameter is a model instance and shadows the
    ``sklearn.hmm`` module inside this function.

    NOTE(review): ``kwargs`` (e.g. ``params``) is accepted but never
    forwarded to ``fit`` -- confirm whether that is intentional.
    """
    # One warm-up EM iteration before tracking begins.
    hmm.n_iter = 1
    hmm.fit(obs)
    loglikelihoods = []
    for n in xrange(n_iter):
        hmm.n_iter = 1
        # Empty init_params keeps the parameters learned so far instead of
        # re-initializing them on each fit call.
        hmm.init_params = ''
        hmm.fit(obs)
        loglikelihoods.append(sum(hmm.score(x) for x in obs))
    return loglikelihoods
class GaussianHMMBaseTester(object):
    """Shared test suite for ``hmm.GaussianHMM``; concrete subclasses mix
    this with ``TestCase`` and set ``covariance_type``."""
    def setUp(self):
        # Fixed-seed random model parameters shared by all tests.
        self.prng = prng = np.random.RandomState(10)
        self.n_components = n_components = 3
        self.n_features = n_features = 3
        self.startprob = prng.rand(n_components)
        self.startprob = self.startprob / self.startprob.sum()
        self.transmat = prng.rand(n_components, n_components)
        self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
                                 (1, n_components))
        self.means = prng.randint(-20, 20, (n_components, n_features))
        # One covariance parameterization per supported covariance_type.
        self.covars = {
            'spherical': (1.0 + 2 * np.dot(prng.rand(n_components, 1),
                                           np.ones((1, n_features)))) ** 2,
            'tied': (make_spd_matrix(n_features, random_state=0)
                     + np.eye(n_features)),
            'diag': (1.0 + 2 * prng.rand(n_components, n_features)) ** 2,
            'full': np.array([make_spd_matrix(n_features, random_state=0)
                              + np.eye(n_features)
                              for x in range(n_components)]),
        }
        # Same covariances expanded to full per-component matrices.
        self.expanded_covars = {
            'spherical': [np.eye(n_features) * cov
                          for cov in self.covars['spherical']],
            'diag': [np.diag(cov) for cov in self.covars['diag']],
            'tied': [self.covars['tied']] * n_components,
            'full': self.covars['full'],
        }
    def test_bad_covariance_type(self):
        # A valid covariance_type constructs fine; an invalid one raises.
        hmm.GaussianHMM(20, self.covariance_type)
        self.assertRaises(ValueError, hmm.GaussianHMM, 20,
                          'badcovariance_type')
    def test_eval_and_decode(self):
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.means_ = self.means
        h.covars_ = self.covars[self.covariance_type]
        # Make sure the means are far apart so posteriors.argmax()
        # picks the actual component used to generate the observations.
        h.means_ = 20 * h.means_
        gaussidx = np.repeat(range(self.n_components), 5)
        nobs = len(gaussidx)
        obs = self.prng.randn(nobs, self.n_features) + h.means_[gaussidx]
        ll, posteriors = h.eval(obs)
        self.assertEqual(posteriors.shape, (nobs, self.n_components))
        assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
        viterbi_ll, stateseq = h.decode(obs)
        assert_array_equal(stateseq, gaussidx)
    def test_sample(self, n=1000):
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        # Make sure the means are far apart so posteriors.argmax()
        # picks the actual component used to generate the observations.
        h.means_ = 20 * self.means
        h.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
        h.startprob_ = self.startprob
        samples = h.sample(n)[0]
        self.assertEqual(samples.shape, (n, self.n_features))
    def test_fit(self, params='stmc', n_iter=5, verbose=False, **kwargs):
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.startprob_ = self.startprob
        # Perturb the diagonal so the transition matrix is not degenerate.
        h.transmat_ = hmm.normalize(
            self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
        h.means_ = 20 * self.means
        h.covars_ = self.covars[self.covariance_type]
        # Create training data by sampling from the HMM.
        train_obs = [h.sample(n=10)[0] for x in xrange(10)]
        # Mess up the parameters and see if we can re-learn them.
        h.n_iter = 0
        h.fit(train_obs)
        trainll = train_hmm_and_keep_track_of_log_likelihood(
            h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
        # Check that the loglik is always increasing during training
        if not np.all(np.diff(trainll) > 0) and verbose:
            print
            print ('Test train: %s (%s)\n %s\n %s'
                   % (self.covariance_type, params, trainll, np.diff(trainll)))
        delta_min = np.diff(trainll).min()
        self.assertTrue(
            delta_min > -0.8,
            "The min nll increase is %f which is lower than the admissible"
            " threshold of %f, for model %s. The likelihoods are %s."
            % (delta_min, -0.8, self.covariance_type, trainll))
    def test_fit_works_on_sequences_of_different_length(self):
        obs = [self.prng.rand(3, self.n_features),
               self.prng.rand(4, self.n_features),
               self.prng.rand(5, self.n_features)]
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        # This shouldn't raise
        # ValueError: setting an array element with a sequence.
        h.fit(obs)
    def test_fit_with_priors(self, params='stmc', n_iter=5, verbose=False):
        # MAP training: attach priors/weights to every parameter group.
        startprob_prior = 10 * self.startprob + 2.0
        transmat_prior = 10 * self.transmat + 2.0
        means_prior = self.means
        means_weight = 2.0
        covars_weight = 2.0
        if self.covariance_type in ('full', 'tied'):
            covars_weight += self.n_features
        covars_prior = self.covars[self.covariance_type]
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.startprob_ = self.startprob
        h.startprob_prior = startprob_prior
        h.transmat_ = hmm.normalize(
            self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
        h.transmat_prior = transmat_prior
        h.means_ = 20 * self.means
        h.means_prior = means_prior
        h.means_weight = means_weight
        h.covars_ = self.covars[self.covariance_type]
        h.covars_prior = covars_prior
        h.covars_weight = covars_weight
        # Create training data by sampling from the HMM.
        train_obs = [h.sample(n=10)[0] for x in xrange(10)]
        # Mess up the parameters and see if we can re-learn them.
        h.n_iter = 0
        h.fit(train_obs[:1])
        trainll = train_hmm_and_keep_track_of_log_likelihood(
            h, train_obs, n_iter=n_iter, params=params)[1:]
        # Check that the loglik is always increasing during training
        if not np.all(np.diff(trainll) > 0) and verbose:
            print
            print ('Test MAP train: %s (%s)\n %s\n %s'
                   % (self.covariance_type, params, trainll, np.diff(trainll)))
        # XXX: Why such a large tolerance?
        self.assertTrue(np.all(np.diff(trainll) > -0.5))
    def test_fit_non_ergodic_transmat(self):
        # Left-to-right chain: the last state is absorbing, so the
        # transition matrix is not ergodic.
        startprob = np.array([1, 0, 0, 0, 0])
        transmat = np.array([[0.9, 0.1, 0, 0, 0],
                             [0, 0.9, 0.1, 0, 0],
                             [0, 0, 0.9, 0.1, 0],
                             [0, 0, 0, 0.9, 0.1],
                             [0, 0, 0, 0, 1.0]])
        h = hmm.GaussianHMM(n_components=5,
                            covariance_type='full', startprob=startprob,
                            transmat=transmat, n_iter=100, init_params='st')
        h.means_ = np.zeros((5, 10))
        h.covars_ = np.tile(np.identity(10), (5, 1, 1))
        obs = [h.sample(10)[0] for _ in range(10)]
        h.fit(obs=obs)
class TestGaussianHMMWithSphericalCovars(GaussianHMMBaseTester, TestCase):
    # Run the shared GaussianHMM suite with spherical covariances.
    covariance_type = 'spherical'
    def test_fit_startprob_and_transmat(self):
        # Restrict fitting to startprob ('s') and transmat ('t') only.
        self.test_fit('st')
class TestGaussianHMMWithDiagonalCovars(GaussianHMMBaseTester, TestCase):
    # Run the shared GaussianHMM suite with diagonal covariances.
    covariance_type = 'diag'
class TestGaussianHMMWithTiedCovars(GaussianHMMBaseTester, TestCase):
    # Run the shared GaussianHMM suite with a single tied covariance.
    covariance_type = 'tied'
class TestGaussianHMMWithFullCovars(GaussianHMMBaseTester, TestCase):
    # Run the shared GaussianHMM suite with full covariance matrices.
    covariance_type = 'full'
class MultinomialHMMTestCase(TestCase):
    """Using examples from Wikipedia
    - http://en.wikipedia.org/wiki/Hidden_Markov_model
    - http://en.wikipedia.org/wiki/Viterbi_algorithm
    """
    def setUp(self):
        self.prng = np.random.RandomState(9)
        self.n_components = 2  # ('Rainy', 'Sunny')
        self.n_symbols = 3  # ('walk', 'shop', 'clean')
        self.emissionprob = [[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]]
        self.startprob = [0.6, 0.4]
        self.transmat = [[0.7, 0.3], [0.4, 0.6]]
        self.h = hmm.MultinomialHMM(self.n_components,
                                    startprob=self.startprob,
                                    transmat=self.transmat)
        self.h.emissionprob_ = self.emissionprob
    def test_set_emissionprob(self):
        # NOTE(review): this assigns ``emissionprob`` (no trailing
        # underscore), a plain attribute, so it never exercises the
        # validated ``emissionprob_`` property -- confirm intent.
        h = hmm.MultinomialHMM(self.n_components)
        emissionprob = np.array([[0.8, 0.2, 0.0], [0.7, 0.2, 1.0]])
        h.emissionprob = emissionprob
        assert np.allclose(emissionprob, h.emissionprob)
    def test_wikipedia_viterbi_example(self):
        # From http://en.wikipedia.org/wiki/Viterbi_algorithm:
        # "This reveals that the observations ['walk', 'shop', 'clean']
        # were most likely generated by states ['Sunny', 'Rainy',
        # 'Rainy'], with probability 0.01344."
        observations = [0, 1, 2]
        logprob, state_sequence = self.h.decode(observations)
        self.assertAlmostEqual(np.exp(logprob), 0.01344)
        assert_array_equal(state_sequence, [1, 0, 0])
    def test_decode_map_algorithm(self):
        # The MAP decoder should agree with Viterbi on this example.
        observations = [0, 1, 2]
        h = hmm.MultinomialHMM(self.n_components, startprob=self.startprob,
                               transmat=self.transmat, algorithm="map",)
        h.emissionprob_ = self.emissionprob
        logprob, state_sequence = h.decode(observations)
        assert_array_equal(state_sequence, [1, 0, 0])
    def test_predict(self):
        observations = [0, 1, 2]
        state_sequence = self.h.predict(observations)
        posteriors = self.h.predict_proba(observations)
        assert_array_equal(state_sequence, [1, 0, 0])
        assert_array_almost_equal(posteriors, [
            [0.23170303, 0.76829697],
            [0.62406281, 0.37593719],
            [0.86397706, 0.13602294],
        ])
    def test_attributes(self):
        # Valid values round-trip through the property setters; invalid
        # shapes or non-normalized distributions raise ValueError.
        h = hmm.MultinomialHMM(self.n_components)
        self.assertEqual(h.n_components, self.n_components)
        h.startprob_ = self.startprob
        assert_array_almost_equal(h.startprob_, self.startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          2 * self.startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          np.zeros((self.n_components - 2, self.n_symbols)))
        h.transmat_ = self.transmat
        assert_array_almost_equal(h.transmat_, self.transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          2 * self.transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          np.zeros((self.n_components - 2, self.n_components)))
        h.emissionprob_ = self.emissionprob
        assert_array_almost_equal(h.emissionprob_, self.emissionprob)
        self.assertRaises(ValueError, h.__setattr__, 'emissionprob_', [])
        self.assertRaises(ValueError, h.__setattr__, 'emissionprob_',
                          np.zeros((self.n_components - 2, self.n_symbols)))
        self.assertEqual(h.n_symbols, self.n_symbols)
    def test_eval(self):
        idx = np.repeat(range(self.n_components), 10)
        nobs = len(idx)
        # Random symbol sequence in [0, n_symbols).
        obs = [int(x) for x in np.floor(self.prng.rand(nobs) * self.n_symbols)]
        ll, posteriors = self.h.eval(obs)
        self.assertEqual(posteriors.shape, (nobs, self.n_components))
        assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
    def test_sample(self, n=1000):
        samples = self.h.sample(n)[0]
        self.assertEqual(len(samples), n)
        # With n=1000 draws, every symbol should appear at least once.
        self.assertEqual(len(np.unique(samples)), self.n_symbols)
    def test_fit(self, params='ste', n_iter=5, verbose=False, **kwargs):
        h = self.h
        # Create training data by sampling from the HMM.
        train_obs = [h.sample(n=10)[0] for x in xrange(10)]
        # Mess up the parameters and see if we can re-learn them.
        h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
        h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
                                                   self.n_components), axis=1)
        h.emissionprob_ = hmm.normalize(
            self.prng.rand(self.n_components, self.n_symbols), axis=1)
        trainll = train_hmm_and_keep_track_of_log_likelihood(
            h, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
        # Check that the loglik is always increasing during training
        if not np.all(np.diff(trainll) > 0) and verbose:
            print
            print 'Test train: (%s)\n %s\n %s' % (params, trainll,
                                                  np.diff(trainll))
        self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
    def test_fit_emissionprob(self):
        # Restrict fitting to the emission probabilities only.
        self.test_fit('e')
    def test_fit_with_init(self, params='ste', n_iter=5, verbose=False,
                           **kwargs):
        h = self.h
        learner = hmm.MultinomialHMM(self.n_components)
        # Create training data by sampling from the HMM.
        train_obs = [h.sample(n=10)[0] for x in xrange(10)]
        # use init_function to initialize parameters
        learner._init(train_obs, params)
        trainll = train_hmm_and_keep_track_of_log_likelihood(
            learner, train_obs, n_iter=n_iter, params=params, **kwargs)[1:]
        # Check that the loglik is always increasing during training
        if not np.all(np.diff(trainll) > 0) and verbose:
            print
            print 'Test train: (%s)\n %s\n %s' % (params, trainll,
                                                  np.diff(trainll))
        self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
def create_random_gmm(n_mix, n_features, covariance_type, prng=0):
    """Build a ``mixture.GMM`` with random means/covariances/weights.

    :param n_mix: number of mixture components
    :param n_features: dimensionality of each component
    :param covariance_type: one of 'spherical', 'tied', 'diag', 'full'
    :param prng: seed or RandomState for reproducibility
    """
    prng = check_random_state(prng)
    g = mixture.GMM(n_mix, covariance_type=covariance_type)
    g.means_ = prng.randint(-20, 20, (n_mix, n_features))
    # mincv keeps every covariance bounded away from zero (well-conditioned).
    mincv = 0.1
    g.covars_ = {
        'spherical': (mincv + mincv * np.dot(prng.rand(n_mix, 1),
                                             np.ones((1, n_features)))) ** 2,
        'tied': (make_spd_matrix(n_features, random_state=prng)
                 + mincv * np.eye(n_features)),
        'diag': (mincv + mincv * prng.rand(n_mix, n_features)) ** 2,
        'full': np.array(
            [make_spd_matrix(n_features, random_state=prng)
             + mincv * np.eye(n_features) for x in xrange(n_mix)])
    }[covariance_type]
    g.weights_ = hmm.normalize(prng.rand(n_mix))
    return g
class GMMHMMBaseTester(object):
    """Shared test suite for ``hmm.GMMHMM``; concrete subclasses mix this
    with ``TestCase`` and set ``covariance_type``."""
    def setUp(self):
        self.prng = np.random.RandomState(9)
        self.n_components = 3
        self.n_mix = 2
        self.n_features = 2
        # NOTE(review): this instance assignment shadows the
        # ``covariance_type`` class attribute set by subclasses, so the
        # tied/full subclasses actually run with 'diag' -- confirm intent.
        self.covariance_type = 'diag'
        self.startprob = self.prng.rand(self.n_components)
        self.startprob = self.startprob / self.startprob.sum()
        self.transmat = self.prng.rand(self.n_components, self.n_components)
        self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
                                 (1, self.n_components))
        # One random GMM emission model per hidden state.
        self.gmms_ = []
        for state in xrange(self.n_components):
            self.gmms_.append(create_random_gmm(
                self.n_mix, self.n_features, self.covariance_type,
                prng=self.prng))
    def test_attributes(self):
        # Valid values round-trip through the property setters; invalid
        # shapes or non-normalized distributions raise ValueError.
        h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
        self.assertEqual(h.n_components, self.n_components)
        h.startprob_ = self.startprob
        assert_array_almost_equal(h.startprob_, self.startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          2 * self.startprob)
        self.assertRaises(ValueError, h.__setattr__, 'startprob_', [])
        self.assertRaises(ValueError, h.__setattr__, 'startprob_',
                          np.zeros((self.n_components - 2, self.n_features)))
        h.transmat_ = self.transmat
        assert_array_almost_equal(h.transmat_, self.transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          2 * self.transmat)
        self.assertRaises(ValueError, h.__setattr__, 'transmat_', [])
        self.assertRaises(ValueError, h.__setattr__, 'transmat_',
                          np.zeros((self.n_components - 2, self.n_components)))
    def test_eval_and_decode(self):
        h = hmm.GMMHMM(self.n_components, gmms=self.gmms_)
        # Make sure the means are far apart so posteriors.argmax()
        # picks the actual component used to generate the observations.
        for g in h.gmms_:
            g.means_ *= 20
        refstateseq = np.repeat(range(self.n_components), 5)
        nobs = len(refstateseq)
        obs = [h.gmms_[x].sample(1).flatten() for x in refstateseq]
        ll, posteriors = h.eval(obs)
        self.assertEqual(posteriors.shape, (nobs, self.n_components))
        assert_array_almost_equal(posteriors.sum(axis=1), np.ones(nobs))
        viterbi_ll, stateseq = h.decode(obs)
        assert_array_equal(stateseq, refstateseq)
    def test_sample(self, n=1000):
        h = hmm.GMMHMM(self.n_components, self.covariance_type,
                       startprob=self.startprob, transmat=self.transmat,
                       gmms=self.gmms_)
        samples = h.sample(n)[0]
        self.assertEqual(samples.shape, (n, self.n_features))
    def test_fit(self, params='stmwc', n_iter=5, verbose=False, **kwargs):
        h = hmm.GMMHMM(self.n_components, covars_prior=1.0)
        h.startprob_ = self.startprob
        # Perturb the diagonal so the transition matrix is not degenerate.
        h.transmat_ = hmm.normalize(
            self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
        h.gmms_ = self.gmms_
        # Create training data by sampling from the HMM.
        train_obs = [h.sample(n=10, random_state=self.prng)[0]
                     for x in xrange(10)]
        # Mess up the parameters and see if we can re-learn them.
        h.n_iter = 0
        h.fit(train_obs)
        h.transmat_ = hmm.normalize(self.prng.rand(self.n_components,
                                                   self.n_components), axis=1)
        h.startprob_ = hmm.normalize(self.prng.rand(self.n_components))
        trainll = train_hmm_and_keep_track_of_log_likelihood(
            h, train_obs, n_iter=n_iter, params=params)[1:]
        if not np.all(np.diff(trainll) > 0) and verbose:
            print
            print 'Test train: (%s)\n %s\n %s' % (params, trainll,
                                                  np.diff(trainll))
        # XXX: this test appears to check that training log likelihood should
        # never be decreasing (up to a tolerance of 0.5, why?) but this is not
        # the case when the seed changes.
        raise SkipTest("Unstable test: trainll is not always increasing "
                       "depending on seed")
        # Unreachable: the unconditional SkipTest above disables this check.
        self.assertTrue(np.all(np.diff(trainll) > -0.5))
    def test_fit_works_on_sequences_of_different_length(self):
        obs = [self.prng.rand(3, self.n_features),
               self.prng.rand(4, self.n_features),
               self.prng.rand(5, self.n_features)]
        h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
        # This shouldn't raise
        # ValueError: setting an array element with a sequence.
        h.fit(obs)
class TestGMMHMMWithDiagCovars(GMMHMMBaseTester, TestCase):
    # Run the shared GMMHMM suite with diagonal covariances.
    covariance_type = 'diag'
    def test_fit_startprob_and_transmat(self):
        # Restrict fitting to startprob ('s') and transmat ('t') only.
        self.test_fit('st')
    def test_fit_means(self):
        # Restrict fitting to the component means ('m') only.
        self.test_fit('m')
class TestGMMHMMWithTiedCovars(GMMHMMBaseTester, TestCase):
    # Run the shared GMMHMM suite with tied covariances (but see the
    # NOTE in GMMHMMBaseTester.setUp about this being overridden).
    covariance_type = 'tied'
class TestGMMHMMWithFullCovars(GMMHMMBaseTester, TestCase):
    # Run the shared GMMHMM suite with full covariances (but see the
    # NOTE in GMMHMMBaseTester.setUp about this being overridden).
    covariance_type = 'full'
def test_normalize_1D():
    """hmm.normalize must make a 1-D array sum to 1 along its only axis."""
    data = rng.rand(2) + 1.0
    for ax in range(1):
        normalized = hmm.normalize(data, ax)
        assert np.all(np.allclose(normalized.sum(ax), 1.0))
def test_normalize_3D():
    """hmm.normalize must make a 3-D array sum to 1 along each axis."""
    data = rng.rand(2, 2, 2) + 1.0
    for ax in range(3):
        normalized = hmm.normalize(data, ax)
        assert np.all(np.allclose(normalized.sum(ax), 1.0))
| |
import datetime
import os
import uuid
from decimal import Decimal
import django
import pytest
from django.http import QueryDict
from django.utils import timezone
import rest_framework
from rest_framework import serializers
# Tests for field keyword arguments and core functionality.
# ---------------------------------------------------------
class TestEmpty:
    """
    Tests for `required`, `allow_null`, `allow_blank`, `default`.
    """
    def test_required(self):
        """
        By default a field must be included in the input.
        """
        field = serializers.IntegerField()
        with pytest.raises(serializers.ValidationError) as exc_info:
            field.run_validation()
        assert exc_info.value.detail == ['This field is required.']
    def test_not_required(self):
        """
        If `required=False` then a field may be omitted from the input.
        """
        field = serializers.IntegerField(required=False)
        with pytest.raises(serializers.SkipField):
            field.run_validation()
    def test_disallow_null(self):
        """
        By default `None` is not a valid input.
        """
        field = serializers.IntegerField()
        with pytest.raises(serializers.ValidationError) as exc_info:
            field.run_validation(None)
        assert exc_info.value.detail == ['This field may not be null.']
    def test_allow_null(self):
        """
        If `allow_null=True` then `None` is a valid input.
        """
        field = serializers.IntegerField(allow_null=True)
        output = field.run_validation(None)
        assert output is None
    def test_disallow_blank(self):
        """
        By default '' is not a valid input.
        """
        field = serializers.CharField()
        with pytest.raises(serializers.ValidationError) as exc_info:
            field.run_validation('')
        assert exc_info.value.detail == ['This field may not be blank.']
    def test_allow_blank(self):
        """
        If `allow_blank=True` then '' is a valid input.
        """
        field = serializers.CharField(allow_blank=True)
        output = field.run_validation('')
        assert output == ''
    def test_default(self):
        """
        If `default` is set, then omitted values get the default input.
        """
        field = serializers.IntegerField(default=123)
        output = field.run_validation()
        # Use equality, not identity: `output is 123` only passed by accident
        # of CPython's small-integer caching and is not a language guarantee.
        assert output == 123
class TestSource:
    """Tests for the `source` keyword argument on serializer fields."""
    def test_source(self):
        # `source` redirects validated data to a differently named key.
        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.CharField(source='other')
        serializer = ExampleSerializer(data={'example_field': 'abc'})
        assert serializer.is_valid()
        assert serializer.validated_data == {'other': 'abc'}
    def test_redundant_source(self):
        # Setting `source` equal to the field name is rejected with an
        # explanatory assertion error.
        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.CharField(source='example_field')
        with pytest.raises(AssertionError) as exc_info:
            ExampleSerializer().fields
        assert str(exc_info.value) == (
            "It is redundant to specify `source='example_field'` on field "
            "'CharField' in serializer 'ExampleSerializer', because it is the "
            "same as the field name. Remove the `source` keyword argument."
        )
    def test_callable_source(self):
        # A `source` naming a method on the instance is called to produce
        # the serialized value.
        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.CharField(source='example_callable')
        class ExampleInstance(object):
            def example_callable(self):
                return 'example callable value'
        serializer = ExampleSerializer(ExampleInstance())
        assert serializer.data['example_field'] == 'example callable value'
    def test_callable_source_raises(self):
        # An AttributeError raised inside a callable source surfaces as a
        # ValueError (so it is not mistaken for a missing attribute).
        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.CharField(source='example_callable', read_only=True)
        class ExampleInstance(object):
            def example_callable(self):
                raise AttributeError('method call failed')
        with pytest.raises(ValueError) as exc_info:
            serializer = ExampleSerializer(ExampleInstance())
            serializer.data.items()
        assert 'method call failed' in str(exc_info.value)
class TestReadOnly:
    """Tests for the `read_only` behaviour of serializer fields."""
    def setup(self):
        class TestSerializer(serializers.Serializer):
            read_only = serializers.ReadOnlyField()
            writable = serializers.IntegerField()
        self.Serializer = TestSerializer
    def test_validate_read_only(self):
        """
        Read-only fields should not be included in validation.
        """
        data = {'read_only': 123, 'writable': 456}
        serializer = self.Serializer(data=data)
        assert serializer.is_valid()
        assert serializer.validated_data == {'writable': 456}
    def test_serialize_read_only(self):
        """
        Read-only fields should be serialized.
        """
        instance = {'read_only': 123, 'writable': 456}
        serializer = self.Serializer(instance)
        assert serializer.data == {'read_only': 123, 'writable': 456}
class TestWriteOnly:
    """Tests for the `write_only` behaviour of serializer fields."""
    def setup(self):
        class TestSerializer(serializers.Serializer):
            write_only = serializers.IntegerField(write_only=True)
            readable = serializers.IntegerField()
        self.Serializer = TestSerializer
    def test_validate_write_only(self):
        """
        Write-only fields should be included in validation.
        """
        data = {'write_only': 123, 'readable': 456}
        serializer = self.Serializer(data=data)
        assert serializer.is_valid()
        assert serializer.validated_data == {'write_only': 123, 'readable': 456}
    def test_serialize_write_only(self):
        """
        Write-only fields should not be serialized.
        """
        instance = {'write_only': 123, 'readable': 456}
        serializer = self.Serializer(instance)
        assert serializer.data == {'readable': 456}
class TestInitial:
    """Tests for the `initial` keyword argument on serializer fields."""
    def setup(self):
        class TestSerializer(serializers.Serializer):
            initial_field = serializers.IntegerField(initial=123)
            blank_field = serializers.IntegerField()
        self.serializer = TestSerializer()
    def test_initial(self):
        """
        Initial values should be included when serializing a new representation.
        """
        assert self.serializer.data == {
            'initial_field': 123,
            'blank_field': None
        }
class TestLabel:
    """Tests for the `label` keyword argument on serializer fields."""
    def setup(self):
        class TestSerializer(serializers.Serializer):
            labeled = serializers.IntegerField(label='My label')
        self.serializer = TestSerializer()
    def test_label(self):
        """
        A field's label may be set with the `label` argument.
        """
        fields = self.serializer.fields
        assert fields['labeled'].label == 'My label'
class TestInvalidErrorKey:
    """Tests error reporting when `fail()` is called with an unknown key."""
    def setup(self):
        class ExampleField(serializers.Field):
            def to_native(self, data):
                # 'incorrect' is deliberately missing from error_messages.
                self.fail('incorrect')
        self.field = ExampleField()
    def test_invalid_error_key(self):
        """
        If a field raises a validation error, but does not have a corresponding
        error message, then raise an appropriate assertion error.
        """
        with pytest.raises(AssertionError) as exc_info:
            self.field.to_native(123)
        expected = (
            'ValidationError raised by `ExampleField`, but error key '
            '`incorrect` does not exist in the `error_messages` dictionary.'
        )
        assert str(exc_info.value) == expected
class TestBooleanHTMLInput:
    """Tests BooleanField behaviour with HTML form (QueryDict) input."""
    def test_empty_html_checkbox(self):
        """
        HTML checkboxes do not send any value, but should be treated
        as `False` by BooleanField.
        """
        class TestSerializer(serializers.Serializer):
            archived = serializers.BooleanField()
        # An empty QueryDict simulates an unchecked checkbox submission.
        serializer = TestSerializer(data=QueryDict(''))
        assert serializer.is_valid()
        assert serializer.validated_data == {'archived': False}
    def test_empty_html_checkbox_not_required(self):
        """
        HTML checkboxes do not send any value, but should be treated
        as `False` by BooleanField, even if the field is required=False.
        """
        class TestSerializer(serializers.Serializer):
            archived = serializers.BooleanField(required=False)
        serializer = TestSerializer(data=QueryDict(''))
        assert serializer.is_valid()
        assert serializer.validated_data == {'archived': False}
class TestHTMLInput:
    """Tests how serializer fields treat HTML form (QueryDict) input,
    distinguishing an omitted key from a key submitted with an empty
    value (e.g. ``QueryDict('')`` vs ``QueryDict('message=')``)."""
    def test_empty_html_charfield_with_default(self):
        # Omitted key falls back to the field default.
        class TestSerializer(serializers.Serializer):
            message = serializers.CharField(default='happy')
        serializer = TestSerializer(data=QueryDict(''))
        assert serializer.is_valid()
        assert serializer.validated_data == {'message': 'happy'}
    def test_empty_html_charfield_without_default(self):
        # Submitted-but-empty value is kept as '' when allow_blank=True.
        class TestSerializer(serializers.Serializer):
            message = serializers.CharField(allow_blank=True)
        serializer = TestSerializer(data=QueryDict('message='))
        assert serializer.is_valid()
        assert serializer.validated_data == {'message': ''}
    def test_empty_html_charfield_without_default_not_required(self):
        class TestSerializer(serializers.Serializer):
            message = serializers.CharField(allow_blank=True, required=False)
        serializer = TestSerializer(data=QueryDict('message='))
        assert serializer.is_valid()
        assert serializer.validated_data == {'message': ''}
    def test_empty_html_integerfield(self):
        # Empty string input on an IntegerField falls back to the default.
        class TestSerializer(serializers.Serializer):
            message = serializers.IntegerField(default=123)
        serializer = TestSerializer(data=QueryDict('message='))
        assert serializer.is_valid()
        assert serializer.validated_data == {'message': 123}
    def test_empty_html_uuidfield_with_default(self):
        # Callable default (uuid.uuid4) produces a value for the empty input.
        class TestSerializer(serializers.Serializer):
            message = serializers.UUIDField(default=uuid.uuid4)
        serializer = TestSerializer(data=QueryDict('message='))
        assert serializer.is_valid()
        assert list(serializer.validated_data.keys()) == ['message']
    def test_empty_html_uuidfield_with_optional(self):
        # Optional field with empty input is omitted from validated_data.
        class TestSerializer(serializers.Serializer):
            message = serializers.UUIDField(required=False)
        serializer = TestSerializer(data=QueryDict('message='))
        assert serializer.is_valid()
        assert list(serializer.validated_data.keys()) == []
    def test_empty_html_charfield_allow_null(self):
        # allow_null turns the empty submission into None.
        class TestSerializer(serializers.Serializer):
            message = serializers.CharField(allow_null=True)
        serializer = TestSerializer(data=QueryDict('message='))
        assert serializer.is_valid()
        assert serializer.validated_data == {'message': None}
    def test_empty_html_datefield_allow_null(self):
        class TestSerializer(serializers.Serializer):
            expiry = serializers.DateField(allow_null=True)
        serializer = TestSerializer(data=QueryDict('expiry='))
        assert serializer.is_valid()
        assert serializer.validated_data == {'expiry': None}
    def test_empty_html_charfield_allow_null_allow_blank(self):
        # When both are allowed, blank ('') wins over null for empty input.
        class TestSerializer(serializers.Serializer):
            message = serializers.CharField(allow_null=True, allow_blank=True)
        serializer = TestSerializer(data=QueryDict('message='))
        assert serializer.is_valid()
        assert serializer.validated_data == {'message': ''}
    def test_empty_html_charfield_required_false(self):
        # Omitted optional field simply does not appear in validated_data.
        class TestSerializer(serializers.Serializer):
            message = serializers.CharField(required=False)
        serializer = TestSerializer(data=QueryDict(''))
        assert serializer.is_valid()
        assert serializer.validated_data == {}
    def test_querydict_list_input(self):
        # Repeated keys in a QueryDict become a list for ListField.
        class TestSerializer(serializers.Serializer):
            scores = serializers.ListField(child=serializers.IntegerField())
        serializer = TestSerializer(data=QueryDict('scores=1&scores=3'))
        assert serializer.is_valid()
        assert serializer.validated_data == {'scores': [1, 3]}
    def test_querydict_list_input_only_one_input(self):
        # A single occurrence still yields a one-element list.
        class TestSerializer(serializers.Serializer):
            scores = serializers.ListField(child=serializers.IntegerField())
        serializer = TestSerializer(data=QueryDict('scores=1&'))
        assert serializer.is_valid()
        assert serializer.validated_data == {'scores': [1]}
class TestCreateOnlyDefault:
    """
    Tests for the `CreateOnlyDefault` wrapper: the default is applied on
    create but omitted on update.
    """
    def setup(self):
        create_default = serializers.CreateOnlyDefault('2001-01-01')

        class ExampleSerializer(serializers.Serializer):
            published = serializers.HiddenField(default=create_default)
            text = serializers.CharField()

        self.Serializer = ExampleSerializer

    def test_create_only_default_is_provided(self):
        """On create (no instance), the default appears in validated_data."""
        ser = self.Serializer(data={'text': 'example'})
        assert ser.is_valid()
        assert ser.validated_data == {
            'text': 'example', 'published': '2001-01-01'
        }

    def test_create_only_default_is_not_provided_on_update(self):
        """On update (instance given), the default is omitted."""
        existing = {
            'text': 'example', 'published': '2001-01-01'
        }
        ser = self.Serializer(existing, data={'text': 'example'})
        assert ser.is_valid()
        assert ser.validated_data == {
            'text': 'example',
        }

    def test_create_only_default_callable_sets_context(self):
        """
        A callable default wrapped in CreateOnlyDefault should have
        set_context() invoked on it when the callable supports it.
        """
        class ContextTrackingDefault:
            def set_context(self, serializer_field):
                self.field = serializer_field

            def __call__(self):
                # Reports whether set_context() ran before the call.
                return "success" if hasattr(self, 'field') else "failure"

        class ExampleSerializer(serializers.Serializer):
            context_set = serializers.CharField(default=serializers.CreateOnlyDefault(ContextTrackingDefault()))

        ser = ExampleSerializer(data={})
        assert ser.is_valid()
        assert ser.validated_data['context_set'] == 'success'
# Tests for field input and output values.
# ----------------------------------------
def get_items(mapping_or_list_of_two_tuples):
    """
    Normalize a test-case container to an iterable of (value, expected) pairs.

    Accepts either a dict mapping input value -> expected output, or a
    list that is already made of two-tuples, and returns its items.
    """
    if isinstance(mapping_or_list_of_two_tuples, dict):
        # {value: expected} -> dict_items of (value, expected)
        return mapping_or_list_of_two_tuples.items()
    # Already in [(value, expected), ...] form.
    return mapping_or_list_of_two_tuples
class FieldValues:
    """
    Base class for testing valid and invalid input values.

    Subclasses supply a `field` plus `valid_inputs`, `invalid_inputs` and
    `outputs` containers (dicts or lists of two-tuples, see `get_items`).
    """
    def test_valid_inputs(self):
        """
        Ensure that valid values return the expected validated data.
        """
        for value, expected in get_items(self.valid_inputs):
            assert self.field.run_validation(value) == expected

    def test_invalid_inputs(self):
        """
        Ensure that invalid values raise the expected validation error.
        """
        for value, expected_detail in get_items(self.invalid_inputs):
            with pytest.raises(serializers.ValidationError) as exc_info:
                self.field.run_validation(value)
            assert exc_info.value.detail == expected_detail

    def test_outputs(self):
        """
        Ensure that values serialize to the expected representation.
        """
        for value, expected in get_items(self.outputs):
            assert self.field.to_representation(value) == expected
# Boolean types...
class TestBooleanField(FieldValues):
    """
    Valid and invalid values for `BooleanField`.
    """
    # NOTE: `True == 1` and `False == 0` in Python, so the later bool keys
    # replace the earlier int keys in these dict literals; both map to the
    # same expected value, so the covered cases are unaffected.
    valid_inputs = {
        'true': True,
        'false': False,
        '1': True,
        '0': False,
        1: True,
        0: False,
        True: True,
        False: False,
    }
    invalid_inputs = {
        'foo': ['"foo" is not a valid boolean.'],
        None: ['This field may not be null.']
    }
    outputs = {
        'true': True,
        'false': False,
        '1': True,
        '0': False,
        1: True,
        0: False,
        True: True,
        False: False,
        'other': True
    }
    field = serializers.BooleanField()

    def test_disallow_unhashable_collection_types(self):
        # Lists and dicts are unhashable, so they cannot appear as keys in
        # the dicts above; exercise them here instead.
        inputs = (
            [],
            {},
        )
        field = serializers.BooleanField()
        for input_value in inputs:
            with pytest.raises(serializers.ValidationError) as exc_info:
                field.run_validation(input_value)
            expected = ['"{0}" is not a valid boolean.'.format(input_value)]
            assert exc_info.value.detail == expected
class TestNullBooleanField(FieldValues):
    """
    Valid and invalid values for `NullBooleanField`.
    """
    # As with BooleanField, the bool keys alias the int keys 1/0 here.
    valid_inputs = {
        'true': True,
        'false': False,
        'null': None,
        True: True,
        False: False,
        None: None
    }
    invalid_inputs = {
        'foo': ['"foo" is not a valid boolean.'],
    }
    outputs = {
        'true': True,
        'false': False,
        'null': None,
        True: True,
        False: False,
        None: None,
        'other': True
    }
    field = serializers.NullBooleanField()
# String types...
class TestCharField(FieldValues):
    """
    Valid and invalid values for `CharField`.
    """
    valid_inputs = {
        1: '1',
        'abc': 'abc'
    }
    invalid_inputs = {
        '': ['This field may not be blank.']
    }
    outputs = {
        1: '1',
        'abc': 'abc'
    }
    field = serializers.CharField()

    def test_trim_whitespace_default(self):
        """Surrounding whitespace is trimmed by default."""
        char_field = serializers.CharField()
        assert char_field.to_internal_value(' abc ') == 'abc'

    def test_trim_whitespace_disabled(self):
        """`trim_whitespace=False` preserves surrounding whitespace."""
        char_field = serializers.CharField(trim_whitespace=False)
        assert char_field.to_internal_value(' abc ') == ' abc '

    def test_disallow_blank_with_trim_whitespace(self):
        """A whitespace-only input trims to '' and then fails blank validation."""
        char_field = serializers.CharField(allow_blank=False, trim_whitespace=True)
        with pytest.raises(serializers.ValidationError) as exc_info:
            char_field.run_validation(' ')
        assert exc_info.value.detail == ['This field may not be blank.']
class TestEmailField(FieldValues):
    """
    Valid and invalid values for `EmailField`.
    """
    # Surrounding whitespace is stripped from the input.
    valid_inputs = {
        'example@example.com': 'example@example.com',
        ' example@example.com ': 'example@example.com',
    }
    invalid_inputs = {
        'examplecom': ['Enter a valid email address.']
    }
    outputs = {}
    field = serializers.EmailField()
class TestRegexField(FieldValues):
    """
    Valid and invalid values for `RegexField`.
    """
    valid_inputs = {
        'a9': 'a9',
    }
    # The pattern is case-sensitive, so 'A9' is rejected.
    invalid_inputs = {
        'A9': ["This value does not match the required pattern."]
    }
    outputs = {}
    field = serializers.RegexField(regex='[a-z][0-9]')
class TestSlugField(FieldValues):
    """
    Valid and invalid values for `SlugField`.
    """
    valid_inputs = {
        'slug-99': 'slug-99',
    }
    # Spaces are not permitted in slugs.
    invalid_inputs = {
        'slug 99': ['Enter a valid "slug" consisting of letters, numbers, underscores or hyphens.']
    }
    outputs = {}
    field = serializers.SlugField()
class TestURLField(FieldValues):
    """
    Valid and invalid values for `URLField`.
    """
    valid_inputs = {
        'http://example.com': 'http://example.com',
    }
    # A bare domain without a scheme is rejected.
    invalid_inputs = {
        'example.com': ['Enter a valid URL.']
    }
    outputs = {}
    field = serializers.URLField()
class TestUUIDField(FieldValues):
    """
    Valid and invalid values for `UUIDField`.
    """
    # Hyphenated, compact-hex and URN forms are accepted.
    valid_inputs = {
        '825d7aeb-05a9-45b5-a5b7-05df87923cda': uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'),
        '825d7aeb05a945b5a5b705df87923cda': uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'),
        'urn:uuid:213b7d9b-244f-410d-828c-dabce7a2615d': uuid.UUID('213b7d9b-244f-410d-828c-dabce7a2615d'),
        # Integer input — presumably interpreted as `UUID(int=...)`; the
        # expected UUID matches this integer value.
        284758210125106368185219588917561929842: uuid.UUID('d63a6fb6-88d5-40c7-a91c-9edf73283072')
    }
    invalid_inputs = {
        '825d7aeb-05a9-45b5-a5b7': ['"825d7aeb-05a9-45b5-a5b7" is not a valid UUID.'],
        (1, 2, 3): ['"(1, 2, 3)" is not a valid UUID.']
    }
    outputs = {
        uuid.UUID('825d7aeb-05a9-45b5-a5b7-05df87923cda'): '825d7aeb-05a9-45b5-a5b7-05df87923cda'
    }
    field = serializers.UUIDField()

    def _test_format(self, uuid_format, formatted_uuid_0):
        # Helper: round-trip the nil UUID through the given output format.
        field = serializers.UUIDField(format=uuid_format)
        assert field.to_representation(uuid.UUID(int=0)) == formatted_uuid_0
        assert field.to_internal_value(formatted_uuid_0) == uuid.UUID(int=0)

    def test_formats(self):
        self._test_format('int', 0)
        self._test_format('hex_verbose', '00000000-0000-0000-0000-000000000000')
        self._test_format('urn', 'urn:uuid:00000000-0000-0000-0000-000000000000')
        self._test_format('hex', '0' * 32)
class TestIPAddressField(FieldValues):
    """
    Valid and invalid values for `IPAddressField` (both protocols allowed).
    """
    # IPv6 addresses are normalized to their compact form.
    valid_inputs = {
        '127.0.0.1': '127.0.0.1',
        '192.168.33.255': '192.168.33.255',
        '2001:0db8:85a3:0042:1000:8a2e:0370:7334': '2001:db8:85a3:42:1000:8a2e:370:7334',
        '2001:cdba:0:0:0:0:3257:9652': '2001:cdba::3257:9652',
        '2001:cdba::3257:9652': '2001:cdba::3257:9652'
    }
    invalid_inputs = {
        '127001': ['Enter a valid IPv4 or IPv6 address.'],
        '127.122.111.2231': ['Enter a valid IPv4 or IPv6 address.'],
        '2001:::9652': ['Enter a valid IPv4 or IPv6 address.'],
        '2001:0db8:85a3:0042:1000:8a2e:0370:73341': ['Enter a valid IPv4 or IPv6 address.'],
    }
    outputs = {}
    field = serializers.IPAddressField()
class TestIPv4AddressField(FieldValues):
    """
    Valid and invalid values for `IPAddressField` restricted to IPv4.
    """
    valid_inputs = {
        '127.0.0.1': '127.0.0.1',
        '192.168.33.255': '192.168.33.255',
    }
    invalid_inputs = {
        '127001': ['Enter a valid IPv4 address.'],
        '127.122.111.2231': ['Enter a valid IPv4 address.'],
    }
    outputs = {}
    field = serializers.IPAddressField(protocol='IPv4')
class TestIPv6AddressField(FieldValues):
    """
    Valid and invalid values for `IPAddressField` restricted to IPv6.
    """
    # Addresses are normalized to their compact form.
    valid_inputs = {
        '2001:0db8:85a3:0042:1000:8a2e:0370:7334': '2001:db8:85a3:42:1000:8a2e:370:7334',
        '2001:cdba:0:0:0:0:3257:9652': '2001:cdba::3257:9652',
        '2001:cdba::3257:9652': '2001:cdba::3257:9652'
    }
    # NOTE(review): the expected messages mention "IPv4 or IPv6" even though
    # the field is IPv6-only — this mirrors the actual error text produced.
    invalid_inputs = {
        '2001:::9652': ['Enter a valid IPv4 or IPv6 address.'],
        '2001:0db8:85a3:0042:1000:8a2e:0370:73341': ['Enter a valid IPv4 or IPv6 address.'],
    }
    outputs = {}
    field = serializers.IPAddressField(protocol='IPv6')
class TestFilePathField(FieldValues):
    """
    Valid and invalid values for `FilePathField`.
    """
    # This test module itself is a file inside the configured path,
    # so it is a valid choice.
    valid_inputs = {
        __file__: __file__,
    }
    invalid_inputs = {
        'wrong_path': ['"wrong_path" is not a valid path choice.']
    }
    outputs = {
    }
    field = serializers.FilePathField(
        path=os.path.abspath(os.path.dirname(__file__))
    )
# Number types...
class TestIntegerField(FieldValues):
    """
    Valid and invalid values for `IntegerField`.
    """
    # NOTE: `1 == 1.0` and `0 == 0.0` in Python, so the float keys alias the
    # earlier int keys in these dict literals; the mapped values agree, so
    # the covered cases are unaffected.
    valid_inputs = {
        '1': 1,
        '0': 0,
        1: 1,
        0: 0,
        1.0: 1,
        0.0: 0,
        '1.0': 1
    }
    invalid_inputs = {
        0.5: ['A valid integer is required.'],
        'abc': ['A valid integer is required.'],
        '0.5': ['A valid integer is required.']
    }
    outputs = {
        '1': 1,
        '0': 0,
        1: 1,
        0: 0,
        1.0: 1,
        0.0: 0
    }
    field = serializers.IntegerField()
class TestMinMaxIntegerField(FieldValues):
    """
    Valid and invalid values for `IntegerField` with min and max limits.
    """
    valid_inputs = {
        '1': 1,
        '3': 3,
        1: 1,
        3: 3,
    }
    # Values just outside the [1, 3] range, as both ints and strings.
    invalid_inputs = {
        0: ['Ensure this value is greater than or equal to 1.'],
        4: ['Ensure this value is less than or equal to 3.'],
        '0': ['Ensure this value is greater than or equal to 1.'],
        '4': ['Ensure this value is less than or equal to 3.'],
    }
    outputs = {}
    field = serializers.IntegerField(min_value=1, max_value=3)
class TestFloatField(FieldValues):
    """
    Valid and invalid values for `FloatField`.
    """
    # NOTE: `1 == 1.0` and `0 == 0.0`, so the float keys alias the earlier
    # int keys in these dict literals; mapped values agree.
    valid_inputs = {
        '1': 1.0,
        '0': 0.0,
        1: 1.0,
        0: 0.0,
        1.0: 1.0,
        0.0: 0.0,
    }
    invalid_inputs = {
        'abc': ["A valid number is required."]
    }
    outputs = {
        '1': 1.0,
        '0': 0.0,
        1: 1.0,
        0: 0.0,
        1.0: 1.0,
        0.0: 0.0,
    }
    field = serializers.FloatField()
class TestMinMaxFloatField(FieldValues):
    """
    Valid and invalid values for `FloatField` with min and max limits.
    """
    valid_inputs = {
        '1': 1,
        '3': 3,
        1: 1,
        3: 3,
        1.0: 1.0,
        3.0: 3.0,
    }
    # Values just outside the [1, 3] range.
    invalid_inputs = {
        0.9: ['Ensure this value is greater than or equal to 1.'],
        3.1: ['Ensure this value is less than or equal to 3.'],
        '0.0': ['Ensure this value is greater than or equal to 1.'],
        '3.1': ['Ensure this value is less than or equal to 3.'],
    }
    outputs = {}
    field = serializers.FloatField(min_value=1, max_value=3)
class TestDecimalField(FieldValues):
    """
    Valid and invalid values for `DecimalField` (max_digits=3, decimal_places=1).
    """
    valid_inputs = {
        '12.3': Decimal('12.3'),
        '0.1': Decimal('0.1'),
        10: Decimal('10'),
        0: Decimal('0'),
        12.3: Decimal('12.3'),
        0.1: Decimal('0.1'),
        '2E+1': Decimal('20'),
    }
    # NOTE(review): a tuple of pairs rather than a dict — presumably because
    # some of these inputs (e.g. Decimal('Nan')) are unsuitable as dict keys.
    invalid_inputs = (
        ('abc', ["A valid number is required."]),
        (Decimal('Nan'), ["A valid number is required."]),
        (Decimal('Inf'), ["A valid number is required."]),
        ('12.345', ["Ensure that there are no more than 3 digits in total."]),
        (200000000000.0, ["Ensure that there are no more than 3 digits in total."]),
        ('0.01', ["Ensure that there are no more than 1 decimal places."]),
        (123, ["Ensure that there are no more than 2 digits before the decimal point."]),
        ('2E+2', ["Ensure that there are no more than 2 digits before the decimal point."])
    )
    # Output is quantized to one decimal place and coerced to string.
    outputs = {
        '1': '1.0',
        '0': '0.0',
        '1.09': '1.1',
        '0.04': '0.0',
        1: '1.0',
        0: '0.0',
        Decimal('1.0'): '1.0',
        Decimal('0.0'): '0.0',
        Decimal('1.09'): '1.1',
        Decimal('0.04'): '0.0'
    }
    field = serializers.DecimalField(max_digits=3, decimal_places=1)
class TestMinMaxDecimalField(FieldValues):
    """
    Valid and invalid values for `DecimalField` with min and max limits.
    """
    valid_inputs = {
        '10.0': Decimal('10.0'),
        '20.0': Decimal('20.0'),
    }
    # Values just outside the [10, 20] range.
    invalid_inputs = {
        '9.9': ['Ensure this value is greater than or equal to 10.'],
        '20.1': ['Ensure this value is less than or equal to 20.'],
    }
    outputs = {}
    field = serializers.DecimalField(
        max_digits=3, decimal_places=1,
        min_value=10, max_value=20
    )
class TestNoStringCoercionDecimalField(FieldValues):
    """
    Output values for `DecimalField` with `coerce_to_string=False`.
    """
    valid_inputs = {}
    invalid_inputs = {}
    # With coercion disabled, representations stay as Decimal instances.
    outputs = {
        1.09: Decimal('1.1'),
        0.04: Decimal('0.0'),
        '1.09': Decimal('1.1'),
        '0.04': Decimal('0.0'),
        Decimal('1.09'): Decimal('1.1'),
        Decimal('0.04'): Decimal('0.0'),
    }
    field = serializers.DecimalField(
        max_digits=3, decimal_places=1,
        coerce_to_string=False
    )
# Date & time serializers...
class TestDateField(FieldValues):
    """
    Valid and invalid values for `DateField`.
    """
    valid_inputs = {
        '2001-01-01': datetime.date(2001, 1, 1),
        datetime.date(2001, 1, 1): datetime.date(2001, 1, 1),
    }
    invalid_inputs = {
        'abc': ['Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]].'],
        '2001-99-99': ['Date has wrong format. Use one of these formats instead: YYYY[-MM[-DD]].'],
        # A datetime is explicitly rejected, not silently truncated.
        datetime.datetime(2001, 1, 1, 12, 00): ['Expected a date but got a datetime.'],
    }
    outputs = {
        datetime.date(2001, 1, 1): '2001-01-01',
        '2001-01-01': '2001-01-01',
        # None and '' both serialize to None.
        None: None,
        '': None,
    }
    field = serializers.DateField()
class TestCustomInputFormatDateField(FieldValues):
    """
    Valid and invalid values for `DateField` with a custom input format.
    """
    valid_inputs = {
        '1 Jan 2001': datetime.date(2001, 1, 1),
    }
    # The ISO format is no longer accepted once a custom format is set.
    invalid_inputs = {
        '2001-01-01': ['Date has wrong format. Use one of these formats instead: DD [Jan-Dec] YYYY.']
    }
    outputs = {}
    field = serializers.DateField(input_formats=['%d %b %Y'])
class TestCustomOutputFormatDateField(FieldValues):
    """
    Values for `DateField` with a custom output format.
    """
    valid_inputs = {}
    invalid_inputs = {}
    outputs = {
        datetime.date(2001, 1, 1): '01 Jan 2001'
    }
    field = serializers.DateField(format='%d %b %Y')
class TestNoOutputFormatDateField(FieldValues):
    """
    Values for `DateField` with no output format.
    """
    valid_inputs = {}
    invalid_inputs = {}
    # With format=None the date object is passed through unchanged.
    outputs = {
        datetime.date(2001, 1, 1): datetime.date(2001, 1, 1)
    }
    field = serializers.DateField(format=None)
class TestDateTimeField(FieldValues):
    """
    Valid and invalid values for `DateTimeField` with a UTC default timezone.
    """
    # Naive inputs are made aware using the field's default timezone.
    valid_inputs = {
        '2001-01-01 13:00': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
        '2001-01-01T13:00': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
        '2001-01-01T13:00Z': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
        datetime.datetime(2001, 1, 1, 13, 00): datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
        datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()),
        # Django 1.4 does not support timezone string parsing.
        '2001-01-01T14:00+01:00' if (django.VERSION > (1, 4)) else '2001-01-01T13:00Z': datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC())
    }
    invalid_inputs = {
        'abc': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'],
        '2001-99-99T99:00': ['Datetime has wrong format. Use one of these formats instead: YYYY-MM-DDThh:mm[:ss[.uuuuuu]][+HH:MM|-HH:MM|Z].'],
        # A bare date is explicitly rejected.
        datetime.date(2001, 1, 1): ['Expected a datetime but got a date.'],
    }
    outputs = {
        datetime.datetime(2001, 1, 1, 13, 00): '2001-01-01T13:00:00',
        datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): '2001-01-01T13:00:00Z'
    }
    field = serializers.DateTimeField(default_timezone=timezone.UTC())
class TestCustomInputFormatDateTimeField(FieldValues):
    """
    Valid and invalid values for `DateTimeField` with a custom input format.
    """
    valid_inputs = {
        '1:35pm, 1 Jan 2001': datetime.datetime(2001, 1, 1, 13, 35, tzinfo=timezone.UTC()),
    }
    # The ISO format is no longer accepted once a custom format is set.
    invalid_inputs = {
        '2001-01-01T20:50': ['Datetime has wrong format. Use one of these formats instead: hh:mm[AM|PM], DD [Jan-Dec] YYYY.']
    }
    outputs = {}
    field = serializers.DateTimeField(default_timezone=timezone.UTC(), input_formats=['%I:%M%p, %d %b %Y'])
class TestCustomOutputFormatDateTimeField(FieldValues):
    """
    Values for `DateTimeField` with a custom output format.
    """
    valid_inputs = {}
    invalid_inputs = {}
    outputs = {
        datetime.datetime(2001, 1, 1, 13, 00): '01:00PM, 01 Jan 2001',
    }
    field = serializers.DateTimeField(format='%I:%M%p, %d %b %Y')
class TestNoOutputFormatDateTimeField(FieldValues):
    """
    Values for `DateTimeField` with no output format.
    """
    valid_inputs = {}
    invalid_inputs = {}
    # With format=None the datetime object is passed through unchanged.
    outputs = {
        datetime.datetime(2001, 1, 1, 13, 00): datetime.datetime(2001, 1, 1, 13, 00),
    }
    field = serializers.DateTimeField(format=None)
class TestNaiveDateTimeField(FieldValues):
    """
    Valid and invalid values for `DateTimeField` with naive datetimes.
    """
    # With default_timezone=None, aware inputs are converted to naive ones.
    valid_inputs = {
        datetime.datetime(2001, 1, 1, 13, 00, tzinfo=timezone.UTC()): datetime.datetime(2001, 1, 1, 13, 00),
        '2001-01-01 13:00': datetime.datetime(2001, 1, 1, 13, 00),
    }
    invalid_inputs = {}
    outputs = {}
    field = serializers.DateTimeField(default_timezone=None)
class TestTimeField(FieldValues):
    """
    Valid and invalid values for `TimeField`.
    """
    valid_inputs = {
        '13:00': datetime.time(13, 00),
        datetime.time(13, 00): datetime.time(13, 00),
    }
    invalid_inputs = {
        'abc': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].'],
        '99:99': ['Time has wrong format. Use one of these formats instead: hh:mm[:ss[.uuuuuu]].'],
    }
    outputs = {
        datetime.time(13, 00): '13:00:00'
    }
    field = serializers.TimeField()
class TestCustomInputFormatTimeField(FieldValues):
    """
    Valid and invalid values for `TimeField` with a custom input format.
    """
    valid_inputs = {
        '1:00pm': datetime.time(13, 00),
    }
    # The default hh:mm format is no longer accepted.
    invalid_inputs = {
        '13:00': ['Time has wrong format. Use one of these formats instead: hh:mm[AM|PM].'],
    }
    outputs = {}
    field = serializers.TimeField(input_formats=['%I:%M%p'])
class TestCustomOutputFormatTimeField(FieldValues):
    """
    Values for `TimeField` with a custom output format.
    """
    valid_inputs = {}
    invalid_inputs = {}
    outputs = {
        datetime.time(13, 00): '01:00PM'
    }
    field = serializers.TimeField(format='%I:%M%p')
class TestNoOutputFormatTimeField(FieldValues):
    """
    Values for `TimeField` with no output format.
    """
    valid_inputs = {}
    invalid_inputs = {}
    # With format=None the time object is passed through unchanged.
    outputs = {
        datetime.time(13, 00): datetime.time(13, 00)
    }
    field = serializers.TimeField(format=None)
@pytest.mark.skipif(django.VERSION < (1, 8),
                    reason='DurationField is only available for django1.8+')
class TestDurationField(FieldValues):
    """
    Valid and invalid values for `DurationField` (Django 1.8+ only).
    """
    valid_inputs = {
        '13': datetime.timedelta(seconds=13),
        '3 08:32:01.000123': datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123),
        '08:01': datetime.timedelta(minutes=8, seconds=1),
        datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123),
        3600: datetime.timedelta(hours=1),
    }
    invalid_inputs = {
        'abc': ['Duration has wrong format. Use one of these formats instead: [DD] [HH:[MM:]]ss[.uuuuuu].'],
        '3 08:32 01.123': ['Duration has wrong format. Use one of these formats instead: [DD] [HH:[MM:]]ss[.uuuuuu].'],
    }
    outputs = {
        datetime.timedelta(days=3, hours=8, minutes=32, seconds=1, microseconds=123): '3 08:32:01.000123',
    }
    # Guarded so that importing this module on Django < 1.8 (where
    # DurationField does not exist) doesn't fail at class-body execution.
    if django.VERSION >= (1, 8):
        field = serializers.DurationField()
# Choice types...
class TestChoiceField(FieldValues):
    """
    Valid and invalid values for `ChoiceField`.
    """
    valid_inputs = {
        'poor': 'poor',
        'medium': 'medium',
        'good': 'good',
    }
    invalid_inputs = {
        'amazing': ['"amazing" is not a valid choice.']
    }
    outputs = {
        'good': 'good',
        '': '',
        'amazing': 'amazing',
    }
    field = serializers.ChoiceField(
        choices=[
            ('poor', 'Poor quality'),
            ('medium', 'Medium quality'),
            ('good', 'Good quality'),
        ]
    )

    def test_allow_blank(self):
        """
        With `allow_blank=True`, the empty string validates unchanged.
        """
        choice_field = serializers.ChoiceField(
            allow_blank=True,
            choices=[
                ('poor', 'Poor quality'),
                ('medium', 'Medium quality'),
                ('good', 'Good quality'),
            ]
        )
        assert choice_field.run_validation('') == ''

    def test_allow_null(self):
        """
        With `allow_null=True`, '' from an HTML form is read as None.
        """
        choice_field = serializers.ChoiceField(
            allow_null=True,
            choices=[1, 2, 3]
        )
        choice_field.field_name = 'example'
        assert choice_field.get_value(QueryDict('example=')) is None
        assert choice_field.run_validation(None) is None

    def test_iter_options(self):
        """
        iter_options() yields options and option-group markers in order.
        """
        choice_field = serializers.ChoiceField(
            choices=[
                ('Numbers', ['integer', 'float']),
                ('Strings', ['text', 'email', 'url']),
                'boolean'
            ]
        )
        options = list(choice_field.iter_options())
        assert options[0].start_option_group
        assert options[0].label == 'Numbers'
        assert options[1].value == 'integer'
        assert options[2].value == 'float'
        assert options[3].end_option_group
        assert options[4].start_option_group
        assert options[4].label == 'Strings'
        assert options[5].value == 'text'
        assert options[6].value == 'email'
        assert options[7].value == 'url'
        assert options[8].end_option_group
        assert options[9].value == 'boolean'
class TestChoiceFieldWithType(FieldValues):
    """
    Valid and invalid values for a `Choice` field that uses an integer type,
    instead of a char type.
    """
    # String inputs are coerced to the choice's int type.
    valid_inputs = {
        '1': 1,
        3: 3,
    }
    invalid_inputs = {
        5: ['"5" is not a valid choice.'],
        'abc': ['"abc" is not a valid choice.']
    }
    outputs = {
        '1': 1,
        1: 1
    }
    field = serializers.ChoiceField(
        choices=[
            (1, 'Poor quality'),
            (2, 'Medium quality'),
            (3, 'Good quality'),
        ]
    )
class TestChoiceFieldWithListChoices(FieldValues):
    """
    Valid and invalid values for a `Choice` field that uses a flat list for the
    choices, rather than a list of pairs of (`value`, `description`).
    """
    valid_inputs = {
        'poor': 'poor',
        'medium': 'medium',
        'good': 'good',
    }
    invalid_inputs = {
        'awful': ['"awful" is not a valid choice.']
    }
    outputs = {
        'good': 'good'
    }
    field = serializers.ChoiceField(choices=('poor', 'medium', 'good'))
class TestChoiceFieldWithGroupedChoices(FieldValues):
    """
    Valid and invalid values for a `Choice` field that uses a grouped list for the
    choices, rather than a list of pairs of (`value`, `description`).
    """
    valid_inputs = {
        'poor': 'poor',
        'medium': 'medium',
        'good': 'good',
    }
    invalid_inputs = {
        'awful': ['"awful" is not a valid choice.']
    }
    outputs = {
        'good': 'good'
    }
    # 'poor' and 'medium' sit inside a named group; 'good' is ungrouped.
    field = serializers.ChoiceField(
        choices=[
            (
                'Category',
                (
                    ('poor', 'Poor quality'),
                    ('medium', 'Medium quality'),
                ),
            ),
            ('good', 'Good quality'),
        ]
    )
class TestChoiceFieldWithMixedChoices(FieldValues):
    """
    Valid and invalid values for a `Choice` field that mixes grouped,
    paired and bare choices in a single list.
    """
    valid_inputs = {
        'poor': 'poor',
        'medium': 'medium',
        'good': 'good',
    }
    invalid_inputs = {
        'awful': ['"awful" is not a valid choice.']
    }
    outputs = {
        'good': 'good'
    }
    # One grouped choice, one bare string, one (value, description) pair.
    field = serializers.ChoiceField(
        choices=[
            (
                'Category',
                (
                    ('poor', 'Poor quality'),
                ),
            ),
            'medium',
            ('good', 'Good quality'),
        ]
    )
class TestMultipleChoiceField(FieldValues):
    """
    Valid and invalid values for `MultipleChoiceField`.
    """
    # Validated data is a set of the selected values.
    valid_inputs = {
        (): set(),
        ('aircon',): set(['aircon']),
        ('aircon', 'manual'): set(['aircon', 'manual']),
    }
    invalid_inputs = {
        'abc': ['Expected a list of items but got type "str".'],
        ('aircon', 'incorrect'): ['"incorrect" is not a valid choice.']
    }
    # to_representation() does not re-validate: unknown values pass through.
    outputs = [
        (['aircon', 'manual', 'incorrect'], set(['aircon', 'manual', 'incorrect']))
    ]
    field = serializers.MultipleChoiceField(
        choices=[
            ('aircon', 'AirCon'),
            ('manual', 'Manual drive'),
            ('diesel', 'Diesel'),
        ]
    )

    def test_against_partial_and_full_updates(self):
        # On a full update a missing key means "empty selection"; on a
        # partial update it means "leave unchanged" (the `empty` sentinel).
        field = serializers.MultipleChoiceField(choices=(('a', 'a'), ('b', 'b')))
        field.partial = False
        assert field.get_value(QueryDict({})) == []
        field.partial = True
        assert field.get_value(QueryDict({})) == rest_framework.fields.empty
class TestEmptyMultipleChoiceField(FieldValues):
    """
    Invalid values for `MultipleChoiceField(allow_empty=False)`.
    """
    valid_inputs = {
    }
    # An empty selection is rejected when allow_empty=False.
    invalid_inputs = (
        ([], ['This selection may not be empty.']),
    )
    outputs = [
    ]
    field = serializers.MultipleChoiceField(
        choices=[
            ('consistency', 'Consistency'),
            ('availability', 'Availability'),
            ('partition', 'Partition tolerance'),
        ],
        allow_empty=False
    )
# File serializers...
class MockFile:
    """
    Minimal stand-in for an uploaded file object, used by the
    `FileField`/`ImageField` tests.

    Models only the attributes those fields inspect: `name`, `size`
    and `url`.
    """
    def __init__(self, name='', size=0, url=''):
        self.name = name
        self.size = size
        self.url = url

    def __eq__(self, other):
        # Value equality so expected/actual files compare in assertions.
        return (
            isinstance(other, MockFile) and
            self.name == other.name and
            self.size == other.size and
            self.url == other.url
        )

    def __repr__(self):
        # Added so failed assertions show the file's contents rather than
        # an opaque default `<MockFile object at 0x...>`.
        return 'MockFile(name={!r}, size={!r}, url={!r})'.format(
            self.name, self.size, self.url
        )
class TestFileField(FieldValues):
    """
    Values for `FileField` with `max_length=10`.
    """
    valid_inputs = [
        (MockFile(name='example', size=10), MockFile(name='example', size=10))
    ]
    invalid_inputs = [
        ('invalid', ['The submitted data was not a file. Check the encoding type on the form.']),
        (MockFile(name='example.txt', size=0), ['The submitted file is empty.']),
        (MockFile(name='', size=10), ['No filename could be determined.']),
        # max_length constrains the filename length, not the file size.
        (MockFile(name='x' * 100, size=10), ['Ensure this filename has at most 10 characters (it has 100).'])
    ]
    outputs = [
        (MockFile(name='example.txt', url='/example.txt'), '/example.txt'),
        ('', None)
    ]
    field = serializers.FileField(max_length=10)
# NOTE(review): class name looks like a typo for `TestFileFieldWithName`;
# left unchanged to avoid renaming a collected test class.
class TestFieldFieldWithName(FieldValues):
    """
    Values for `FileField` with a filename output instead of URLs.
    """
    valid_inputs = {}
    invalid_inputs = {}
    outputs = [
        (MockFile(name='example.txt', url='/example.txt'), 'example.txt')
    ]
    field = serializers.FileField(use_url=False)
# Stub out mock Django `forms.ImageField` class so we don't *actually*
# call into its regular validation, or require PIL for testing.
class FailImageValidation(object):
    """Stub ImageField backend that always rejects the value."""
    def to_python(self, value):
        # `error_messages` is expected to be provided by the serializer
        # field this stub is plugged into — TODO confirm.
        raise serializers.ValidationError(self.error_messages['invalid_image'])
class PassImageValidation(object):
    """Stub ImageField backend that accepts any value unchanged."""
    def to_python(self, value):
        return value
class TestInvalidImageField(FieldValues):
    """
    Values for an invalid `ImageField`, using the failing stub backend.
    """
    valid_inputs = {}
    invalid_inputs = [
        (MockFile(name='example.txt', size=10), ['Upload a valid image. The file you uploaded was either not an image or a corrupted image.'])
    ]
    outputs = {}
    field = serializers.ImageField(_DjangoImageField=FailImageValidation)
class TestValidImageField(FieldValues):
    """
    Values for a valid `ImageField`, using the passing stub backend.
    """
    valid_inputs = [
        (MockFile(name='example.txt', size=10), MockFile(name='example.txt', size=10))
    ]
    invalid_inputs = {}
    outputs = {}
    field = serializers.ImageField(_DjangoImageField=PassImageValidation)
# Composite serializers...
class TestListField(FieldValues):
    """
    Values for `ListField` with IntegerField as child.
    """
    valid_inputs = [
        ([1, 2, 3], [1, 2, 3]),
        (['1', '2', '3'], [1, 2, 3]),
        ([], [])
    ]
    invalid_inputs = [
        ('not a list', ['Expected a list of items but got type "str".']),
        ([1, 2, 'error'], ['A valid integer is required.']),
        ({'one': 'two'}, ['Expected a list of items but got type "dict".'])
    ]
    outputs = [
        ([1, 2, 3], [1, 2, 3]),
        (['1', '2', '3'], [1, 2, 3])
    ]
    field = serializers.ListField(child=serializers.IntegerField())

    def test_no_source_on_child(self):
        # `source` on the child is rejected at construction time.
        with pytest.raises(AssertionError) as exc_info:
            serializers.ListField(child=serializers.IntegerField(source='other'))
        assert str(exc_info.value) == (
            "The `source` argument is not meaningful when applied to a `child=` field. "
            "Remove `source=` from the field declaration."
        )

    def test_collection_types_are_invalid_input(self):
        # A dict (or other non-list collection) is not accepted as list input.
        field = serializers.ListField(child=serializers.CharField())
        input_value = ({'one': 'two'})
        with pytest.raises(serializers.ValidationError) as exc_info:
            field.to_internal_value(input_value)
        assert exc_info.value.detail == ['Expected a list of items but got type "dict".']
class TestEmptyListField(FieldValues):
    """
    Values for `ListField` with allow_empty=False flag.
    """
    valid_inputs = {}
    # An empty list is rejected when allow_empty=False.
    invalid_inputs = [
        ([], ['This list may not be empty.'])
    ]
    outputs = {}
    field = serializers.ListField(child=serializers.IntegerField(), allow_empty=False)
class TestUnvalidatedListField(FieldValues):
    """
    Values for `ListField` with no `child` argument.
    """
    # Without a child field, list items pass through unvalidated.
    valid_inputs = [
        ([1, '2', True, [4, 5, 6]], [1, '2', True, [4, 5, 6]]),
    ]
    invalid_inputs = [
        ('not a list', ['Expected a list of items but got type "str".']),
    ]
    outputs = [
        ([1, '2', True, [4, 5, 6]], [1, '2', True, [4, 5, 6]]),
    ]
    field = serializers.ListField()
class TestDictField(FieldValues):
    """
    Values for `DictField` with CharField as child.
    """
    # Keys are coerced to strings; values are validated by the child field.
    valid_inputs = [
        ({'a': 1, 'b': '2', 3: 3}, {'a': '1', 'b': '2', '3': '3'}),
    ]
    invalid_inputs = [
        ({'a': 1, 'b': None}, ['This field may not be null.']),
        ('not a dict', ['Expected a dictionary of items but got type "str".']),
    ]
    outputs = [
        ({'a': 1, 'b': '2', 3: 3}, {'a': '1', 'b': '2', '3': '3'}),
    ]
    field = serializers.DictField(child=serializers.CharField())

    def test_no_source_on_child(self):
        # `source` on the child is rejected at construction time.
        with pytest.raises(AssertionError) as exc_info:
            serializers.DictField(child=serializers.CharField(source='other'))
        assert str(exc_info.value) == (
            "The `source` argument is not meaningful when applied to a `child=` field. "
            "Remove `source=` from the field declaration."
        )
class TestUnvalidatedDictField(FieldValues):
    """
    Values for `DictField` with no `child` argument.
    """
    # Keys are coerced to strings, but values pass through unvalidated.
    valid_inputs = [
        ({'a': 1, 'b': [4, 5, 6], 1: 123}, {'a': 1, 'b': [4, 5, 6], '1': 123}),
    ]
    invalid_inputs = [
        ('not a dict', ['Expected a dictionary of items but got type "str".']),
    ]
    outputs = [
        ({'a': 1, 'b': [4, 5, 6]}, {'a': 1, 'b': [4, 5, 6]}),
    ]
    field = serializers.DictField()
class TestJSONField(FieldValues):
    """
    Values for `JSONField`.
    """
    valid_inputs = [
        ({
            'a': 1,
            'b': ['some', 'list', True, 1.23],
            '3': None
        }, {
            'a': 1,
            'b': ['some', 'list', True, 1.23],
            '3': None
        }),
    ]
    # A set is not JSON-serializable, so the structure is rejected.
    invalid_inputs = [
        ({'a': set()}, ['Value must be valid JSON.']),
    ]
    outputs = [
        ({
            'a': 1,
            'b': ['some', 'list', True, 1.23],
            '3': 3
        }, {
            'a': 1,
            'b': ['some', 'list', True, 1.23],
            '3': 3
        }),
    ]
    field = serializers.JSONField()
class TestBinaryJSONField(FieldValues):
    """
    Values for `JSONField` with binary=True.
    """
    # Binary mode decodes bytes input into Python structures...
    valid_inputs = [
        (b'{"a": 1, "3": null, "b": ["some", "list", true, 1.23]}', {
            'a': 1,
            'b': ['some', 'list', True, 1.23],
            '3': None
        }),
    ]
    invalid_inputs = [
        ('{"a": "unterminated string}', ['Value must be valid JSON.']),
    ]
    # ...and encodes structures back to bytes on output.
    outputs = [
        (['some', 'list', True, 1.23], b'["some", "list", true, 1.23]'),
    ]
    field = serializers.JSONField(binary=True)
# Tests for FileField.
# --------------------
class MockRequest:
    """Minimal request stand-in exposing only `build_absolute_uri`."""
    def build_absolute_uri(self, value):
        # Prefix the path with a fixed scheme and host.
        base = 'http://example.com'
        return base + value
class TestFileFieldContext:
    """Tests of FileField output when a request is available in context."""
    def test_fully_qualified_when_request_in_context(self):
        """With a request in context, the file URL is made absolute."""
        file_field = serializers.FileField(max_length=10)
        file_field._context = {'request': MockRequest()}
        mock_file = MockFile(name='example.txt', url='/example.txt')
        represented = file_field.to_representation(mock_file)
        assert represented == 'http://example.com/example.txt'
# Tests for SerializerMethodField.
# --------------------------------
class TestSerializerMethodField:
    def test_serializer_method_field(self):
        # Default method name is `get_<field_name>`; the serializer should
        # call it with the instance and use its return value.
        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.SerializerMethodField()

            def get_example_field(self, obj):
                return 'ran get_example_field(%d)' % obj['example_field']

        serializer = ExampleSerializer({'example_field': 123})
        assert serializer.data == {
            'example_field': 'ran get_example_field(123)'
        }

    def test_redundant_method_name(self):
        # Explicitly passing the default method name is flagged as redundant;
        # the error surfaces lazily when `.fields` is accessed.
        class ExampleSerializer(serializers.Serializer):
            example_field = serializers.SerializerMethodField('get_example_field')

        with pytest.raises(AssertionError) as exc_info:
            ExampleSerializer().fields
        assert str(exc_info.value) == (
            "It is redundant to specify `get_example_field` on "
            "SerializerMethodField 'example_field' in serializer "
            "'ExampleSerializer', because it is the same as the default "
            "method name. Remove the `method_name` argument."
        )
| |
import io
from unittest.mock import Mock
import pytest
from mitmproxy.net import http
from mitmproxy.net.http import http1
from mitmproxy import exceptions
from pathod import pathoc, language
from pathod.protocols.http2 import HTTP2StateProtocol
from mitmproxy.test import tutils
from . import tservers
def test_response():
    # Smoke test: a minimal Response must construct and have a usable repr.
    r = http.Response(b"HTTP/1.1", 200, b"Message", {}, None, None)
    assert repr(r)
class PathocTestDaemon(tservers.DaemonTests):
    """Base class that drives a pathoc client against the test daemon."""

    def tval(self, requests, timeout=None, showssl=False, **kwargs):
        """Send each pathoc request spec and return everything pathoc printed.

        Network errors are deliberately swallowed so callers can assert on
        the captured output / daemon log rather than on exceptions.
        """
        s = io.StringIO()
        c = pathoc.Pathoc(
            ("127.0.0.1", self.d.port),
            ssl=self.ssl,
            fp=s,
            **kwargs
        )
        with c.connect(showssl=showssl, fp=s):
            if timeout:
                c.settimeout(timeout)
            for i in requests:
                r = next(language.parse_pathoc(i))
                if kwargs.get("explain"):
                    # Freeze resolves random specs so the output is explainable.
                    r = r.freeze(language.Settings())
                try:
                    c.request(r)
                except exceptions.NetlibException:
                    pass
        # Let the daemon finish logging before the caller inspects it.
        self.d.wait_for_silence()
        return s.getvalue()
class TestDaemonSSL(PathocTestDaemon):
    """TLS-enabled daemon: exercises SNI, client certificates and HTTP/2."""
    ssl = True
    ssloptions = dict(
        request_client_cert=True,
        sans=[b"test1.com", b"test2.com"],
        alpn_select=b'h2',
    )

    def test_sni(self):
        # The daemon log should record the SNI value the client sent.
        self.tval(
            ["get:/p/200"],
            sni="foobar.com"
        )
        log = self.d.log()
        assert log[0]["request"]["sni"] == "foobar.com"

    def test_showssl(self):
        # showssl=True prints the server certificate chain over TLS.
        assert "certificate chain" in self.tval(["get:/p/200"], showssl=True)

    def test_clientcert(self):
        # With request_client_cert=True the presented cert is logged.
        self.tval(
            ["get:/p/200"],
            clientcert=tutils.test_data.path("pathod/data/clientcert/client.pem"),
        )
        log = self.d.log()
        assert log[0]["request"]["clientcert"]["keyinfo"]

    def test_http2_without_ssl(self):
        # Cleartext HTTP/2 is unsupported; connect() must refuse.
        fp = io.StringIO()
        c = pathoc.Pathoc(
            ("127.0.0.1", self.d.port),
            use_http2=True,
            ssl=False,
            fp=fp
        )
        with pytest.raises(NotImplementedError):
            c.connect()
class TestDaemon(PathocTestDaemon):
    """Plaintext daemon: pathoc output formatting, CONNECT and SOCKS flows."""
    ssl = False

    def test_ssl_error(self):
        # Speaking TLS to a plaintext port must fail with an SSL error.
        c = pathoc.Pathoc(("127.0.0.1", self.d.port), ssl=True, fp=None)
        try:
            with c.connect():
                pass
        except Exception as e:
            assert "SSL" in str(e)
        else:
            raise AssertionError("No exception raised.")

    def test_showssl(self):
        # No TLS here, so no certificate chain should be printed.
        assert "certificate chain" not in self.tval(
            ["get:/p/200"],
            showssl=True)

    def test_ignorecodes(self):
        # NOTE(review): the first assertion is repeated three times verbatim;
        # presumably intentional (stability across repeated runs) -- confirm.
        assert "200" in self.tval(["get:'/p/200:b@1'"])
        assert "200" in self.tval(["get:'/p/200:b@1'"])
        assert "200" in self.tval(["get:'/p/200:b@1'"])
        assert "200" not in self.tval(["get:'/p/200:b@1'"], ignorecodes=[200])
        assert "200" not in self.tval(
            ["get:'/p/200:b@1'"],
            ignorecodes=[
                200,
                201])
        assert "202" in self.tval(["get:'/p/202:b@1'"], ignorecodes=[200, 201])

    # Leading underscore keeps this out of test collection -- deliberately
    # disabled (timing-sensitive and likely flaky).
    def _test_timeout(self):
        assert "Timeout" in self.tval(["get:'/p/200:p0,100'"], timeout=0.01)
        assert "HTTP" in self.tval(
            ["get:'/p/200:p5,100'"],
            showresp=True,
            timeout=1
        )
        assert "HTTP" not in self.tval(
            ["get:'/p/200:p3,100'"],
            showresp=True,
            timeout=1,
            ignoretimeout=True
        )

    def test_showresp(self):
        reqs = ["get:/p/200:da", "get:/p/200:da"]
        assert self.tval(reqs).count("200 OK") == 2
        assert self.tval(reqs, showresp=True).count("HTTP/1.1 200 OK") == 2
        # hexdump output starts each response dump with a zero offset line.
        assert self.tval(
            reqs, showresp=True, hexdump=True
        ).count("0000000000") == 2

    def test_showresp_httperr(self):
        v = self.tval(["get:'/p/200:d20'"], showresp=True, showsummary=True)
        # Both the error summary and the partial raw response are shown.
        assert "Invalid header" in v
        assert "HTTP/" in v

    def test_explain(self):
        # explain=True freezes specs, so the raw "b@100" token is expanded.
        reqs = ["get:/p/200:b@100"]
        assert "b@100" not in self.tval(reqs, explain=True)

    def test_showreq(self):
        reqs = ["get:/p/200:da", "get:/p/200:da"]
        assert self.tval(reqs, showreq=True).count("GET /p/200") == 2
        assert self.tval(
            reqs, showreq=True, hexdump=True
        ).count("0000000000") == 2

    def test_conn_err(self):
        assert "Invalid server response" in self.tval(["get:'/p/200:d2'"])

    def test_websocket_shutdown(self):
        self.tval(["ws:/"])

    def test_wait_finish(self):
        c = pathoc.Pathoc(
            ("127.0.0.1", self.d.port),
            fp=None,
            ws_read_limit=1
        )
        with c.connect():
            c.request("ws:/")
            c.request("wf:f'wf'")
            # This should read a frame and close the websocket reader
            assert len([i for i in c.wait(timeout=5, finish=False)]) == 1
            assert not [i for i in c.wait(timeout=0)]

    def test_connect_fail(self):
        to = ("foobar", 80)
        c = pathoc.Pathoc(("127.0.0.1", self.d.port), fp=None)
        # Drive http_connect against canned responses: empty, 500, then 200.
        c.rfile, c.wfile = io.BytesIO(), io.BytesIO()
        with pytest.raises(Exception, match="CONNECT failed"):
            c.http_connect(to)
        c.rfile = io.BytesIO(
            b"HTTP/1.1 500 OK\r\n"
        )
        with pytest.raises(Exception, match="CONNECT failed"):
            c.http_connect(to)
        c.rfile = io.BytesIO(
            b"HTTP/1.1 200 OK\r\n"
        )
        c.http_connect(to)

    def test_socks_connect(self):
        to = ("foobar", 80)
        c = pathoc.Pathoc(("127.0.0.1", self.d.port), fp=None)
        # Canned SOCKS5 exchanges: truncated greeting, bad auth method,
        # server error reply, then a successful connect.
        c.rfile, c.wfile = tutils.treader(b""), io.BytesIO()
        with pytest.raises(pathoc.PathocError):
            c.socks_connect(to)
        c.rfile = tutils.treader(
            b"\x05\xEE"
        )
        with pytest.raises(Exception, match="SOCKS without authentication"):
            c.socks_connect(("example.com", 0xDEAD))
        c.rfile = tutils.treader(
            b"\x05\x00" +
            b"\x05\xEE\x00\x03\x0bexample.com\xDE\xAD"
        )
        with pytest.raises(Exception, match="SOCKS server error"):
            c.socks_connect(("example.com", 0xDEAD))
        c.rfile = tutils.treader(
            b"\x05\x00" +
            b"\x05\x00\x00\x03\x0bexample.com\xDE\xAD"
        )
        c.socks_connect(("example.com", 0xDEAD))
class TestDaemonHTTP2(PathocTestDaemon):
    """HTTP/2-over-TLS daemon: protocol selection, ALPN offer and requests."""
    ssl = True
    explain = False

    def test_http2(self):
        # use_http2=True selects the HTTP/2 protocol implementation...
        c = pathoc.Pathoc(
            ("127.0.0.1", self.d.port),
            fp=None,
            ssl=True,
            use_http2=True,
        )
        assert isinstance(c.protocol, HTTP2StateProtocol)
        # ...while the default remains HTTP/1.
        c = pathoc.Pathoc(
            ("127.0.0.1", self.d.port),
        )
        assert c.protocol == http1

    def test_http2_alpn(self):
        c = pathoc.Pathoc(
            ("127.0.0.1", self.d.port),
            fp=None,
            ssl=True,
            use_http2=True,
            http2_skip_connection_preface=True,
        )
        # Wrap convert_to_tls in a Mock (keeping the real behavior via
        # side_effect) so the ALPN protocols offered can be inspected.
        tmp_convert_to_tls = c.convert_to_tls
        c.convert_to_tls = Mock()
        c.convert_to_tls.side_effect = tmp_convert_to_tls
        with c.connect():
            _, kwargs = c.convert_to_tls.call_args
            assert set(kwargs['alpn_protos']) == set([b'http/1.1', b'h2'])

    def test_request(self):
        c = pathoc.Pathoc(
            ("127.0.0.1", self.d.port),
            fp=None,
            ssl=True,
            use_http2=True,
        )
        with c.connect():
            resp = c.request("get:/p/200")
            assert resp.status_code == 200
| |
# SQL methods
import MySQLdb
import itertools
import json
import time
from sendMail import *
debug = False  # set True to run the smoke test at the bottom of the file
reply = ''  # module-level default reply
splitChar = '#'  # separator between command token and payload
curMilliTime = lambda: int(round(time.time() * 1000))  # epoch milliseconds
lastChangedTime = 0  # millis timestamp of the last data-changing command
def interpreter (data):
raw = data.split(splitChar)
command = raw[0]
data = eval(raw[1])
if (command == 'k2pj39as9d0uo34jkh41'): # LOGIN
username = data[0]
password = data[1]
reply = login(username, password)
elif (command == 'y2sg59sleuc5el70sjr3'): # CREATE USER
username = "'" + data[0] + "'"
password = "'" + data[1] + "'"
epost = "'" + data[2] + "'"
if (data[3] == "null"):
fornavn = "null"
else:
fornavn = "'" + data [3] + "'"
if (data[4] == "null"):
etternavn = "null"
else:
etternavn = "'" + data [4] + "'"
reply = createUser(username, password, epost, fornavn, etternavn)
elif (command == 'u4sl29fjanz680slla0p'): # GET ALL USERS
reply = getAllUsers()
elif (command == 'o35n2b1hsk1oa901jsnt'): # GET SPECIFIC USER
userString = data
reply = getSpecificUser(userString)
elif (command == 'j2az937ls9mey36z2h0l'): # GET ALL GROUPS
reply = getAllGroups()
elif (command == 'h4ks827db5kz791ke2n5'): # GET FILTERED ROOMS
start = str(data[0])
slutt = str(data[1])
storrelse = str(data[2])
reply = getOccupiedRooms(start, slutt, storrelse)
elif (command == 'a92mfdy37sk9senc7smw'): # CREATE APPOINTMENT
navn = "'" + data[0] + "'"
start = data[1]
slutt = data[2]
if (data[3] == "null"):
beskrivelse = "null"
else:
beskrivelse = "'" + data[3] + "'"
if (data[4] == "null"):
sted = "null"
else:
sted = "'" + data[4] + "'"
if (data[5] == "null"):
moteromID = "null"
else:
moteromID = data[5]
lastChangedTime = curMilliTime
reply = createAppointment(navn, start, slutt, beskrivelse, sted, moteromID)
elif (command == 'g2z8fvj39s3mo899asd3'): # CREATE GROUP
navn = "'" + data[0] + "'"
if (data[1] == "null"):
supergruppeID = "null"
else:
supergruppeID = data[1]
reply = createGroup(navn, supergruppeID)
elif (command == 'g7sj34nd8xjake4hr8nf'): # GET SPECIFIC GROUP
navn = "'" + data + "'"
reply = getSpecificGroup(navn)
elif (command == 'z9sh38fjka5hj79mvgh1'): # CREATE GROUP MEMBER
brukerID = str(data[0])
gruppeID = str(data[1])
admin = str(data[2])
reply = createGroupMember(brukerID, gruppeID, admin)
elif (command == 't37zhenk58slro38za7q'): # CREATE APPOINTMENT MEMBER
brukerID = str(data[0])
avtaleID = str(data[1])
deltar = str(data[2])
admin = str(data[3])
varsel = str(data[4])
reply = createAppointmentMember(brukerID, avtaleID, deltar, admin, varsel)
elif (command == 'f2ks94lfurb68z52k9ah'): # GET APPOINTMENTS
brukerID = str(data[0])
start = str(data[1])
slutt = str(data[2])
reply = getAppointments(brukerID, start, slutt)
elif (command == 's8cj2jak602lfun4h6z8'): # GET SPECIFIC APPOINTMENT
avtaleID = str(data)
reply = getSpecificAppointment(avtaleID)
elif (command == 'p7zf26a5hem79gul4hy7'): # GET ALL MEMBER GROUPS
brukerID = str(data)
reply = getAllMemberGroups(brukerID)
elif (command == 'a74hn9kl2k4lxn57cgh3'): # GET GROUP MEMBERS
navn = "'" + str(data) + "'"
reply = getGroupMembers(navn)
elif (command == 'p7sg3bzv32jtuhf7k6nr'): # GET APPOINTMENT MEMBER
brukerID = str(data[0])
avtaleID = str(data[1])
reply = getAppointmentMember(brukerID, avtaleID)
elif (command == 'q8sh3nd8vhe0hit43kxh'): # UPDATE AVTALEBRUKER
brukerID = str(data[0])
avtaleID = str(data[1])
deltar = str(data[2])
varsel = str(data[3])
reply = updateAppointmentUser(brukerID, avtaleID, deltar, varsel)
elif (command == 'x82m4jf7ch4dk7h6fn4k'): # GET ATTENDANCE AND ALERT
brukerID = str(data[0])
avtaleID = str(data[1])
reply = getAttendanceAlert(brukerID, avtaleID)
elif (command == 'p7sg3nj86sk64hn2loc5'): # GET ATTENDING USERS
avtaleID = str(data)
reply = getAttendingUsers(avtaleID)
elif (command == 'z83mdk48dhw7sn51koel'): # DELE APPOINTMENT
avtaleID = str(data)
reply = deleteAppointment(avtaleID)
elif (command == 'q83j5c8m3k0s3fie8d2h'): # GET JOINED MEMBERS
gruppeNavn = str(data)
reply = getJoinedMembers(gruppeNavn)
elif (command == 't3js947ch4n57ak92lem'): # GET ALL APPOINTMENTS FOR ADMIN
brukerID = str(data)
reply = getAllAppointmentsForAdmin(brukerID)
elif (command == 'k39sh21sk97dhek8usya'): # GET ROOM WITH ID
moteromID = str(data)
reply = getRoomWithID(moteromID)
elif (command == 'v4nsk2jd8ut67xmf8ke3'): # GET CHANGE STATUS
lastUpdatedTime = long(data)
global lastChangedTime
if (lastUpdatedTime < lastChangedTime):
reply = '[{"didChange": ' + "\"Yes\"" + '}]'
else:
reply = '[{"didChange": ' + "\"No\"" + '}]'
elif (command == 'a83kdu8dbe5lg7c39qu3'): # GET APPOINTMENTS COUNT
reply = getAppointmentsCount()
elif (command == 'b37chw89sk3gy2nfy7i6'): # GET GROUPS COUNT
reply = getGroupsCount()
elif (command == 'c3b8kam28duh3j7cg4n7'): # GET MEMBERS COUNT
reply = getMembersCount()
elif (command == 'j3usge51j7s68luc62net'): # GET NEW PASSWORD
email = "'" + str(data) + "'"
reply = getUserWithEmail(email)
elif (command == 'n3hs7xk489fjaqi3nf6h'): # SEND PASSWORD
epost = "'" + str(data[0]) + "'"
brukernavn = "'" + str(data[1]) + "'"
passord = "'" + str(data[2]) + "'"
sendMail(epost, brukernavn, passord)
reply = "EmailIsSend"
print reply
return command + splitChar + reply
def validateUser(data):
    """Return True when the SQL result `data` is anything but an empty list."""
    return data != []
def login(username, password):
    # Match username + password and return the full user row (incl. password).
    # NOTE(review): string-built SQL (injection risk) and plaintext password
    # comparison -- needs parameterized queries and password hashing.
    data = executeSQL("SELECT brukerID, brukernavn, passord, fornavn, etternavn, epost FROM BRUKER WHERE brukernavn ='" \
        + username + "' AND passord ='" + password + "'")
    return data
def createUser(username, password, email, firstname, lastname):
    # Insert a BRUKER row; arguments arrive pre-quoted by interpreter().
    # NOTE(review): string-built SQL (injection risk), plaintext password.
    data = executeSQL("INSERT INTO BRUKER (brukernavn, passord, epost, fornavn, etternavn) VALUES (" \
        + username + ", " + password + ", " + email + ", " + firstname + ", " + lastname + ")")
    return data
def getSpecificUser(streng):
    # Substring search over username / first / last name.
    # NOTE(review): `streng` is concatenated into the LIKE pattern (injection).
    data = executeSQL("SELECT brukerID, brukernavn, fornavn, etternavn FROM BRUKER " \
        "WHERE brukernavn LIKE '%" + streng + "%' OR fornavn LIKE '%" + streng + "%' OR etternavn LIKE '%" + streng + "%'")
    return data
def getAppointmentMember(brukerID, avtaleID):
    # One AVTALEBRUKER row linking a user to an appointment (string-built SQL).
    data = executeSQL("SELECT * FROM AVTALEBRUKER WHERE avtaleID = " + avtaleID + " AND brukerID = " + brukerID)
    return data
def getAllUsers():
    # List every user without password/email columns.
    data = executeSQL("SELECT brukerID, brukernavn, fornavn, etternavn FROM BRUKER")
    return data
def getOccupiedRooms(start, slutt, storrelse):
    # Rooms of at least `storrelse` seats that are free between start/slutt:
    # excludes rooms whose appointments overlap the requested interval
    # (interval starts inside, ends inside, or fully contains an appointment).
    # NOTE(review): string-built SQL -- injection risk.
    data = executeSQL( "SELECT moteromID, sted, navn, storrelse " \
        "FROM MOTEROM " \
        "WHERE storrelse >= " + storrelse + " AND moteromID NOT IN (" \
        "SELECT moteromID " \
        "FROM MOTEROM " \
        "WHERE moteromID IN (" \
        "SELECT moteromID " \
        "FROM AVTALE " \
        "WHERE (start <= " + start + " AND slutt >= " + start + ") OR " \
        "(start <= " + slutt + " AND slutt >= " + slutt + ") OR " \
        "(start > " + start + " AND slutt < " + slutt + ")))")
    return data
def getAllGroups():
    # List every group id and name.
    data = executeSQL("SELECT gruppeID, navn FROM GRUPPE")
    return data
def getAllMemberGroups(brukerID):
print "SELECT gruppeID, navn FROM GRUPPE " \
"WHERE gruppeID IN (" \
"SELECT gruppeID FROM GRUPPEBRUKER WHERE brukerID = " + brukerID + ")"
data = executeSQL( "SELECT gruppeID, navn FROM GRUPPE " \
"WHERE gruppeID IN (" \
"SELECT gruppeID FROM GRUPPEBRUKER WHERE brukerID = " + brukerID + ")")
return data
def getGroupMembers(navn):
    # All GRUPPEBRUKER rows for the group named `navn` (pre-quoted by caller).
    data = executeSQL( "SELECT * FROM GRUPPEBRUKER " \
        "WHERE gruppeID IN (" \
        "SELECT gruppeID FROM GRUPPE WHERE navn = " + navn + ")")
    return data
def getSpecificGroup(navn):
    # Resolve a group name (pre-quoted) to its id.
    data = executeSQL("SELECT gruppeID FROM GRUPPE WHERE navn = " + navn)
    return data
def createGroup(navn, supergruppeID):
    # Insert a GRUPPE row; supergruppeID may be the literal "null".
    data = executeSQL( "INSERT INTO GRUPPE (navn, supergruppeID) VALUES (" \
        + navn + ", " + supergruppeID + ")")
    return data
def createGroupMember(brukerID, gruppeID, admin):
    # Link a user to a group, optionally as admin (string-built SQL).
    data = executeSQL( "INSERT INTO GRUPPEBRUKER (brukerID, gruppeID, admin) VALUES (" \
        + brukerID + ", " + gruppeID + ", " + admin + ")")
    return data
def createAppointment(navn, start, slutt, beskrivelse, sted, moteromID):
    # Insert an AVTALE row; optional fields arrive as the literal "null".
    data = executeSQL( "INSERT INTO AVTALE (navn, start, slutt, beskrivelse, sted, moteromID) VALUES (" \
        + navn + ", " + start + ", " + slutt + ", " + beskrivelse + ", " + sted + ", " + moteromID + ")")
    return data
def createAppointmentMember(brukerID, avtaleID, deltar, admin, varsel):
    # Link a user to an appointment with attendance/admin/alert flags.
    data = executeSQL( "INSERT INTO AVTALEBRUKER (brukerID, avtaleID, deltar, admin, varsel) VALUES (" \
        + brukerID + ", " + avtaleID + ", " + deltar + ", " + admin + ", " + varsel + ")")
    return data
def getAppointments(brukerID, start, slutt):
    # Appointments of one user inside [start, slutt), joined with room names.
    # NOTE(review): string-built SQL -- injection risk.
    data = executeSQL( "SELECT *, AVTALE.navn AS avtaleNavn, MOTEROM.navn AS moterom FROM AVTALE " \
        "INNER JOIN MOTEROM " \
        "ON AVTALE.moteromID = MOTEROM.moteromID " \
        "WHERE (start >= " + start + " AND slutt < " + slutt + ") AND avtaleID IN (" \
        "SELECT avtaleID FROM AVTALEBRUKER WHERE brukerID = " + brukerID + ")")
    return data
def updateAppointmentUser(brukerID, avtaleID, deltar, varsel):
    # Update the attendance + alert flags of one AVTALEBRUKER row.
    data = executeSQL( "UPDATE AVTALEBRUKER" \
        " SET deltar = " + deltar + ", varsel = " + varsel + \
        " WHERE avtaleID = " + avtaleID + " AND brukerID =" + brukerID)
    return data
def getAttendanceAlert(brukerID, avtaleID):
    # Attendance/alert/admin flags for one user on one appointment.
    data = executeSQL( "SELECT deltar, varsel, admin FROM AVTALEBRUKER WHERE brukerID = " \
        + brukerID + " AND avtaleID = " + avtaleID)
    return data
def getAttendingUsers(avtaleID):
    # All users attached to an appointment, joined with their user rows.
    data = executeSQL( "SELECT * " \
        "FROM AVTALEBRUKER " \
        "INNER JOIN BRUKER " \
        "ON AVTALEBRUKER.brukerID = BRUKER.brukerID " \
        "WHERE avtaleID = " + avtaleID )
    return data
def getJoinedMembers(gruppeNavn):
    # All members of the group named `gruppeNavn`, joined with user rows.
    # NOTE(review): string-built SQL -- injection risk.
    data = executeSQL( "SELECT * " \
        "FROM GRUPPEBRUKER " \
        "INNER JOIN BRUKER " \
        "ON GRUPPEBRUKER.brukerID = BRUKER.brukerID " \
        "INNER JOIN GRUPPE " \
        "ON GRUPPE.gruppeID = GRUPPEBRUKER.gruppeID " \
        "WHERE GRUPPE.navn = '" + gruppeNavn + "'")
    return data
def getAllAppointmentsForAdmin(brukerID):
    # Every appointment of a user with its admin/attendance flags.
    data = executeSQL( "SELECT AVTALE.avtaleID, AVTALE.navn, admin, deltar " \
        "FROM AVTALE " \
        "INNER JOIN AVTALEBRUKER " \
        "ON AVTALE.avtaleID = AVTALEBRUKER.avtaleID " \
        "INNER JOIN BRUKER " \
        "ON BRUKER.brukerID = AVTALEBRUKER.brukerID " \
        "WHERE BRUKER.brukerID = " + brukerID)
    return data
def deleteAppointment(avtaleID):
    # Delete one appointment by id (string-built SQL -- injection risk).
    data = executeSQL( "DELETE FROM AVTALE WHERE avtaleID = " + avtaleID )
    return data
def getRoomWithID(moteromID):
    # Resolve a meeting-room id to its name.
    data = executeSQL( "SELECT navn FROM MOTEROM WHERE moteromID = " + moteromID )
    return data
def getAppointmentsCount():
    # Total number of appointments.
    data = executeSQL("SELECT count(avtaleID) FROM AVTALE")
    return data
def getGroupsCount():
    # Total number of groups.
    data = executeSQL("SELECT count(gruppeID) FROM GRUPPE")
    return data
def getMembersCount():
    # Total number of users.
    data = executeSQL("SELECT count(brukerID) FROM BRUKER")
    return data
def getUserWithEmail(email):
    # Look up credentials by email (pre-quoted by caller).
    # NOTE(review): returns the stored plaintext password -- rework to a
    # reset-token flow.
    data = executeSQL("SELECT brukernavn, passord FROM BRUKER WHERE epost =" + email)
    return data
def executeSQL(sqlq):
db = MySQLdb.connect( host = "localhost", # host
user = "root", # username
passwd = "felles35_15", # password
db = "kalenderdatabase") # name of the database
# Creation of cursor object. This lets us execute all the queries we need
cur = db.cursor()
try :
# SQL-queries
cur.execute(sqlq)
db.commit()
except MySQLdb.IntegrityError, message:
errorcode = message[0] # get MySQL error code
if errorcode == 1062 : # if duplicate
return "duplicateEntry"
# Parse query-results to json-dump
results = dictfetchall(cur)
json_results = str(json.dumps(results))
if (json_results == "[]"):
json_results = '[{"lastInsertID": ' + str(cur.lastrowid) + '}]'
return json_results
def dictfetchall(cursor):
    """Return all rows from a cursor as a list of dicts keyed by column name."""
    columns = [col[0] for col in cursor.description]
    # Builtin zip instead of itertools.izip: identical result inside dict()
    # on Python 2 and keeps the helper Python 3 compatible.
    return [dict(zip(columns, row)) for row in cursor.fetchall()]
if debug:
    # Manual smoke test: simulate a LOGIN command round-trip.
    data = "k2pj39as9d0uo34jkh41#('markusra', 'test')"
    interpreter(data)
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Operations for feature preprocessing.
Each function defines a preprocessing operation.
Images should be either uint8 in [0, 255] or float32 in [0, 1].
The first argument should be 'image' or 'features'.
In case of 'image' the op only gets the 'image' feature (a tensor of shape
[h, w, c]) and should return a transformation of it.
In case of 'feature' the op gets the full feature dictionary
(Dict[str, Tensor]) and should return a modified dictionary. It can
add, remove and modify entries.
"""
import inspect
import sys
from typing import Tuple, Union, Callable, List, Any
import tensorflow.compat.v2 as tf
import tensorflow_addons.image as tfa_image
def all_ops():
  """Return every public callable defined at this module's top level."""
  this_module = sys.modules[__name__]
  public = []
  for member_name, member in inspect.getmembers(this_module):
    # Skip private helpers; `hasattr(..., "__name__")` filters out values
    # (modules are excluded by the callable check).
    if member_name.startswith("_"):
      continue
    if hasattr(member, "__name__") and callable(member):
      public.append(member)
  return public
def _are_all_ints(arr):
"""Check whether all elements in arr are ints."""
for a in arr:
if not isinstance(a, int):
return False
return True
def _get_image_size(img, dynamic=False):
  """Get width, height for input image.

  With dynamic=True, returns scalar tensors read from the runtime shape;
  otherwise returns the static (trace-time) dimensions.
  """
  if dynamic:
    size = tf.shape(img)[:2]
    return size[1], size[0]  # (width, height) from [h, w, ...]
  else:
    size = img.get_shape().as_list()[:2]
    return size[1], size[0]
def to_float_0_1(image):
  """Convert pixels to tf.float32 and rescale from [0, 255] to [0, 1]."""
  # The assert fires at graph-construction time, not per batch.
  assert image.dtype == tf.uint8, image.dtype
  return tf.cast(image, tf.float32) / 255.0
def value_range(image, vmin, vmax, in_min=0, in_max=1.0, clip_values=False):
  """Transforms a [in_min,in_max] image to [vmin,vmax] range.

  Input ranges in_min/in_max can be equal-size lists to rescale the individual
  channels independently.

  Args:
    image: Input image. Will be cast to tf.float32 regardless of input type.
    vmin: A scalar. Output min value.
    vmax: A scalar. Output max value.
    in_min: A scalar or a list of input min values to scale. If a list, the
      length should match to the number of channels in the image.
    in_max: A scalar or a list of input max values to scale. If a list, the
      length should match to the number of channels in the image.
    clip_values: Whether to clip the output values to the provided ranges.

  Returns:
    The rescaled image (tf.float32).
  """
  assert vmin < vmax, "vmin {} not less than vmax {}".format(vmin, vmax)
  in_min_t = tf.constant(in_min, tf.float32)
  in_max_t = tf.constant(in_max, tf.float32)
  image = tf.cast(image, tf.float32)
  # Normalize to [0, 1] w.r.t. the input range, then scale to [vmin, vmax].
  image = (image - in_min_t) / (in_max_t - in_min_t)
  image = vmin + image * (vmax - vmin)
  if clip_values:
    image = tf.clip_by_value(image, vmin, vmax)
  return image
def resize(features,
           resolution,
           keys = ("image",),
           methods = ("bilinear",)):
  """Resize features to resolution disregarding aspect ratio.

  Args:
    features: Feature dictionary; entries named in `keys` are resized.
    resolution: Target (height, width).
    keys: Feature keys to resize.
    methods: Per-key tf.image.resize method, aligned with `keys`.

  Returns:
    `features` with the selected entries resized (dtype preserved).
  """
  if len(methods) != len(keys):
    raise ValueError("Number of keys for resizing must equal methods.")
  for key, method in zip(keys, methods):
    old_dtype = features[key].dtype
    if features[key].shape.ndims == 2:
      # Rank-2 input (e.g. a label map): add a channel axis for tf.image.
      squeeze_extra_dim = True
      blob = features[key][:, :, None]
    else:
      squeeze_extra_dim = False
      blob = features[key]
    blob = tf.cast(blob, dtype=tf.float32)
    blob_resized = tf.image.resize(blob, resolution, method=method)
    # Restore the original dtype after the float-space resize.
    blob_resized = tf.cast(blob_resized, dtype=old_dtype)
    if squeeze_extra_dim:
      features[key] = blob_resized[:, :, 0]
    else:
      features[key] = blob_resized
  return features
def random_resize(features,
                  scale = (0.5, 2.0),
                  ensure_small = None,
                  keys = ("image",),
                  methods = ("bilinear",)):
  """Randomly resize the image and label by a uniformly sampled scale.

  Args:
    features: Input dictionary containing "image", "label", and other keys.
    scale: Output image and label will be scaled by a scale sampled uniformly
      at random in this range.
    ensure_small: Ignored if None. Else, if input image size * min(scale) is
      less than ensure_small, it will adjust the scale so that the output
      image is always at least as big as ensure_small. This is useful so that
      subsequent crop operations do not go out of range.
    keys: Keys to apply resize op to. Note that keys starting with prefix
      "label" will be resized using nearest neighbour.
    methods: Resize methods per key.

  Returns:
    features with randomly scaled "images" defined by keys.
  """
  if ensure_small is None:
    scale_min, scale_max = scale
  else:
    # Lower-bound the scale so both output dims reach ensure_small.
    width, height = _get_image_size(features["image"], dynamic=True)
    scale_min = tf.maximum(ensure_small / tf.cast(width, dtype=tf.float32),
                           ensure_small / tf.cast(height, dtype=tf.float32))
    # Keep the range non-empty even when scale_min exceeds scale[1].
    scale_max = tf.maximum(scale[1], scale_min)
  scale_chosen = tf.random.uniform(
      shape=(), minval=scale_min, maxval=scale_max, dtype=tf.float32)
  width, height = _get_image_size(features["image"], dynamic=True)
  new_width = tf.cast(tf.cast(width, tf.float32) * scale_chosen, tf.int32)
  new_height = tf.cast(tf.cast(height, tf.float32) * scale_chosen, tf.int32)
  return resize(features, (new_height, new_width), keys, methods)
def resize_small(features, size,
                 keys = ("image",),
                 methods = ("bilinear",)):
  """Resizes the image to `size` but preserves the aspect ratio.

  The smaller side becomes `size`; the other side is scaled by the same
  ratio (ceil-rounded).

  NOTE(review): unlike `resize`, the result is not cast back to the input
  dtype (tf.image.resize yields float32) -- confirm whether intended.
  """
  for key, method in zip(keys, methods):
    image = features[key]
    ndims = image.shape.ndims
    if ndims == 2:
      image = image[:, :, None]
    height = tf.cast(tf.shape(image)[0], tf.float32)
    width = tf.cast(tf.shape(image)[1], tf.float32)
    # Scale so that min(height, width) * ratio == size.
    ratio = float(size) / tf.math.minimum(height, width)
    new_height = tf.cast(tf.math.ceil(height * ratio), tf.int32)
    new_width = tf.cast(tf.math.ceil(width * ratio), tf.int32)
    features[key] = tf.image.resize(image, [new_height, new_width],
                                    method=method)
    if ndims == 2:
      features[key] = features[key][Ellipsis, 0]
  return features
def _pad_multichannel(image, ensure_small, pad_value, mode):
  """Pad to ensure `ensure_small`.

  Pads symmetrically (extra pixel goes to the trailing side) so that both
  spatial dims reach at least ensure_small. `pad_value` may be a per-channel
  list/tuple, in which case each channel is padded with its own constant.
  """
  pad_h = tf.maximum(ensure_small[0] - tf.shape(image)[0], 0)
  pad_h_l = pad_h // 2
  pad_h_r = pad_h - pad_h_l
  pad_w = tf.maximum(ensure_small[1] - tf.shape(image)[1], 0)
  pad_w_l = pad_w // 2
  pad_w_r = pad_w - pad_w_l

  def pad_2d(x, v):
    """Pad 2D input `x` with constant value `v`."""
    # Trailing [0, 0] pairs leave any extra (channel) dims unpadded.
    return tf.pad(
        x, [[pad_h_l, pad_h_r], [pad_w_l, pad_w_r]] + [[0, 0]] *
        (len(x.shape) - 2), mode=mode, constant_values=v)

  if isinstance(pad_value, (list, tuple)):
    # Per-channel pad values: pad each channel separately, then restack.
    image_new = tf.stack(
        [pad_2d(image[:, :, i], v) for i, v in enumerate(pad_value)], axis=2)
  else:
    image_new = pad_2d(image, pad_value)
  return image_new
def pad(features, ensure_small,
        pad_values=(0.,), keys=("image",), mode="CONSTANT"):
  """Pads features to minimum resolution.

  mode is a tf.pad mode, or "NOISE" which pads with zeros and then fills the
  padded border with Gaussian noise matching the image's mean/std.
  """
  padding_mode = mode
  if mode == "NOISE":
    assert all(v == 0. for v in pad_values), (
        "pad_values should be 0. when padding mode is NOISE")
    padding_mode = "CONSTANT"
  for k, pad_value in zip(keys, pad_values):
    original_features = features[k]
    features[k] = _pad_multichannel(features[k], ensure_small, pad_value,
                                    padding_mode)
    if mode == "NOISE":
      # Pad the image with Gaussian noise with the same statistics (mean and
      # standard deviation) as the image.
      # mask is 1 in the padded border, 0 in the original image region.
      mask = tf.ones_like(original_features)
      mask = (1. - _pad_multichannel(mask, ensure_small, 0., "CONSTANT"))
      raw_noise = tf.random.normal(tf.shape(mask))
      std = tf.math.reduce_std(original_features, axis=(0, 1), keepdims=True)
      mean = tf.math.reduce_mean(original_features, axis=(0, 1), keepdims=True)
      noise = (raw_noise * std + mean) * mask
      features[k] += noise
  return features
def central_crop(features,
                 crop_size,
                 keys = ("image",)):
  """Center crops given input.

  Args:
    features: Input features dictionary.
    crop_size: Output resolution.
    keys: Fields in features which need to be cropped.

  Returns:
    Cropped features.
  """
  h_c, w_c = crop_size
  for key in keys:
    h, w = tf.unstack(tf.shape(features[key]))[:2]
    # Center the crop window; integer division biases toward the top-left.
    h_offset = (h - h_c) // 2
    w_offset = (w - w_c) // 2
    features[key] = features[key][h_offset:h_offset + h_c,
                                  w_offset:w_offset + w_c]
  for key in keys:
    # Crop size is static, so the output shape can be fixed.
    features[key].set_shape([h_c, w_c] + features[key].get_shape()[2:])
  return features
def crop(features,
         rect,
         keys = ("image",)):
  """Crop given input rectangle.

  Rectangle ends are not included in crop.

  Args:
    features: Input features dictionary.
    rect: Rectangle format is [h_offset, w_offset, height, width].
    keys: Fields in features which need to be cropped.

  Returns:
    Cropped features.
  """
  for key in keys:
    features[key] = features[key][rect[0]:rect[0] + rect[2],
                                  rect[1]:rect[1] + rect[3]]
  # Only when the rect is fully static can the output shape be fixed.
  if _are_all_ints(rect):
    for key in keys:
      features[key].set_shape([rect[2], rect[3]] +
                              features[key].get_shape()[2:])
  return features
def _get_random_crop_rectangle(image_shape, resolution):
  """Given image shape and desired crop resolution sample a crop rectangle.

  Rectangle format is [h_offset, w_offset, height, width].

  Args:
    image_shape: tf.shape(image) output.
    resolution: height x width target crop resolution. Fails when the crop is
      too big for the given image. No checks are performed.

  Returns:
    rect: Tuple[int, int, int, int].
  """
  h = image_shape[0]
  w = image_shape[1]
  # maxval is exclusive, so +1 makes the largest valid offset reachable.
  h_offset = tf.random.uniform((),
                               minval=0,
                               maxval=h - resolution[0] + 1,
                               dtype=tf.int32)
  w_offset = tf.random.uniform((),
                               minval=0,
                               maxval=w - resolution[1] + 1,
                               dtype=tf.int32)
  rect = (h_offset, w_offset, resolution[0], resolution[1])
  return rect
def random_crop(features,
                resolution,
                keys = ("image",)):
  """Random crop of size `resolution`, same window for all `keys`."""
  # Sample the window from the first key; apply it to all keys so spatially
  # aligned features (image/label) stay aligned.
  rect = _get_random_crop_rectangle(tf.shape(features[keys[0]]), resolution)
  features = crop(features, rect, keys=keys)
  if _are_all_ints(resolution):
    for key in keys:
      features[key].set_shape([resolution[0], resolution[1]] +
                              features[key].get_shape()[2:])
  return features
def random_size_crop(features,
                     resolution_min,
                     resolution_max = None,
                     keys = ("image",)):
  """Crop of random size with minimum and maximum resolution.

  Args:
    features: Input features that must include specified keys.
    resolution_min: Minimum resolution of the crop.
    resolution_max: Maximum resolution of the crop. Defaults to inputs shape.
    keys: On which keys to apply this function.

  Returns:
    Transformed features.
  """
  image_shape = tf.unstack(tf.shape(features[keys[0]]))
  if not resolution_max:
    resolution_max = (image_shape[0], image_shape[1])
  # tf.minimum guards against a min larger than the (possibly dynamic) max.
  h = tf.random.uniform([], tf.minimum(resolution_min[0], resolution_max[0]),
                        resolution_max[0] + 1, dtype=tf.int32)
  w = tf.random.uniform([], tf.minimum(resolution_min[1], resolution_max[1]),
                        resolution_max[1] + 1, dtype=tf.int32)
  resolution = (h, w)
  rect = _get_random_crop_rectangle(image_shape, resolution)
  features = crop(features, rect, keys=keys)
  return features
def random_left_right_flip(features, keys = ("image",)):
"""Randomly left-right flip feature fields with 50% probability."""
stride = tf.random.uniform((), minval=0, maxval=2, dtype=tf.int32) * 2 - 1
for key in keys:
old_shape = features[key].get_shape().as_list()
features[key] = features[key][:, ::stride]
features[key].set_shape(old_shape)
return features
def random_rotate(features, angle_range, keys=("image",)):
"""Randomly rotates all features with defined keys."""
angle = tf.random.uniform([], *angle_range, dtype=tf.float32)
for k in keys:
features[k] = tfa_image.rotate(features[k], angle)
return features
def _gauss_filter(kernel_size, sigma):
  """Creates Gaussian filter (1-D, normalized to sum to 1)."""
  x = tf.range(-kernel_size // 2, kernel_size // 2 + 1, 1, dtype=tf.float32)
  if not sigma:
    # Default sigma from kernel size -- presumably the OpenCV heuristic;
    # confirm before relying on exact values.
    sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8
  # exp(x^2 / (-2 sigma^2)) == exp(-x^2 / (2 sigma^2)), the Gaussian kernel.
  x = tf.exp(x**2 / (-2 * sigma**2))
  x /= tf.reduce_sum(x, axis=-1, keepdims=True)
  return x
def gaussian_blur(image, kernel_size,
                  sigma = None):
  """Puts blur on the image.

  Implemented as a separable Gaussian (outer product of two 1-D kernels)
  applied per channel via a depthwise convolution.
  """
  fil_x = _gauss_filter(kernel_size[0], sigma=sigma[0] if sigma else None)
  fil_y = _gauss_filter(kernel_size[1], sigma=sigma[1] if sigma else None)
  # [kh, kw, channels, 1] depthwise filter, identical for every channel.
  fil = fil_x[:, None, None, None] * fil_y[None, :, None, None]
  fil = tf.tile(fil, [1, 1, image.shape[-1], 1])
  res = tf.nn.depthwise_conv2d(
      image[None], fil, strides=[1, 1, 1, 1], padding="SAME")
  res = tf.squeeze(res, 0)
  return res
def label_map(features,
              source_labels,
              target_labels = None,
              default_label = 0,
              keys = ("label",)):
  """Label mapping.

  Args:
    features: Dictionary of data features to preprocess.
    source_labels: Tuple of source labels.
    target_labels: Tuple of target labels, aligned with source_labels. If not
      set, source_labels is used for target_labels.
    default_label: Labels that aren't in source_labels are mapped to this label.
    keys: On which keys to apply this function.

  Returns:
    Features with mapped labels.
  """
  # A static hash table does the source->target lookup element-wise.
  table = tf.lookup.StaticHashTable(
      tf.lookup.KeyValueTensorInitializer(
          source_labels,
          source_labels if target_labels is None else target_labels),
      default_label)
  for key in keys:
    features[key] = table.lookup(features[key])
  return features
def binarize(features,
             threshold = 0,
             keys = ("image",)):
  """Binarizes a (grayscale, uint8) image.

  Args:
    features: Dictionary of data features to preprocess.
    threshold: Threshold to distinguish background from foreground.
    keys: On which keys to apply this function.

  Returns:
    Features with the binarized image (values 0 or 255, uint8).
  """
  for key in keys:
    image = features[key]
    assert image.dtype == tf.uint8
    # assert (image.shape.ndims == 3 and image.shape[-1] == 1 or
    #         image.shape.ndims == 2)
    # Pixels strictly above threshold become 255, the rest 0.
    binarized = tf.cast(image > tf.cast(threshold, tf.uint8), tf.uint8) * 255
    features[key] = binarized
  return features
# Some operations on point clouds.
def binary_image_to_points(features,
                           normalize_coords = True,
                           keys = ("image",)):
  """Converts a (binary) image into a 2D point cloud.

  Args:
    features: Dictionary of data features to preprocess.
    normalize_coords: Normalize coords to be in [0,1] by preserving the aspect
      ratio.
    keys: On which keys to apply this function.

  Returns:
    Features with the image as a point cloud of shape [H*W, 2]; background
    pixels become points at the origin.
  """
  for key in keys:
    image = features[key]  # [HxW] or [HxWx1]
    # Relies on a statically known spatial shape (used for the grid below).
    image = tf.reshape(image, [image.shape[0], image.shape[1], 1])
    # We map background pixels to the origin, which may be suboptimal
    # but saves us some engineering work.
    # coords[i, j] == (i, j) as float32; shape [H, W, 2].
    coords = tf.cast(
        tf.stack(tf.meshgrid(tf.range(image.shape[0]), tf.range(image.shape[1]),
                             indexing="ij"), axis=-1),
        tf.float32)
    if normalize_coords:
      # Divide by the larger extent so the aspect ratio is preserved.
      coords /= tf.cast(tf.reduce_max(image.shape[:2]), tf.float32)
    # Broadcast the foreground mask over both coordinate channels.
    mask = tf.tile(image > 0, [1, 1, 2])
    features[key] = tf.reshape(tf.cast(mask, tf.float32) * coords, [-1, 2])
  return features
def points_shuffle(features,
                   keys = ("image",)):
  """Randomly permutes the point order of each requested feature.

  Args:
    features: Dictionary of data features to preprocess.
    keys: On which keys to apply this function.

  Returns:
    Features with shuffled points.
  """
  for feature_key in keys:
    points = features[feature_key]
    features[feature_key] = tf.random.shuffle(points)
  return features
def points_padded_to_end(features,
                         keys = ("image",)):
  """Moves all padded (all-zero) points to the end of each point cloud.

  Args:
    features: Dictionary of data features to preprocess.
    keys: On which keys to apply this function.

  Returns:
    Features with padded points at the end.
  """
  def _padding_last(points):
    # Real points get sort key 0.0 and padded (all-zero) points get 1.0, so
    # an ascending stable sort keeps real points first in original order.
    return tf.cast(tf.reduce_sum(points, axis=-1) == 0.0, tf.float32)
  for feature_key in keys:
    features[feature_key] = _points_sort(
        features[feature_key], key=_padding_last, stable=True)
  return features
def _points_sort(points, key, stable=False):
  """Sorts points ascending by key(points); a stable sort preserves ties."""
  order = tf.argsort(key(points), stable=stable)
  return tf.gather(points, order)
def points_select_first_n(features,
                          num_points,
                          keys = ("image",)):
  """Keeps only the first num_points points of each point cloud.

  Args:
    features: Dictionary of data features to preprocess.
    num_points: The target number of points.
    keys: On which keys to apply this function.

  Returns:
    Features with resized number of points.
  """
  for feature_key in keys:
    points = features[feature_key]
    features[feature_key] = tf.slice(
        points, begin=(0, 0), size=(num_points, -1))
  return features
def points_scale(features,
                 stddev,
                 keys = ("image",)):
  """Randomly scales point clouds from the origin.

  Args:
    features: Dictionary of data features to preprocess.
    stddev: The stddev of the scale.
    keys: On which keys to apply this function.

  Returns:
    Features with scaled points.
  """
  for feature_key in keys:
    # Per-axis scale factor centered at 1.0, drawn fresh for every key.
    scale = 1.0 + tf.random.truncated_normal(
        shape=(1, 3), mean=0.0, stddev=stddev)
    features[feature_key] = features[feature_key] * scale
  return features
def points_translate(features,
                     stddev,
                     keys = ("image",)):
  """Randomly translates point clouds.

  Args:
    features: Dictionary of data features to preprocess.
    stddev: The stddev of the translation.
    keys: On which keys to apply this function.

  Returns:
    Features with translated points.
  """
  for feature_key in keys:
    # Per-axis offset drawn fresh for every key.
    offset = tf.random.truncated_normal(shape=(1, 3), mean=0.0, stddev=stddev)
    features[feature_key] = features[feature_key] + offset
  return features
def points_rotate(features,
                  max_rotation,
                  min_rotation = 0.0,
                  axis = "z",
                  keys = ("image",)):
  """Randomly rotate points on a given axis.

  Args:
    features: Dictionary of data features to preprocess. Points are expected
      as rows of 3-D coordinates (last dimension of size 3).
    max_rotation: The maximum possible rotation in radians.
    min_rotation: The minimum possible rotation in radians.
    axis: The rotation axis.
    keys: On which keys to apply this function.

  Returns:
    Features with rotated points.
  """
  assert axis in {"x", "y", "z"}, "invalid rotation axis"
  for key in keys:
    # A fresh angle is drawn for every key.
    phi = tf.random.uniform(
        shape=(1,), minval=min_rotation, maxval=max_rotation)
    cos, sin, zero, one = (tf.cos(phi), tf.sin(phi), tf.zeros((1,)),
                           tf.ones((1,)))
    # Matrices from
    # https://en.wikipedia.org/wiki/Rotation_matrix#Basic_rotations.
    if axis == "x":
      rotation_matrix = [one, zero, zero, zero, cos, -sin, zero, sin, cos]
    elif axis == "y":
      rotation_matrix = [cos, zero, sin, zero, one, zero, -sin, zero, cos]
    elif axis == "z":
      rotation_matrix = [cos, -sin, zero, sin, cos, zero, zero, zero, one]
    # Row-major flat list -> [3, 3] matrix.
    rotate = tf.reshape(tf.stack(rotation_matrix, axis=0), [3, 3])
    # Points are row vectors, so the matrix multiplies from the right.
    features[key] = tf.matmul(features[key], rotate)
  return features
def random_linear_transform(image,
                            a_bounds,
                            b_bounds,
                            p):
  """Random linear augmentation to compute a * image + b with probability p.

  Based on
  https://github.com/idiap/attention-sampling/blob/504b1733869e18005d099ec04f7cbd1793043d67/ats/utils/layers.py#L125

  Args:
    image: The image to transform.
    a_bounds: Lower and upper bounds of a.
    b_bounds: Lower and upper bounds of b.
    p: Probability to apply the transform, otherwise keep image identical.

  Returns:
    The transformed image.
  """
  assert image.dtype == tf.float32, image.dtype
  shape = (1, 1, 1)
  # Bernoulli(p) gate; drawn first to keep the RNG call order unchanged.
  gate = tf.cast(
      tf.random.uniform(shape=shape, minval=0, maxval=1) < p, image.dtype)
  slope = tf.random.uniform(shape=shape, minval=a_bounds[0], maxval=a_bounds[1])
  offset = tf.random.uniform(shape=shape, minval=b_bounds[0], maxval=b_bounds[1])
  # When the gate is 0 fall back to the identity transform (a=1, b=0).
  slope = gate * slope + (1 - gate)
  offset = gate * offset
  return slope * image + offset
def normalize(image,
              mu,
              sigma):
  """Standardizes a float32 image as (image - mu) / sigma.

  `mu` and `sigma` are broadcast over the two leading axes, i.e. they are
  per-channel values for a channel-last image -- assumption, confirm layout.
  """
  assert image.dtype == tf.float32, image.dtype
  channel_mean = tf.constant(mu)[None, None, :]
  channel_std = tf.constant(sigma)[None, None, :]
  return (image - channel_mean) / channel_std
| |
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from tornado.web import HTTPError
from collections import defaultdict
from json import loads, dumps
from qiita_core.qiita_settings import r_client
import qiita_db as qdb
from .oauth2 import OauthBaseHandler, authenticate_oauth
def _get_artifact(a_id):
    """Returns the artifact with the given id if it exists

    Parameters
    ----------
    a_id : str
        The artifact id

    Returns
    -------
    qiita_db.artifact.Artifact
        The requested artifact

    Raises
    ------
    HTTPError
        If the artifact does not exist, with error code 404
        If there is a problem instantiating the artifact, with error code 500
    """
    try:
        artifact = qdb.artifact.Artifact(int(a_id))
    except qdb.exceptions.QiitaDBUnknownIDError:
        # Unknown id -> not found
        raise HTTPError(404)
    except Exception as e:
        # Anything else (bad id format, DB failure, ...) -> server error
        raise HTTPError(500, reason='Error instantiating artifact %s: %s'
                        % (a_id, str(e)))
    return artifact
class ArtifactHandler(OauthBaseHandler):
    """REST handler for retrieving (GET) and patching (PATCH) one artifact."""
    @authenticate_oauth
    def get(self, artifact_id):
        """Retrieves the artifact information
        Parameters
        ----------
        artifact_id : str
            The id of the artifact whose information is being retrieved
        Returns
        -------
        dict
            The artifact information:
            'name': artifact name
            'timestamp': artifact creation timestamp
            'visibility': artifact visibility
            'type': artifact type
            'data_type': artifact data type
            'can_be_submitted_to_ebi': if the artifact can be submitted to ebi
            'ebi_run_accessions': dict with the EBI run accessions attached to
                the artifact
            'can_be_submitted_to_vamps': if the artifact can be submitted to
                vamps
            'is_submitted_to_vamps': whether the artifact has been submitted
                to vamps or not
            'prep_information': list of prep information ids
            'study': the study id
            'processing_parameters': dict with the processing parameters used
                to generate the artifact or None
            'files': dict with the artifact files, keyed by filepath type
        """
        # All DB reads happen inside one transaction for a consistent view.
        with qdb.sql_connection.TRN:
            artifact = _get_artifact(artifact_id)
            study = artifact.study
            analysis = artifact.analysis
            response = {
                'name': artifact.name,
                'timestamp': str(artifact.timestamp),
                'visibility': artifact.visibility,
                'type': artifact.artifact_type,
                'data_type': artifact.data_type,
                'can_be_submitted_to_ebi': artifact.can_be_submitted_to_ebi,
                'can_be_submitted_to_vamps':
                    artifact.can_be_submitted_to_vamps,
                'prep_information': [p.id for p in artifact.prep_templates],
                'study': study.id if study else None,
                'analysis': analysis.id if analysis else None}
            params = artifact.processing_parameters
            response['processing_parameters'] = (
                params.values if params is not None else None)
            # Submission-related fields only make sense when the artifact
            # type supports the corresponding submission target.
            response['ebi_run_accessions'] = (
                artifact.ebi_run_accessions
                if response['can_be_submitted_to_ebi'] else None)
            response['is_submitted_to_vamps'] = (
                artifact.is_submitted_to_vamps
                if response['can_be_submitted_to_vamps'] else None)
            # Instead of sending a list of files, provide the files as a
            # dictionary keyed by filepath type
            response['files'] = defaultdict(list)
            for x in artifact.filepaths:
                response['files'][x['fp_type']].append(x['fp'])
        self.write(response)
    @authenticate_oauth
    def patch(self, artifact_id):
        """Patches the artifact information

        Currently only supports op='add' with path='/html_summary', which
        attaches an HTML summary file (and optional support dir) to the
        artifact.

        Parameter
        ---------
        artifact_id : str
            The id of the artifact whose information is being updated
        """
        req_op = self.get_argument('op')
        req_path = self.get_argument('path')
        req_value = self.get_argument('value')
        if req_op == 'add':
            req_path = [v for v in req_path.split('/') if v]
            if len(req_path) != 1 or req_path[0] != 'html_summary':
                raise HTTPError(400, reason='Incorrect path parameter value')
            else:
                artifact = _get_artifact(artifact_id)
                # The value may be a JSON dict {'html': fp, 'dir': dir} or a
                # bare filepath string (no support directory).
                try:
                    html_data = loads(req_value)
                    html_fp = html_data['html']
                    html_dir = html_data['dir']
                except ValueError:
                    html_fp = req_value
                    html_dir = None
                try:
                    artifact.set_html_summary(html_fp, html_dir)
                except Exception as e:
                    raise HTTPError(500, reason=str(e))
        else:
            raise HTTPError(400, reason='Operation "%s" not supported. '
                            'Current supported operations: add' % req_op)
        self.finish()
class ArtifactAPItestHandler(OauthBaseHandler):
    """Endpoint to create artifacts through the REST API (API tests)."""
    @authenticate_oauth
    def post(self):
        """Creates a new artifact
        Parameters
        ----------
        filepaths : str
            Json string with a list of filepaths and its types
        type : str
            The artifact type
        prep_template: int
            The id of the template that the new artifact belongs to
        name : str, optional
            The artifact name
        Returns
        -------
        dict
            'artifact': the id of the new artifact
        See Also
        --------
        qiita_db.artifact.Artifact.create
        """
        filepaths = loads(self.get_argument('filepaths'))
        artifact_type = self.get_argument('type')
        prep_template = self.get_argument('prep', None)
        analysis = self.get_argument('analysis', None)
        name = self.get_argument('name', None)
        dtype = self.get_argument('data_type', None)
        if prep_template is not None:
            prep_template = qdb.metadata_template.prep_template.PrepTemplate(
                prep_template)
            # When attached to a prep template the data type comes from the
            # template, so any explicitly-passed value is ignored.
            dtype = None
        if analysis is not None:
            analysis = qdb.analysis.Analysis(analysis)
        a = qdb.artifact.Artifact.create(
            filepaths, artifact_type, name=name, prep_template=prep_template,
            analysis=analysis, data_type=dtype)
        self.write({'artifact': a.id})
class ArtifactTypeHandler(OauthBaseHandler):
    """REST handler to list (GET) and create (POST) artifact types."""
    @authenticate_oauth
    def get(self):
        """Returns the artifact types and their local mountpoint location
        Returns
        -------
        dict
            'artifact_type': local mountpoint
        """
        atypes = dict()
        for atype in qdb.util.get_artifact_types():
            mountpoints = qdb.util.get_mountpoint(atype)
            if mountpoints:
                # [0][1]: get latest/active and the actual location
                atypes[atype] = mountpoints[0][1]
        # add the upload location
        atypes['uploads'] = qdb.util.get_mountpoint('uploads')[0][1]
        self.write(atypes)
    @authenticate_oauth
    def post(self):
        """Creates a new artifact type
        Parameters
        ----------
        name : str
            The artifact type name
        description : str
            The artifact type description
        can_be_submitted_to_ebi : bool
            Whether the artifact type can be submitted to EBI or not
        can_be_submitted_to_vamps : bool
            Whether the artifact type can be submitted to VAMPS or not
        is_user_uploadable : bool
            Whether the artifact type can be raw: direct upload to qiita
        filepath_types : list of (str, bool)
            The list filepath types that the new artifact type supports, and
            if they're required or not in an artifact instance of this type
        """
        a_type = self.get_argument('type_name')
        a_desc = self.get_argument('description')
        ebi = self.get_argument('can_be_submitted_to_ebi')
        vamps = self.get_argument('can_be_submitted_to_vamps')
        raw = self.get_argument('is_user_uploadable')
        fp_types = loads(self.get_argument('filepath_types'))
        try:
            qdb.artifact.Artifact.create_type(a_type, a_desc, ebi, vamps, raw,
                                              fp_types)
        except qdb.exceptions.QiitaDBDuplicateError:
            # Ignoring this error as we want this endpoint in the rest api
            # to be idempotent.
            self.set_status(200, reason="Artifact type already exists")
        self.finish()
class APIArtifactHandler(OauthBaseHandler):
    """REST handler that creates a new artifact via a validator job."""
    @authenticate_oauth
    def post(self):
        """Creates a new artifact attached to a job or a prep template.

        Exactly one of `job_id` / `prep_id` must be supplied.

        Parameters
        ----------
        user_email : str
            Email of the user that requests the creation
        job_id : str, optional
            Existing processing job the artifact belongs to
        prep_id : str, optional
            Prep information id the artifact belongs to
        artifact_type : str
            The artifact type
        command_artifact_name : str, optional
            Name of the command output to attach to (default 'Name')
        files : str
            The files for the new artifact

        Raises
        ------
        HTTPError
            400 if neither or both of job_id/prep_id are given, or if
            command_artifact_name is not an output of the job's command
        """
        user_email = self.get_argument('user_email')
        job_id = self.get_argument('job_id', None)
        prep_id = self.get_argument('prep_id', None)
        atype = self.get_argument('artifact_type')
        aname = self.get_argument('command_artifact_name', 'Name')
        files = self.get_argument('files')
        if job_id is None and prep_id is None:
            raise HTTPError(
                400, reason='You need to specify a job_id or a prep_id')
        if job_id is not None and prep_id is not None:
            raise HTTPError(
                400, reason='You need to specify only a job_id or a prep_id')
        user = qdb.user.User(user_email)
        values = {
            'files': files, 'artifact_type': atype, 'name': aname,
            # leaving here in case we need to add a way to add an artifact
            # directly to an analysis, for more information see
            # ProcessingJob._complete_artifact_transformation
            'analysis': None}
        PJ = qdb.processing_job.ProcessingJob
        if job_id is not None:
            TN = qdb.sql_connection.TRN
            job = PJ(job_id)
            with TN:
                sql = """SELECT command_output_id
                         FROM qiita.command_output
                         WHERE name = %s AND command_id = %s"""
                TN.add(sql, [aname, job.command.id])
                results = TN.execute_fetchflatten()
            if len(results) < 1:
                # pass the text as `reason`: Tornado's second positional
                # argument is only the server-side log message and would
                # never reach the client
                raise HTTPError(400, reason='The command_artifact_name does '
                                            'not exist in the command')
            cmd_out_id = results[0]
            provenance = {'job': job_id,
                          'cmd_out_id': cmd_out_id,
                          # direct_creation is a flag to avoid having to wait
                          # for the complete job to create the new artifact,
                          # which is normally ran during regular processing.
                          # Skipping is fine because we are adding an artifact
                          # to an existing job outside of regular processing
                          'direct_creation': True,
                          'name': aname}
            values['provenance'] = dumps(provenance)
            # inherit the first prep info file from the first input artifact
            prep_id = job.input_artifacts[0].prep_templates[0].id
        else:
            prep_id = int(prep_id)
        values['template'] = prep_id
        cmd = qdb.software.Command.get_validator(atype)
        params = qdb.software.Parameters.load(cmd, values_dict=values)
        new_job = PJ.create(user, params, True)
        new_job.submit()
        # Let the GUI know a job is running for this prep template.
        r_client.set('prep_template_%d' % prep_id,
                     dumps({'job_id': new_job.id, 'is_qiita_job': True}))
        self.write(new_job.id)
        self.finish()
| |
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Provides common functionality for integrated unit tests
"""
import random
import string
import time
from oslo_log import log as logging
import nova.conf
import nova.image.glance
from nova import test
from nova.tests import fixtures as nova_fixtures
from nova.tests.unit import cast_as_call
import nova.tests.unit.image.fake
from nova.tests import uuidsentinel as uuids
CONF = nova.conf.CONF
LOG = logging.getLogger(__name__)
def generate_random_alphanumeric(length):
    """Creates a random alphanumeric string of specified length."""
    pool = string.ascii_uppercase + string.digits
    return ''.join(random.choice(pool) for _ in range(length))
def generate_random_numeric(length):
    """Creates a random numeric string of specified length."""
    digits = string.digits
    return ''.join(random.choice(digits) for _ in range(length))
def generate_new_element(items, prefix, numeric=False):
    """Creates a random string with prefix, that is not in 'items' list."""
    make_suffix = (generate_random_numeric if numeric
                   else generate_random_alphanumeric)
    while True:
        candidate = prefix + make_suffix(8)
        if candidate not in items:
            return candidate
        # Extremely unlikely with an 8-char suffix; try again.
        LOG.debug("Random collision on %s" % candidate)
class _IntegratedTestBase(test.TestCase):
    """Base class for integrated (API + services) tests.

    Subclasses are expected to provide `api_major_version` and the
    `_image_ref_parameter` / `_flavor_ref_parameter` attributes used by the
    request-building helpers below -- not defined here.
    """
    REQUIRES_LOCKING = True
    # When True the tests talk to the admin API endpoint instead of the
    # regular (non-admin) one.
    ADMIN_API = False
    def setUp(self):
        super(_IntegratedTestBase, self).setUp()
        self.flags(verbose=True)
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self._setup_services()
        self.api_fixture = self.useFixture(
            nova_fixtures.OSAPIFixture(self.api_major_version))
        # if the class needs to run as admin, make the api endpoint
        # the admin, otherwise it's safer to run as non admin user.
        if self.ADMIN_API:
            self.api = self.api_fixture.admin_api
        else:
            self.api = self.api_fixture.api
        self.useFixture(cast_as_call.CastAsCall(self.stubs))
        self.addCleanup(nova.tests.unit.image.fake.FakeImageService_reset)
    def _setup_compute_service(self):
        """Start the compute service; override to customize."""
        return self.start_service('compute')
    def _setup_scheduler_service(self):
        """Start the scheduler service (chance driver); override to customize."""
        self.flags(group='scheduler', driver='chance_scheduler')
        return self.start_service('scheduler')
    def _setup_services(self):
        # Start the minimal set of nova services these tests rely on.
        self.conductor = self.start_service('conductor',
                                            manager=CONF.conductor.manager)
        self.compute = self._setup_compute_service()
        self.consoleauth = self.start_service('consoleauth')
        self.network = self.start_service('network')
        self.scheduler = self._setup_scheduler_service()
    def get_unused_server_name(self):
        """Return a server name not used by any existing server."""
        servers = self.api.get_servers()
        server_names = [server['name'] for server in servers]
        return generate_new_element(server_names, 'server')
    def get_unused_flavor_name_id(self):
        """Return a (name, id) pair not used by any existing flavor."""
        flavors = self.api.get_flavors()
        flavor_names = list()
        flavor_ids = list()
        [(flavor_names.append(flavor['name']),
          flavor_ids.append(flavor['id']))
         for flavor in flavors]
        return (generate_new_element(flavor_names, 'flavor'),
                int(generate_new_element(flavor_ids, '', True)))
    def get_invalid_image(self):
        """Return an image uuid that is known not to exist."""
        return uuids.fake
    def _build_minimal_create_server_request(self):
        """Build the smallest valid create-server request body."""
        server = {}
        # We now have a valid imageId
        server[self._image_ref_parameter] = self.api.get_images()[0]['id']
        # Set a valid flavorId
        flavor = self.api.get_flavors()[0]
        LOG.debug("Using flavor: %s" % flavor)
        server[self._flavor_ref_parameter] = ('http://fake.server/%s'
                                              % flavor['id'])
        # Set a valid server name
        server_name = self.get_unused_server_name()
        server['name'] = server_name
        return server
    def _create_flavor_body(self, name, ram, vcpus, disk, ephemeral, id, swap,
                            rxtx_factor, is_public):
        """Build the request body for the flavor-create API."""
        return {
            "flavor": {
                "name": name,
                "ram": ram,
                "vcpus": vcpus,
                "disk": disk,
                "OS-FLV-EXT-DATA:ephemeral": ephemeral,
                "id": id,
                "swap": swap,
                "rxtx_factor": rxtx_factor,
                "os-flavor-access:is_public": is_public,
            }
        }
    def _create_flavor(self, memory_mb=2048, vcpu=2, disk=10, ephemeral=10,
                       swap=0, rxtx_factor=1.0, is_public=True,
                       extra_spec=None):
        """Create a flavor via the admin API and return its id."""
        flv_name, flv_id = self.get_unused_flavor_name_id()
        body = self._create_flavor_body(flv_name, memory_mb, vcpu, disk,
                                        ephemeral, flv_id, swap, rxtx_factor,
                                        is_public)
        self.api_fixture.admin_api.post_flavor(body)
        if extra_spec is not None:
            spec = {"extra_specs": extra_spec}
            self.api_fixture.admin_api.post_extra_spec(flv_id, spec)
        return flv_id
    def _build_server(self, flavor_id):
        """Build a create-server request body using the given flavor."""
        server = {}
        image = self.api.get_images()[0]
        LOG.debug("Image: %s" % image)
        # We now have a valid imageId
        server[self._image_ref_parameter] = image['id']
        # Set a valid flavorId
        flavor = self.api.get_flavor(flavor_id)
        LOG.debug("Using flavor: %s" % flavor)
        server[self._flavor_ref_parameter] = ('http://fake.server/%s'
                                              % flavor['id'])
        # Set a valid server name
        server_name = self.get_unused_server_name()
        server['name'] = server_name
        return server
    def _check_api_endpoint(self, endpoint, expected_middleware):
        """Assert the wsgi stack contains every expected middleware class."""
        app = self.api_fixture.osapi.app.get((None, '/v2'))
        # Walk the middleware chain, removing each expected class when seen.
        while getattr(app, 'application', False):
            for middleware in expected_middleware:
                if isinstance(app.application, middleware):
                    expected_middleware.remove(middleware)
                    break
            app = app.application
        self.assertEqual([],
                         expected_middleware,
                         ("The expected wsgi middlewares %s are not "
                          "existed") % expected_middleware)
class InstanceHelperMixin(object):
    """Helpers shared by tests that create servers and wait on their state."""
    def _wait_for_state_change(self, admin_api, server, expected_status,
                               max_retries=10):
        """Poll until `server` reaches `expected_status` or fail the test.

        Polls every 0.5s, up to `max_retries` attempts, and returns the
        latest server representation.
        """
        retry_count = 0
        while True:
            server = admin_api.get_server(server['id'])
            if server['status'] == expected_status:
                break
            retry_count += 1
            if retry_count == max_retries:
                self.fail('Wait for state change failed, '
                          'expected_status=%s, actual_status=%s'
                          % (expected_status, server['status']))
            time.sleep(0.5)
        return server
    def _build_minimal_create_server_request(self, api, name, image_uuid=None,
                                             flavor_id=None):
        """Build the smallest valid create-server request body."""
        server = {}
        # We now have a valid imageId
        server['imageRef'] = image_uuid or api.get_images()[0]['id']
        if not flavor_id:
            # Set a valid flavorId
            flavor_id = api.get_flavors()[1]['id']
        server['flavorRef'] = ('http://fake.server/%s' % flavor_id)
        server['name'] = name
        return server
| |
# -*- coding: utf-8 -*-
"""
waveforms.py - Waveform functions used by StimGenerator
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more infomation.
This file defines several waveform-generating functions meant to be
called from within a StimGenerator widget.
"""
import numpy
## Checking functions
def isNum(x):
    """True if x is number-like, i.e. exposes an __int__ conversion."""
    return getattr(x, '__int__', None) is not None
def isNumOrNone(x):
    """True if x is None or number-like (see isNum)."""
    if x is None:
        return True
    return isNum(x)
def isList(x):
    """True if x is sequence-like (has __len__); note this includes str."""
    return getattr(x, '__len__', None) is not None
def isNumList(x):
    """True if x is a non-empty sequence whose first element is numeric."""
    if not isList(x) or len(x) == 0:
        return False
    return isNum(x[0])
## Functions to allow in eval for the waveform generator.
## The first parameter is always a dict which will at least contain 'rate' and 'nPts'.
## this parameter is automatically supplied, and will not be entered by the end user.
## These should be very robust with good error reporting since end users will be using them.
def pulse(params, times, widths, values, base=0.0):
    """Waveform of rectangular pulses: values[i] starting at times[i] (sec)
    lasting widths[i] (sec), on a background of `base`.

    Scalar times/widths/values are promoted so a single pulse can be given
    without lists. Warnings are reported via params['message'].
    """
    nPts = params['nPts']
    rate = params['rate']
    # Promote scalar arguments so every pulse has a time, width and value.
    if not isList(times):
        times = [times]
    if not isList(widths):
        widths = [widths] * len(times)
    if not isList(values):
        values = [values] * len(times)
    d = numpy.empty(nPts)
    d[:] = base
    for i in range(len(times)):
        first = int(times[i] * rate)
        span = int(widths[i] * rate)
        if span == 0:
            params['message'] = "WARNING: Pulse width %f is too short for rate %f" % (widths[i], rate)
        if first + span >= nPts:
            params['message'] = "WARNING: Function is longer than generated waveform."
        d[first:first + span] = values[i]
    return d
def steps(params, times, values, base=0.0):
    """Piecewise-constant waveform: values[i] holds on [times[i], times[i+1]);
    the final value extends from times[-1] to the end of the waveform.

    Warnings are reported via params['message'].
    """
    rate = params['rate']
    nPts = params['nPts']
    if not isList(times):
        raise Exception('times argument must be a list')
    if not isList(values):
        raise Exception('values argument must be a list')
    d = numpy.empty(nPts)
    d[:] = base
    for i in range(1, len(times)):
        lo = int(times[i-1] * rate)
        hi = int(times[i] * rate)
        if lo == hi:
            params['message'] = "WARNING: Step width %f is too short for rate %f" % (times[i]-times[i-1], rate)
        if hi >= nPts:
            params['message'] = "WARNING: Function is longer than generated waveform."
        d[lo:hi] = values[i-1]
    # The last value continues through the remainder of the waveform.
    d[int(times[-1] * rate):] = values[-1]
    return d
def sineWave(params, period, amplitude=1.0, phase=0.0, start=0.0, stop=None, base=0.0):
    """Sine wave of the given period (sec), amplitude and phase (cycles),
    active on the window [start, stop] (sec); `base` elsewhere.

    Warnings are accumulated in params['message'].
    """
    rate = params['rate']
    nPts = params['nPts']
    params['message'] = ""
    ## Validate all arguments first.
    if not isNum(amplitude):
        raise Exception("Amplitude argument must be a number")
    if not isNum(period):
        raise Exception("Period argument must be a number")
    if not isNum(phase):
        raise Exception("Phase argument must be a number")
    if not isNumOrNone(start):
        raise Exception("Start argument must be a number")
    if not isNumOrNone(stop):
        raise Exception("Stop argument must be a number")
    ## Output buffer, pre-filled with the baseline value.
    d = numpy.empty(nPts)
    d[:] = base
    ## Convert the active window from seconds to sample indices.
    start = 0 if start is None else int(start * rate)
    stop = nPts-1 if stop is None else int(stop * rate)
    if stop > nPts-1:
        params['message'] += "WARNING: Function is longer than generated waveform\n"
        stop = nPts-1
    if int(period * rate) < 10:
        params['message'] += 'Warning: Period is less than 10 samples\n'
    # Sample the sine over the active window; phase is in cycles.
    t = numpy.arange(stop-start)
    d[start:stop] = amplitude * numpy.sin(phase * 2.0 * numpy.pi + t * 2.0 * numpy.pi / (period * rate))
    return d
def squareWave(params, period, amplitude=1.0, phase=0.0, duty=0.5, start=0.0, stop=None, base=0.0):
    """Square (pulse-train) waveform.

    params: dict supplied by the generator with 'rate' (samples/sec) and
    'nPts' (output length); warnings accumulate in params['message'].
    period: cycle length in seconds (> 0).
    amplitude: value written during the high part of each cycle.
    phase: phase offset in fractions of a cycle.
    duty: fraction of each cycle spent high (0.0..1.0).
    start, stop: active window in seconds (stop=None means end of waveform).
    base: value outside the pulses.
    Returns a numpy array of length nPts.
    """
    rate = params['rate']
    nPts = params['nPts']
    params['message'] = ""
    ## Check all arguments
    if not isNum(amplitude):
        raise Exception("Amplitude argument must be a number")
    if not isNum(period) or period <= 0:
        raise Exception("Period argument must be a number > 0")
    if not isNum(phase):
        raise Exception("Phase argument must be a number")
    if not isNum(duty) or duty < 0.0 or duty > 1.0:
        raise Exception("Duty argument must be a number between 0.0 and 1.0")
    if not isNumOrNone(start):
        raise Exception("Start argument must be a number")
    if not isNumOrNone(stop):
        raise Exception("Stop argument must be a number")
    ## initialize array
    d = numpy.empty(nPts)
    d[:] = base
    ## Define start and end points
    if start is None:
        start = 0
    else:
        start = int(start * rate)
    if stop is None:
        stop = nPts-1
    else:
        stop = int(stop * rate)
    if stop > nPts-1:
        params['message'] += "WARNING: Function is longer than generated waveform\n"
        stop = nPts-1
    pulseWidth = int(duty * period * rate)
    # Normalize the phase into (-1, 0] cycles so the per-cycle shift below
    # is non-positive and the stamping loop can begin at `start`.
    phase = (phase % 1.0) - 1.0
    pulseShift = int(phase * period * rate)
    cycleTime = int(period * rate)
    if cycleTime < 10:
        params['message'] += 'Warning: Period is less than 10 samples\n'
    if cycleTime < 1:
        # Degenerate period (shorter than one sample): emit silence.
        return numpy.zeros(nPts)
    # Stamp each pulse; the +2 covers cycles partially overlapping the window.
    nCycles = 2 + int((stop-start) / float(period*rate))
    for i in range(nCycles):
        ptr = start + int(i*period*rate)
        a = ptr + pulseShift
        if a > stop:
            break
        b = a + pulseWidth
        # Clip the pulse to the active window.
        a = max(a, start)
        b = min(b, stop)
        if a >= b:
            # Pulse lies entirely outside the window.
            continue
        d[a:b] = amplitude
    return d
def sawWave(params, period, amplitude=1.0, phase=0.0, start=0.0, stop=None, base=0.0):
    """Sawtooth wave ramping 0..amplitude once per `period` seconds, active
    on [start, stop] (sec); `base` elsewhere. Phase is in cycles.

    Warnings are accumulated in params['message'].
    """
    rate = params['rate']
    nPts = params['nPts']
    params['message'] = ""
    ## Validate all arguments first.
    if not isNum(amplitude):
        raise Exception("Amplitude argument must be a number")
    if not isNum(period) or period <= 0:
        raise Exception("Period argument must be a number > 0")
    if not isNum(phase):
        raise Exception("Phase argument must be a number")
    if not isNumOrNone(start):
        raise Exception("Start argument must be a number")
    if not isNumOrNone(stop):
        raise Exception("Stop argument must be a number")
    ## Output buffer, pre-filled with the baseline value.
    d = numpy.empty(nPts)
    d[:] = base
    ## Convert the active window from seconds to sample indices.
    start = 0 if start is None else int(start * rate)
    stop = nPts-1 if stop is None else int(stop * rate)
    if stop > nPts-1:
        params['message'] += "WARNING: Function is longer than generated waveform\n"
        stop = nPts-1
    cycleTime = int(period * rate)
    if cycleTime < 10:
        params['message'] += 'Warning: Period is less than 10 samples\n'
    if cycleTime < 1:
        # Degenerate period (shorter than one sample): emit silence.
        return numpy.zeros(nPts)
    # Ramp: fractional cycle position scaled by the amplitude.
    t = numpy.arange(stop-start)
    d[start:stop] = amplitude * ((phase + t/float(rate*period)) % 1.0)
    return d
def listWave(params, period, values=None, phase=0.0, start=0.0, stop=None, base=0.0):
    """Cycles through `values`, one full pass per `period` seconds, on the
    active window [start, stop] (sec); `base` elsewhere.

    params: dict supplied by the generator with 'rate' and 'nPts'; warnings
    accumulate in params['message'].
    values: 1-D list/tuple/array of samples to cycle through (required).
    phase: offset in fractions of a cycle.
    Returns a numpy array of length nPts.
    """
    rate = params['rate']
    nPts = params['nPts']
    params['message'] = ""
    ## Check all arguments
    # isinstance (rather than an exact type() comparison) also accepts
    # subclasses, e.g. ndarray subclasses returned by numpy operations.
    if not isinstance(values, (list, tuple, numpy.ndarray)) or len(values) < 1:
        raise Exception("Values argument must be a list or array")
    values = numpy.array(values)
    if values.ndim != 1:
        raise Exception("Values argument must be 1-dimensional array")
    if not isNum(period) or period <= 0:
        raise Exception("Period argument must be a number > 0")
    if not isNum(phase):
        raise Exception("Phase argument must be a number")
    if not isNumOrNone(start):
        raise Exception("Start argument must be a number")
    if not isNumOrNone(stop):
        raise Exception("Stop argument must be a number")
    ## initialize array
    d = numpy.empty(nPts)
    d[:] = base
    ## Define start and end points
    if start is None:
        start = 0
    else:
        start = int(start * rate)
    if stop is None:
        stop = nPts-1
    else:
        stop = int(stop * rate)
    if stop > nPts-1:
        params['message'] += "WARNING: Function is longer than generated waveform\n"
        stop = nPts-1
    cycleTime = int(period * rate)
    if cycleTime < 10:
        params['message'] += 'Warning: Period is less than 10 samples\n'
    if cycleTime < 1:
        # Degenerate period (shorter than one sample): emit silence.
        return numpy.zeros(nPts)
    # Sawtooth ramp 0..len(values) used as a (clipped) index into `values`.
    saw = len(values) * ((phase + numpy.arange(stop-start)/float(rate*period)) % 1.0)
    d[start:stop] = values[saw.astype(int).clip(0, len(values)-1)]
    return d
def noise(params, mean, sigma, start=0.0, stop=None):
    """Gaussian noise with the given mean and std dev over the active window
    [start, stop] (sec); samples outside the window are zero.

    params must contain 'rate' and 'nPts'; warnings go to params['message'].
    """
    rate = params['rate']
    nPts = params['nPts']
    params['message'] = ""
    # Argument validation.
    if not isNum(mean):
        raise Exception("Mean argument must be a number")
    if not isNum(sigma):
        raise Exception("Sigma argument must be a number")
    if not isNumOrNone(start):
        raise Exception("Start argument must be a number")
    if not isNumOrNone(stop):
        raise Exception("Stop argument must be a number")
    out = numpy.zeros(nPts)
    # Convert the active window from seconds to sample indices.
    start = 0 if start is None else int(start * rate)
    stop = nPts-1 if stop is None else int(stop * rate)
    if stop > nPts-1:
        params['message'] += "WARNING: Function is longer than generated waveform\n"
        stop = nPts-1
    out[start:stop] = numpy.random.normal(size=stop-start, loc=mean, scale=sigma)
    return out
| |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from sqlalchemy.ext.hybrid import hybrid_property
from indico.util.caching import memoize_request
from indico.util.locators import locator_property
from indico.web.flask.util import url_for
class ProposalGroupProxy(object):
    """Represents the object that the proposals can be grouped by.
    It provides all necessary methods for building the URLs, displaying the
    grouping information, etc.
    """
    # Attribute names looked up on the wrapped object for the (full) title.
    title_attr = 'title'
    full_title_attr = 'full_title'
    def __init__(self, group):
        # The wrapped group object.
        self.instance = group
    def __eq__(self, other):
        # Proxies compare equal to each other and to the bare wrapped object.
        if isinstance(other, ProposalGroupProxy):
            return self.instance == other.instance
        elif isinstance(other, type(self.instance)):
            return self.instance == other
        else:
            return False
    def __hash__(self):
        # Hash like the wrapped object so proxy and object can coexist as
        # equivalent dict/set keys (consistent with __eq__ above).
        return hash(self.instance)
    def __ne__(self, other):
        return not (self == other)
    @property
    def title(self):
        """The wrapped object's title."""
        return getattr(self.instance, self.title_attr)
    @property
    def full_title(self):
        """The wrapped object's full title, falling back to `title`."""
        return (getattr(self.instance, self.full_title_attr)
                if hasattr(self.instance, self.full_title_attr)
                else self.title)
    @locator_property
    def locator(self):
        # Delegate URL building to the wrapped object's locator.
        return self.instance.locator
    def __repr__(self):
        return '<ProposalGroupProxy: {}>'.format(self.instance)
class ProposalRevisionMixin(object):
    """Properties and methods of a proposal revision."""
    #: The attribute of the revision used to fetch the proposal object.
    proposal_attr = None
    #: Whether the reviewing process supports multiple revisions per proposal.
    #: If set to false it is assumed that the reviewing process supports only
    #: one revision per proposal.
    revisions_enabled = True
    @property
    def proposal(self):
        # Property to fetch the proposal object. If multiple revisions are
        # disabled, the revision is represented by the proposal object itself.
        return getattr(self, self.proposal_attr) if self.revisions_enabled else self
    def get_timeline(self, user=None):
        """Return the timeline items of this revision (must be overridden)."""
        raise NotImplementedError
    def get_reviews(self, group=None, user=None):
        """Return this revision's reviews, optionally filtered by group/user."""
        reviews = self.reviews[:]
        if group:
            reviews = [x for x in reviews if x.group == group]
        if user:
            reviews = [x for x in reviews if x.user == user]
        return reviews
    def get_reviewed_for_groups(self, user, include_reviewed=False):
        """Return the groups `user` can review for (must be overridden)."""
        raise NotImplementedError
    @memoize_request
    def get_reviewer_render_data(self, user):
        """Aggregate `user`'s review state for template rendering."""
        groups = self.get_reviewed_for_groups(user, include_reviewed=True)
        reviews = {x.group: x for x in self.get_reviews(user=user)}
        # NOTE(review): dict.itervalues() is Python 2 only; matches this
        # codebase's Python 2 era but would break under Python 3.
        reviewed_groups = {x.group for x in reviews.itervalues()}
        missing_groups = groups - reviewed_groups
        return {'groups': groups,
                'missing_groups': missing_groups,
                'reviewed_groups': reviewed_groups,
                'reviews': reviews}
class ProposalMixin(object):
    """Classes that represent a proposal object should extend this class (ex:
    Abstract, Paper).
    """

    #: A unique identifier to handle rendering differences between proposal
    #: types
    proposal_type = None
    #: Attribute to retrieve the object with access to the reviewing settings
    call_for_proposals_attr = None
    #: Whether there is support for multiple revisions per proposal or just one
    revisions_enabled = True

    # Endpoint names used to build the action URLs below.
    delete_comment_endpoint = None
    create_comment_endpoint = None
    edit_comment_endpoint = None
    create_review_endpoint = None
    edit_review_endpoint = None
    create_judgment_endpoint = None

    @property
    def cfp(self):
        """The call-for-proposals object holding the reviewing settings."""
        return getattr(self.event, self.call_for_proposals_attr)

    @property
    def is_in_final_state(self):
        raise NotImplementedError

    def get_revisions(self):
        """All revisions of this proposal; the proposal itself acts as the
        single revision when revisions are disabled."""
        if not self.revisions_enabled:
            return [self]
        raise NotImplementedError

    def get_last_revision(self):
        """The most recent revision; the proposal itself when revisions
        are disabled."""
        if not self.revisions_enabled:
            return self
        raise NotImplementedError

    def can_comment(self, user):
        raise NotImplementedError

    def can_review(self, user, check_state=False):
        raise NotImplementedError

    def get_delete_comment_url(self, comment):
        """URL for deleting the given comment."""
        return url_for(self.delete_comment_endpoint, comment)

    def get_save_comment_url(self, comment=None):
        """URL for saving a comment: edit if one is given, create otherwise."""
        if comment:
            return url_for(self.edit_comment_endpoint, comment)
        return url_for(self.create_comment_endpoint, self)

    def get_save_review_url(self, group=None, review=None):
        """URL for saving a review: edit if one is given, create otherwise."""
        if review:
            return url_for(self.edit_review_endpoint, review)
        return url_for(self.create_review_endpoint, self, group)

    def get_save_judgment_url(self):
        """URL for creating a judgment on this proposal."""
        return url_for(self.create_judgment_endpoint, self)
class ProposalCommentMixin(object):
    """Mixin for comments made on a proposal."""

    #: Identifier used when rendering this object in the timeline.
    timeline_item_type = 'comment'

    def can_edit(self, user):
        """Whether `user` may edit this comment (must be overridden)."""
        raise NotImplementedError
class ProposalReviewMixin(object):
    """Mixin for proposal reviews.

    Classes that represent a review of a proposal should extend this class
    (ex: AbstractReview, PaperReview).
    """

    #: A unique identifier to handle rendering differences between timeline
    #: items
    timeline_item_type = 'review'
    #: The revision object that the review refers to
    revision_attr = None
    #: Object used to group reviews together
    group_attr = None
    #: Proxy class to provide the necessary properties and methods to the
    #: review grouping object
    group_proxy_cls = ProposalGroupProxy

    @hybrid_property
    def revision(self):
        """The revision this review refers to, looked up via `revision_attr`."""
        return getattr(self, self.revision_attr)

    @property
    def group(self):
        """The review's group, wrapped in :attr:`group_proxy_cls`."""
        raw_group = getattr(self, self.group_attr)
        return self.group_proxy_cls(raw_group)

    @property
    def score(self):
        """Numeric score of the review; ``None`` unless overridden."""
        return None

    def can_edit(self, user):
        raise NotImplementedError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.