gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.tests.unit.virt.hyperv import test_vhdutils
from nova.virt.hyperv import constants
from nova.virt.hyperv import vhdutilsv2
from nova.virt.hyperv import vmutils
class VHDUtilsV2TestCase(test_vhdutils.VHDUtilsBaseTestCase):
    """Unit tests for the Hyper-V VHDUtilsV2 class.

    All WMI interactions go through a mocked ``_conn``; no Hyper-V host
    is required.  Fake VHD/VHDX geometry values come from the class
    constants below and from the base test case.
    """

    # Fake VHDX geometry constants.  The ``L`` suffixes are Python 2 long
    # literals, matching the 64-bit values the real WMI API returns.
    _FAKE_BLOCK_SIZE = 33554432L
    _FAKE_LOG_SIZE = 1048576
    _FAKE_LOGICAL_SECTOR_SIZE = 4096
    _FAKE_METADATA_SIZE = 1048576
    _FAKE_PHYSICAL_SECTOR_SIZE = 4096L

    def setUp(self):
        """Create a VHDUtilsV2 with mocked WMI connection and vmutils."""
        super(VHDUtilsV2TestCase, self).setUp()
        self._vhdutils = vhdutilsv2.VHDUtilsV2()
        # Mock out the WMI connection and the VM utils helper so the
        # tests never touch a real hypervisor.
        self._vhdutils._conn = mock.MagicMock()
        self._vhdutils._vmutils = mock.MagicMock()
        self._fake_file_handle = mock.MagicMock()
        # Canonical parsed-VHD-info dict used by several tests below.
        self._fake_vhd_info = {
            'Path': self._FAKE_VHD_PATH,
            'ParentPath': self._FAKE_PARENT_PATH,
            'Format': self._FAKE_FORMAT,
            'MaxInternalSize': self._FAKE_MAX_INTERNAL_SIZE,
            'Type': self._FAKE_TYPE,
            'BlockSize': self._FAKE_BLOCK_SIZE,
            'LogicalSectorSize': self._FAKE_LOGICAL_SECTOR_SIZE,
            'PhysicalSectorSize': self._FAKE_PHYSICAL_SECTOR_SIZE}

    def _mock_get_vhd_info(self):
        """Make the image service return the canned VHD info XML blob."""
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.GetVirtualHardDiskSettingData.return_value = (
            self._FAKE_JOB_PATH, self._FAKE_RET_VAL, self._FAKE_VHD_INFO_XML)

    def test_get_vhd_info(self):
        """get_vhd_info parses the WMI XML into the expected dict."""
        self._mock_get_vhd_info()
        vhd_info = self._vhdutils.get_vhd_info(self._FAKE_VHD_PATH)
        self.assertEqual(self._FAKE_VHD_PATH, vhd_info['Path'])
        self.assertEqual(self._FAKE_PARENT_PATH, vhd_info['ParentPath'])
        self.assertEqual(self._FAKE_FORMAT, vhd_info['Format'])
        self.assertEqual(self._FAKE_MAX_INTERNAL_SIZE,
                         vhd_info['MaxInternalSize'])
        self.assertEqual(self._FAKE_TYPE, vhd_info['Type'])

    def test_get_vhd_info_no_parent(self):
        """A VHD with no parent yields ParentPath=None, not empty string."""
        # Blank out the parent path in the canned XML to simulate a
        # non-differencing disk.
        fake_vhd_xml_no_parent = self._FAKE_VHD_INFO_XML.replace(
            self._FAKE_PARENT_PATH, "")
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.GetVirtualHardDiskSettingData.return_value = (
            self._FAKE_JOB_PATH, self._FAKE_RET_VAL, fake_vhd_xml_no_parent)
        vhd_info = self._vhdutils.get_vhd_info(self._FAKE_VHD_PATH)
        self.assertEqual(self._FAKE_VHD_PATH, vhd_info['Path'])
        self.assertIsNone(vhd_info['ParentPath'])
        self.assertEqual(self._FAKE_FORMAT, vhd_info['Format'])
        self.assertEqual(self._FAKE_MAX_INTERNAL_SIZE,
                         vhd_info['MaxInternalSize'])
        self.assertEqual(self._FAKE_TYPE, vhd_info['Type'])

    def test_create_dynamic_vhd(self):
        """create_dynamic_vhd delegates to CreateVirtualHardDisk."""
        self._vhdutils.get_vhd_info = mock.MagicMock(
            return_value={'Format': self._FAKE_FORMAT})
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.CreateVirtualHardDisk.return_value = (self._FAKE_JOB_PATH,
                                                           self._FAKE_RET_VAL)
        self._vhdutils.create_dynamic_vhd(self._FAKE_VHD_PATH,
                                          self._FAKE_MAX_INTERNAL_SIZE,
                                          constants.DISK_FORMAT_VHDX)
        self.assertTrue(mock_img_svc.CreateVirtualHardDisk.called)

    def test_create_differencing_vhd(self):
        """create_differencing_vhd delegates to CreateVirtualHardDisk."""
        self._vhdutils.get_vhd_info = mock.MagicMock(
            return_value={'ParentPath': self._FAKE_PARENT_PATH,
                          'Format': self._FAKE_FORMAT})
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.CreateVirtualHardDisk.return_value = (self._FAKE_JOB_PATH,
                                                           self._FAKE_RET_VAL)
        self._vhdutils.create_differencing_vhd(self._FAKE_VHD_PATH,
                                               self._FAKE_PARENT_PATH)
        self.assertTrue(mock_img_svc.CreateVirtualHardDisk.called)

    def test_reconnect_parent_vhd(self):
        """reconnect_parent_vhd rewrites ParentPath in the setting data XML."""
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        fake_new_parent_path = 'fake_new_parent_path'
        self._vhdutils._get_vhd_info_xml = mock.MagicMock(
            return_value=self._FAKE_VHD_INFO_XML)
        mock_img_svc.SetVirtualHardDiskSettingData.return_value = (
            self._FAKE_JOB_PATH, self._FAKE_RET_VAL)
        self._vhdutils.reconnect_parent_vhd(self._FAKE_VHD_PATH,
                                            fake_new_parent_path)
        # The call must pass the original XML with only the parent path
        # substituted.
        expected_virt_disk_data = self._FAKE_VHD_INFO_XML.replace(
            self._FAKE_PARENT_PATH, fake_new_parent_path)
        mock_img_svc.SetVirtualHardDiskSettingData.assert_called_once_with(
            VirtualDiskSettingData=expected_virt_disk_data)

    def test_reconnect_parent_vhd_exception(self):
        # Test that reconnect_parent_vhd raises an exception if the
        # vhd info XML does not contain the ParentPath property.
        fake_vhd_info_xml = self._FAKE_VHD_INFO_XML.replace('ParentPath',
                                                            'FakeParentPath')
        self._vhdutils._get_vhd_info_xml = mock.Mock(
            return_value=fake_vhd_info_xml)
        self.assertRaises(vmutils.HyperVException,
                          self._vhdutils.reconnect_parent_vhd,
                          self._FAKE_VHD_PATH,
                          mock.sentinel.new_parent_path)

    def test_resize_vhd(self):
        """resize_vhd converts the requested size and calls the WMI resize."""
        mock_img_svc = self._vhdutils._conn.Msvm_ImageManagementService()[0]
        mock_img_svc.ResizeVirtualHardDisk.return_value = (self._FAKE_JOB_PATH,
                                                           self._FAKE_RET_VAL)
        self._vhdutils.get_internal_vhd_size_by_file_size = mock.MagicMock(
            return_value=self._FAKE_MAX_INTERNAL_SIZE)
        self._vhdutils.resize_vhd(self._FAKE_VHD_PATH,
                                  self._FAKE_MAX_INTERNAL_SIZE)
        mock_img_svc.ResizeVirtualHardDisk.assert_called_once_with(
            Path=self._FAKE_VHD_PATH,
            MaxInternalSize=self._FAKE_MAX_INTERNAL_SIZE)
        # The requested size must have been translated through
        # get_internal_vhd_size_by_file_size exactly once.
        self.mock_get = self._vhdutils.get_internal_vhd_size_by_file_size
        self.mock_get.assert_called_once_with(self._FAKE_VHD_PATH,
                                              self._FAKE_MAX_INTERNAL_SIZE)

    def _test_get_vhdx_internal_size(self, vhd_type):
        """Set up get_vhd_info mocks for the given VHD type.

        NOTE(review): this helper only configures mocks and performs no
        assertions of its own; it looks truncated relative to upstream —
        confirm against the canonical nova test module.
        """
        self._vhdutils.get_vhd_info = mock.MagicMock()
        self._vhdutils.get_vhd_parent_path = mock.Mock(
            return_value=self._FAKE_PARENT_PATH)
        # Type 4 is presumably the differencing-disk type: first lookup
        # returns the child, the second the parent's full info.
        if vhd_type == 4:
            self._vhdutils.get_vhd_info.side_effect = [
                {'Type': vhd_type}, self._fake_vhd_info]
        else:
            self._vhdutils.get_vhd_info.return_value = self._fake_vhd_info

    @mock.patch('nova.virt.hyperv.vhdutils.VHDUtils.get_vhd_format')
    def test_get_vhdx_internal_size(self, mock_get_vhd_format):
        """Internal size = requested file size minus one block of overhead."""
        mock_get_vhd_format.return_value = constants.DISK_FORMAT_VHDX
        self._mock_get_vhd_info()
        # Stub out the VHDX structure parsers so no real file is read.
        self._vhdutils._get_vhdx_log_size = mock.MagicMock(
            return_value=self._FAKE_LOG_SIZE)
        self._vhdutils._get_vhdx_metadata_size_and_offset = mock.MagicMock(
            return_value=(self._FAKE_METADATA_SIZE, 1024))
        self._vhdutils._get_vhdx_block_size = mock.MagicMock(
            return_value=self._FAKE_BLOCK_SIZE)
        file_mock = mock.MagicMock()
        # Python 2: the builtin namespace is '__builtin__'.
        with mock.patch('__builtin__.open', file_mock):
            internal_size = (
                self._vhdutils.get_internal_vhd_size_by_file_size(
                    self._FAKE_VHD_PATH, self._FAKE_MAX_INTERNAL_SIZE))
        self.assertEqual(self._FAKE_MAX_INTERNAL_SIZE - self._FAKE_BLOCK_SIZE,
                         internal_size)

    def test_get_vhdx_internal_size_dynamic(self):
        self._test_get_vhdx_internal_size(3)

    def test_get_vhdx_internal_size_differencing(self):
        self._test_get_vhdx_internal_size(4)

    def test_get_vhdx_current_header(self):
        """The header with the larger sequence number is the current one."""
        VHDX_HEADER_OFFSETS = [64 * 1024, 128 * 1024]
        # Little-endian uint64 sequence numbers: header 2 (value 2) wins.
        fake_sequence_numbers = ['\x01\x00\x00\x00\x00\x00\x00\x00',
                                 '\x02\x00\x00\x00\x00\x00\x00\x00']
        self._fake_file_handle.read = mock.MagicMock(
            side_effect=fake_sequence_numbers)
        offset = self._vhdutils._get_vhdx_current_header_offset(
            self._fake_file_handle)
        self.assertEqual(offset, VHDX_HEADER_OFFSETS[1])

    def test_get_vhdx_metadata_size(self):
        """Metadata offset and size are decoded from little-endian bytes."""
        fake_metadata_offset = '\x01\x00\x00\x00\x00\x00\x00\x00'
        fake_metadata_size = '\x01\x00\x00\x00'
        self._fake_file_handle.read = mock.MagicMock(
            side_effect=[fake_metadata_offset, fake_metadata_size])
        metadata_size, metadata_offset = (
            self._vhdutils._get_vhdx_metadata_size_and_offset(
                self._fake_file_handle))
        self.assertEqual(metadata_size, 1)
        self.assertEqual(metadata_offset, 1)

    def test_get_block_size(self):
        """Block size is read from the metadata region as a uint32."""
        self._vhdutils._get_vhdx_metadata_size_and_offset = mock.MagicMock(
            return_value=(self._FAKE_METADATA_SIZE, 1024))
        fake_block_size = '\x01\x00\x00\x00'
        self._fake_file_handle.read = mock.MagicMock(
            return_value=fake_block_size)
        block_size = self._vhdutils._get_vhdx_block_size(
            self._fake_file_handle)
        self.assertEqual(block_size, 1)

    def test_get_log_size(self):
        """Log size is read from the current header as a uint32."""
        fake_current_header_offset = 64 * 1024
        self._vhdutils._get_vhdx_current_header_offset = mock.MagicMock(
            return_value=fake_current_header_offset)
        fake_log_size = '\x01\x00\x00\x00'
        self._fake_file_handle.read = mock.MagicMock(
            return_value=fake_log_size)
        log_size = self._vhdutils._get_vhdx_log_size(self._fake_file_handle)
        self.assertEqual(log_size, 1)

    def test_get_supported_vhd_format(self):
        """V2 utils prefer VHDX as the best supported disk format."""
        fmt = self._vhdutils.get_best_supported_vhd_format()
        self.assertEqual(constants.DISK_FORMAT_VHDX, fmt)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GBDT train function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.contrib import layers
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import tree_config_pb2
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.training.functions import gbdt_batch
from tensorflow.contrib.boosted_trees.python.utils import losses
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resources
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
def _squared_loss(label, unused_weights, predictions):
  """Per-example squared error, summed over the logit dimension.

  The weights argument is accepted for loss-signature compatibility but
  deliberately ignored.
  """
  residuals = predictions - label
  return math_ops.reduce_sum(
      math_ops.square(residuals), 1, keep_dims=True)
class GbdtTest(test_util.TensorFlowTestCase):
  def setUp(self):
    """Standard fixture setup; no per-test state beyond the base class."""
    super(GbdtTest, self).setUp()
  def testExtractFeatures(self):
    """Tests feature extraction."""
    with self.test_session():
      # One dense float feature plus one sparse float and one sparse int
      # feature, all zero-valued; no feature columns (None) means the raw
      # dict keys are used as feature names.
      features = {}
      features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32)
      features["sparse_float"] = sparse_tensor.SparseTensor(
          array_ops.zeros([2, 2], dtypes.int64),
          array_ops.zeros([2], dtypes.float32),
          array_ops.zeros([2], dtypes.int64))
      features["sparse_int"] = sparse_tensor.SparseTensor(
          array_ops.zeros([2, 2], dtypes.int64),
          array_ops.zeros([2], dtypes.int64),
          array_ops.zeros([2], dtypes.int64))
      (fc_names, dense_floats, sparse_float_indices, sparse_float_values,
       sparse_float_shapes, sparse_int_indices, sparse_int_values,
       sparse_int_shapes) = (gbdt_batch.extract_features(features, None))
      # Each feature lands in exactly one output bucket.
      self.assertEqual(len(fc_names), 3)
      self.assertAllEqual(fc_names,
                          ["dense_float", "sparse_float", "sparse_int"])
      self.assertEqual(len(dense_floats), 1)
      self.assertEqual(len(sparse_float_indices), 1)
      self.assertEqual(len(sparse_float_values), 1)
      self.assertEqual(len(sparse_float_shapes), 1)
      self.assertEqual(len(sparse_int_indices), 1)
      self.assertEqual(len(sparse_int_values), 1)
      self.assertEqual(len(sparse_int_shapes), 1)
      # Extraction must be a pass-through of the original tensors.
      self.assertAllEqual(dense_floats[0].eval(),
                          features["dense_float"].eval())
      self.assertAllEqual(sparse_float_indices[0].eval(),
                          features["sparse_float"].indices.eval())
      self.assertAllEqual(sparse_float_values[0].eval(),
                          features["sparse_float"].values.eval())
      self.assertAllEqual(sparse_float_shapes[0].eval(),
                          features["sparse_float"].dense_shape.eval())
      self.assertAllEqual(sparse_int_indices[0].eval(),
                          features["sparse_int"].indices.eval())
      self.assertAllEqual(sparse_int_values[0].eval(),
                          features["sparse_int"].values.eval())
      self.assertAllEqual(sparse_int_shapes[0].eval(),
                          features["sparse_int"].dense_shape.eval())
  def testExtractFeaturesWithTransformation(self):
    """Tests feature extraction with feature-column transformations applied."""
    with self.test_session():
      features = {}
      features["dense_float"] = array_ops.zeros([2, 1], dtypes.float32)
      features["sparse_float"] = sparse_tensor.SparseTensor(
          array_ops.zeros([2, 2], dtypes.int64),
          array_ops.zeros([2], dtypes.float32),
          array_ops.zeros([2], dtypes.int64))
      # String-valued sparse feature; the hash-bucket column below maps
      # its values to int buckets.
      features["sparse_categorical"] = sparse_tensor.SparseTensor(
          array_ops.zeros([2, 2], dtypes.int64),
          array_ops.zeros(
              [2], dtypes.string), array_ops.zeros([2], dtypes.int64))
      feature_columns = set()
      feature_columns.add(layers.real_valued_column("dense_float"))
      feature_columns.add(
          layers.feature_column._real_valued_var_len_column(
              "sparse_float", is_sparse=True))
      feature_columns.add(
          feature_column_lib.sparse_column_with_hash_bucket(
              "sparse_categorical", hash_bucket_size=1000000))
      (fc_names, dense_floats, sparse_float_indices, sparse_float_values,
       sparse_float_shapes, sparse_int_indices, sparse_int_values,
       sparse_int_shapes) = (gbdt_batch.extract_features(
           features, feature_columns))
      self.assertEqual(len(fc_names), 3)
      self.assertAllEqual(fc_names,
                          ["dense_float", "sparse_float", "sparse_categorical"])
      self.assertEqual(len(dense_floats), 1)
      self.assertEqual(len(sparse_float_indices), 1)
      self.assertEqual(len(sparse_float_values), 1)
      self.assertEqual(len(sparse_float_shapes), 1)
      self.assertEqual(len(sparse_int_indices), 1)
      self.assertEqual(len(sparse_int_values), 1)
      self.assertEqual(len(sparse_int_shapes), 1)
      self.assertAllEqual(dense_floats[0].eval(),
                          features["dense_float"].eval())
      self.assertAllEqual(sparse_float_indices[0].eval(),
                          features["sparse_float"].indices.eval())
      self.assertAllEqual(sparse_float_values[0].eval(),
                          features["sparse_float"].values.eval())
      self.assertAllEqual(sparse_float_shapes[0].eval(),
                          features["sparse_float"].dense_shape.eval())
      self.assertAllEqual(sparse_int_indices[0].eval(),
                          features["sparse_categorical"].indices.eval())
      # 397263 is the (deterministic) hash bucket the empty string maps
      # to with hash_bucket_size=1000000.
      self.assertAllEqual(sparse_int_values[0].eval(), [397263, 397263])
      self.assertAllEqual(sparse_int_shapes[0].eval(),
                          features["sparse_categorical"].dense_shape.eval())
  def testTrainFnChiefNoBiasCentering(self):
    """Tests the train function running on chief without bias centering."""
    with self.test_session() as sess:
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=True,
          num_ps_replicas=0,
          center_bias=False,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          logits_dimension=1, features=features)
      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      # The stamp guards against stale updates; train bumps it each step.
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)
      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp,
          "num_trees": 12,
      }
      labels = array_ops.ones([4, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()
      # On first run, expect no splits to be chosen because the quantile
      # buckets will not be ready.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEquals(len(output.trees), 0)
      self.assertEquals(len(output.tree_weights), 0)
      self.assertEquals(stamp_token.eval(), 1)
      # Update the stamp to be able to run a second time.
      sess.run([ensemble_stamp.assign_add(1)])
      # On second run, expect a trivial split to be chosen to basically
      # predict the average.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEquals(len(output.trees), 1)
      # Tree weight equals the configured learning rate.
      self.assertAllClose(output.tree_weights, [0.1])
      self.assertEquals(stamp_token.eval(), 2)
      expected_tree = """
          nodes {
            dense_float_binary_split {
              threshold: 1.0
              left_id: 1
              right_id: 2
            }
            node_metadata {
              gain: 0
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.25
              }
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.0
              }
            }
          }"""
      self.assertProtoEquals(expected_tree, output.trees[0])
  def testTrainFnChiefScalingNumberOfExamples(self):
    """Tests chief training where examples_per_layer is a callable.

    Same scenario as testTrainFnChiefNoBiasCentering, but the number of
    examples per layer scales with the layer index (2**layer).
    """
    with self.test_session() as sess:
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      # examples_per_layer may be a function of the layer index.
      num_examples_fn = (
          lambda layer: math_ops.pow(math_ops.cast(2, dtypes.int64), layer) * 1)
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=True,
          num_ps_replicas=0,
          center_bias=False,
          ensemble_handle=ensemble_handle,
          examples_per_layer=num_examples_fn,
          learner_config=learner_config,
          logits_dimension=1, features=features)
      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)
      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp,
          "num_trees": 12,
      }
      labels = array_ops.ones([4, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()
      # On first run, expect no splits to be chosen because the quantile
      # buckets will not be ready.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEquals(len(output.trees), 0)
      self.assertEquals(len(output.tree_weights), 0)
      self.assertEquals(stamp_token.eval(), 1)
      # Update the stamp to be able to run a second time.
      sess.run([ensemble_stamp.assign_add(1)])
      # On second run, expect a trivial split to be chosen to basically
      # predict the average.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEquals(len(output.trees), 1)
      self.assertAllClose(output.tree_weights, [0.1])
      self.assertEquals(stamp_token.eval(), 2)
      expected_tree = """
          nodes {
            dense_float_binary_split {
              threshold: 1.0
              left_id: 1
              right_id: 2
            }
            node_metadata {
              gain: 0
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.25
              }
            }
          }
          nodes {
            leaf {
              vector {
                value: 0.0
              }
            }
          }"""
      self.assertProtoEquals(expected_tree, output.trees[0])
  def testTrainFnChiefWithBiasCentering(self):
    """Tests the train function running on chief with bias centering."""
    with self.test_session():
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=True,
          num_ps_replicas=0,
          center_bias=True,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          logits_dimension=1, features=features)
      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)
      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp,
          "num_trees": 12,
      }
      labels = array_ops.ones([4, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()
      # On first run, expect bias to be centered.
      train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      # With bias centering the first "tree" is a single bias leaf with
      # weight 1.0 rather than a learned split.
      expected_tree = """
          nodes {
            leaf {
              vector {
                value: 0.25
              }
            }
          }"""
      self.assertEquals(len(output.trees), 1)
      self.assertAllEqual(output.tree_weights, [1.0])
      self.assertProtoEquals(expected_tree, output.trees[0])
      self.assertEquals(stamp_token.eval(), 1)
  def testTrainFnNonChiefNoBiasCentering(self):
    """Tests the train function running on worker without bias centering."""
    with self.test_session():
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
      # is_chief=False: this worker may only accumulate statistics.
      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=False,
          num_ps_replicas=0,
          center_bias=False,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          logits_dimension=1, features=features)
      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)
      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp
      }
      labels = array_ops.ones([4, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()
      # Regardless of how many times the train op is run, a non-chief worker
      # can only accumulate stats so the tree ensemble never changes.
      for _ in range(5):
        train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEquals(len(output.trees), 0)
      self.assertEquals(len(output.tree_weights), 0)
      self.assertEquals(stamp_token.eval(), 0)
  def testTrainFnNonChiefWithCentering(self):
    """Tests the train function running on worker with bias centering."""
    with self.test_session():
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
      # Even with center_bias=True, a non-chief worker must not mutate
      # the ensemble.
      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=False,
          num_ps_replicas=0,
          center_bias=True,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          logits_dimension=1, features=features)
      predictions = array_ops.constant(
          [[0.0], [1.0], [0.0], [2.0]], dtype=dtypes.float32)
      partition_ids = array_ops.zeros([4], dtypes.int32)
      ensemble_stamp = variables.Variable(
          initial_value=0,
          name="ensemble_stamp",
          trainable=False,
          dtype=dtypes.int64)
      predictions_dict = {
          "predictions": predictions,
          "predictions_no_dropout": predictions,
          "partition_ids": partition_ids,
          "ensemble_stamp": ensemble_stamp
      }
      labels = array_ops.ones([4, 1], dtypes.float32)
      weights = array_ops.ones([4, 1], dtypes.float32)
      # Create train op.
      train_op = gbdt_model.train(
          loss=math_ops.reduce_mean(
              _squared_loss(labels, weights, predictions)),
          predictions_dict=predictions_dict,
          labels=labels)
      variables.global_variables_initializer().run()
      resources.initialize_resources(resources.shared_resources()).run()
      # Regardless of how many times the train op is run, a non-chief worker
      # can only accumulate stats so the tree ensemble never changes.
      for _ in range(5):
        train_op.run()
      stamp_token, serialized = model_ops.tree_ensemble_serialize(
          ensemble_handle)
      output = tree_config_pb2.DecisionTreeEnsembleConfig()
      output.ParseFromString(serialized.eval())
      self.assertEquals(len(output.trees), 0)
      self.assertEquals(len(output.tree_weights), 0)
      self.assertEquals(stamp_token.eval(), 0)
  def testPredictFn(self):
    """Tests the predict function."""
    with self.test_session() as sess:
      # Create ensemble with one bias node.
      ensemble_config = tree_config_pb2.DecisionTreeEnsembleConfig()
      text_format.Merge("""
          trees {
            nodes {
              leaf {
                vector {
                  value: 0.25
                }
              }
            }
          }
          tree_weights: 1.0
          tree_metadata {
            num_tree_weight_updates: 1
            num_layers_grown: 1
            is_finalized: true
          }""", ensemble_config)
      # Seed the ensemble variable at stamp 3 so predict must report
      # that stamp back.
      ensemble_handle = model_ops.tree_ensemble_variable(
          stamp_token=3,
          tree_ensemble_config=ensemble_config.SerializeToString(),
          name="tree_ensemble")
      resources.initialize_resources(resources.shared_resources()).run()
      learner_config = learner_pb2.LearnerConfig()
      learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
      learner_config.num_classes = 2
      learner_config.regularization.l1 = 0
      learner_config.regularization.l2 = 0
      learner_config.constraints.max_tree_depth = 1
      learner_config.constraints.min_node_weight = 0
      features = {}
      features["dense_float"] = array_ops.ones([4, 1], dtypes.float32)
      gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
          is_chief=False,
          num_ps_replicas=0,
          center_bias=True,
          ensemble_handle=ensemble_handle,
          examples_per_layer=1,
          learner_config=learner_config,
          logits_dimension=1, features=features)
      # Create predict op.
      mode = model_fn.ModeKeys.EVAL
      predictions_dict = sess.run(gbdt_model.predict(mode))
      self.assertEquals(predictions_dict["ensemble_stamp"], 3)
      # All 4 examples fall into the single bias leaf (value 0.25).
      self.assertAllClose(predictions_dict["predictions"], [[0.25], [0.25],
                                                            [0.25], [0.25]])
      self.assertAllClose(predictions_dict["partition_ids"], [0, 0, 0, 0])
def testTrainFnMulticlassFullHessian(self):
"""Tests the GBDT train for multiclass full hessian."""
with self.test_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
# Use full hessian multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.FULL_HESSIAN)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
# To make matrix inversible.
learner_config.regularization.l2 = 1e-5
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {}
batch_size = 3
features["dense_float"] = array_ops.constant(
[0.3, 1.5, 1.1], dtype=dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5, features=features)
predictions = array_ops.constant(
[[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.2]],
dtype=dtypes.float32)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
partition_ids = array_ops.zeros([batch_size], dtypes.int32)
ensemble_stamp = variables.Variable(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 0,
}
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEquals(len(output.trees), 0)
self.assertEquals(len(output.tree_weights), 0)
self.assertEquals(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
# We got 3 nodes: one parent and 2 leafs.
self.assertEqual(len(output.trees[0].nodes), 3)
self.assertAllClose(output.tree_weights, [1])
self.assertEquals(stamp_token.eval(), 2)
# Leafs should have a dense vector of size 5.
expected_leaf_1 = [-3.4480, -3.4429, 13.8490, -3.45, -3.4508]
expected_leaf_2 = [-1.2547, -1.3145, 1.52, 2.3875, -1.3264]
self.assertArrayNear(expected_leaf_1,
output.trees[0].nodes[1].leaf.vector.value, 1e-3)
self.assertArrayNear(expected_leaf_2,
output.trees[0].nodes[2].leaf.vector.value, 1e-3)
def testTrainFnMulticlassDiagonalHessian(self):
"""Tests the GBDT train for multiclass diagonal hessian."""
with self.test_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
# Use full hessian multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
# To make matrix inversible.
learner_config.regularization.l2 = 1e-5
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
batch_size = 3
features = {}
features["dense_float"] = array_ops.constant(
[0.3, 1.5, 1.1], dtype=dtypes.float32)
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5, features=features)
predictions = array_ops.constant(
[[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
[0.0, 0.0, 0.0, 0.0, 1.2]],
dtype=dtypes.float32)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
partition_ids = array_ops.zeros([batch_size], dtypes.int32)
ensemble_stamp = variables.Variable(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
"num_trees": 0,
}
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 0)
self.assertEqual(len(output.tree_weights), 0)
self.assertEqual(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
# We got 3 nodes: one parent and 2 leafs.
self.assertEqual(len(output.trees[0].nodes), 3)
self.assertAllClose(output.tree_weights, [1])
self.assertEqual(stamp_token.eval(), 2)
# Leafs should have a dense vector of size 5.
expected_leaf_1 = [-1.0354, -1.0107, 17.2976, -1.1313, -4.5023]
expected_leaf_2 = [-1.2924, -1.1376, 2.2042, 3.1052, -1.6269]
self.assertArrayNear(expected_leaf_1,
output.trees[0].nodes[1].leaf.vector.value, 1e-3)
self.assertArrayNear(expected_leaf_2,
output.trees[0].nodes[2].leaf.vector.value, 1e-3)
def testTrainFnMulticlassTreePerClass(self):
"""Tests the GBDT train for multiclass tree per class strategy."""
with self.test_session() as sess:
ensemble_handle = model_ops.tree_ensemble_variable(
stamp_token=0, tree_ensemble_config="", name="tree_ensemble")
learner_config = learner_pb2.LearnerConfig()
learner_config.learning_rate_tuner.fixed.learning_rate = 1
# Use full hessian multiclass strategy.
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
learner_config.num_classes = 5
learner_config.regularization.l1 = 0
# To make matrix inversible.
learner_config.regularization.l2 = 1e-5
learner_config.constraints.max_tree_depth = 1
learner_config.constraints.min_node_weight = 0
features = {
"dense_float": array_ops.constant(
[[1.0], [1.5], [2.0]], dtypes.float32),
}
gbdt_model = gbdt_batch.GradientBoostedDecisionTreeModel(
is_chief=True,
num_ps_replicas=0,
center_bias=False,
ensemble_handle=ensemble_handle,
examples_per_layer=1,
learner_config=learner_config,
logits_dimension=5, features=features)
batch_size = 3
predictions = array_ops.constant(
[[0.0, -1.0, 0.5, 1.2, 3.1], [1.0, 0.0, 0.8, 0.3, 1.0],
[0.0, 0.0, 0.0, 2.0, 1.2]],
dtype=dtypes.float32)
labels = array_ops.constant([[2], [2], [3]], dtype=dtypes.float32)
weights = array_ops.ones([batch_size, 1], dtypes.float32)
partition_ids = array_ops.zeros([batch_size], dtypes.int32)
ensemble_stamp = variables.Variable(
initial_value=0,
name="ensemble_stamp",
trainable=False,
dtype=dtypes.int64)
predictions_dict = {
"predictions": predictions,
"predictions_no_dropout": predictions,
"partition_ids": partition_ids,
"ensemble_stamp": ensemble_stamp,
# This should result in a tree built for a class 2.
"num_trees": 13,
}
# Create train op.
train_op = gbdt_model.train(
loss=math_ops.reduce_mean(
losses.per_example_maxent_loss(
labels,
weights,
predictions,
num_classes=learner_config.num_classes)[0]),
predictions_dict=predictions_dict,
labels=labels)
variables.global_variables_initializer().run()
resources.initialize_resources(resources.shared_resources()).run()
# On first run, expect no splits to be chosen because the quantile
# buckets will not be ready.
train_op.run()
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 0)
self.assertEqual(len(output.tree_weights), 0)
self.assertEqual(stamp_token.eval(), 1)
# Update the stamp to be able to run a second time.
sess.run([ensemble_stamp.assign_add(1)])
# On second run, expect a trivial split to be chosen to basically
# predict the average.
train_op.run()
output = tree_config_pb2.DecisionTreeEnsembleConfig()
output.ParseFromString(serialized.eval())
stamp_token, serialized = model_ops.tree_ensemble_serialize(
ensemble_handle)
output.ParseFromString(serialized.eval())
self.assertEqual(len(output.trees), 1)
self.assertAllClose(output.tree_weights, [1])
self.assertEqual(stamp_token.eval(), 2)
# One node for a split, two children nodes.
self.assertEqual(3, len(output.trees[0].nodes))
# Leafs will have a sparse vector for class 3.
self.assertEqual(1,
len(output.trees[0].nodes[1].leaf.sparse_vector.index))
self.assertEqual(3, output.trees[0].nodes[1].leaf.sparse_vector.index[0])
self.assertAlmostEqual(
-1.13134455681, output.trees[0].nodes[1].leaf.sparse_vector.value[0])
self.assertEqual(1,
len(output.trees[0].nodes[2].leaf.sparse_vector.index))
self.assertEqual(3, output.trees[0].nodes[2].leaf.sparse_vector.index[0])
self.assertAlmostEqual(
0.893284678459, output.trees[0].nodes[2].leaf.sparse_vector.value[0])
# Allow running this test file directly, outside the test runner.
if __name__ == "__main__":
  googletest.main()
| |
#
# Test script for the curses module
#
# This script doesn't actually display anything very coherent, but it
# does call (nearly) every method and function.
#
# Functions not tested: {def,reset}_{shell,prog}_mode, getch(), getstr(),
# init_color()
# Only called, not tested: getmouse(), ungetmouse()
#
import os
import sys
import tempfile
import unittest
from test.test_support import requires, import_module, verbose, run_unittest
# Optionally test curses module. This currently requires that the
# 'curses' resource be given on the regrtest command line using the -u
# option. If not available, nothing after this line will be executed.
requires('curses')
# If either of these don't exist, skip the tests.
# import_module() raises SkipTest (skipping the whole file) when the
# module cannot be imported.
curses = import_module('curses')
import_module('curses.panel')
import_module('curses.ascii')
def requires_curses_func(name):
    """Decorator: skip the decorated test unless ``curses.<name>`` exists."""
    available = hasattr(curses, name)
    return unittest.skipUnless(available, 'requires curses.%s' % name)
# Terminal type used by the skip-decorators on TestCurses below;
# initscr() on a missing/unknown $TERM may call exit().
term = os.environ.get('TERM')
# If newterm was supported we could use it instead of initscr and not exit
@unittest.skipIf(not term or term == 'unknown',
                 "$TERM=%r, calling initscr() may cause exit" % term)
@unittest.skipIf(sys.platform == "cygwin",
                 "cygwin's curses mostly just hangs")
class TestCurses(unittest.TestCase):
    """Smoke-tests for the curses module: call (nearly) every window and
    module-level function and check that nothing blows up.  The output on
    the terminal is not meant to be coherent.
    """

    @classmethod
    def setUpClass(cls):
        if not sys.__stdout__.isatty():
            # Temporary skip tests on non-tty
            raise unittest.SkipTest('sys.__stdout__ is not a tty')
            # NOTE(review): the four lines below are unreachable while the
            # SkipTest above is active; kept for when non-tty runs are
            # re-enabled.
            cls.tmp = tempfile.TemporaryFile()
            fd = cls.tmp.fileno()
        else:
            cls.tmp = None
            fd = sys.__stdout__.fileno()
        # testing setupterm() inside initscr/endwin
        # causes terminal breakage
        curses.setupterm(fd=fd)

    @classmethod
    def tearDownClass(cls):
        if cls.tmp:
            cls.tmp.close()
            del cls.tmp

    def setUp(self):
        if verbose:
            # just to make the test output a little more readable
            print()
        self.stdscr = curses.initscr()
        curses.savetty()

    def tearDown(self):
        curses.resetty()
        curses.endwin()

    def test_window_funcs(self):
        "Test the methods of windows"
        stdscr = self.stdscr
        win = curses.newwin(10,10)
        win = curses.newwin(5,5, 5,5)
        win2 = curses.newwin(15,15, 5,5)
        for meth in [stdscr.addch, stdscr.addstr]:
            # NOTE: ('a') is not a tuple; unpacking the one-char string
            # happens to pass 'a' as the single argument.
            for args in [('a'), ('a', curses.A_BOLD),
                         (4,4, 'a'), (5,5, 'a', curses.A_BOLD)]:
                meth(*args)
        # Zero-argument window methods.
        for meth in [stdscr.box, stdscr.clear, stdscr.clrtobot,
                     stdscr.clrtoeol, stdscr.cursyncup, stdscr.delch,
                     stdscr.deleteln, stdscr.erase, stdscr.getbegyx,
                     stdscr.getbkgd, stdscr.getkey, stdscr.getmaxyx,
                     stdscr.getparyx, stdscr.getyx, stdscr.inch,
                     stdscr.insertln, stdscr.instr, stdscr.is_wintouched,
                     win.noutrefresh, stdscr.redrawwin, stdscr.refresh,
                     stdscr.standout, stdscr.standend, stdscr.syncdown,
                     stdscr.syncup, stdscr.touchwin, stdscr.untouchwin]:
            meth()
        stdscr.addnstr('1234', 3)
        stdscr.addnstr('1234', 3, curses.A_BOLD)
        stdscr.addnstr(4,4, '1234', 3)
        stdscr.addnstr(5,5, '1234', 3, curses.A_BOLD)
        stdscr.attron(curses.A_BOLD)
        stdscr.attroff(curses.A_BOLD)
        stdscr.attrset(curses.A_BOLD)
        stdscr.bkgd(' ')
        stdscr.bkgd(' ', curses.A_REVERSE)
        stdscr.bkgdset(' ')
        stdscr.bkgdset(' ', curses.A_REVERSE)
        win.border(65, 66, 67, 68,
                   69, 70, 71, 72)
        win.border('|', '!', '-', '_',
                   '+', '\\', '#', '/')
        with self.assertRaises(TypeError,
                               msg="Expected win.border() to raise TypeError"):
            win.border(65, 66, 67, 68,
                       69, [], 71, 72)
        stdscr.clearok(1)
        win4 = stdscr.derwin(2,2)
        win4 = stdscr.derwin(1,1, 5,5)
        win4.mvderwin(9,9)
        stdscr.echochar('a')
        stdscr.echochar('a', curses.A_BOLD)
        stdscr.hline('-', 5)
        stdscr.hline('-', 5, curses.A_BOLD)
        stdscr.hline(1,1,'-', 5)
        stdscr.hline(1,1,'-', 5, curses.A_BOLD)
        stdscr.idcok(1)
        stdscr.idlok(1)
        stdscr.immedok(1)
        stdscr.insch('c')
        stdscr.insdelln(1)
        stdscr.insnstr('abc', 3)
        stdscr.insnstr('abc', 3, curses.A_BOLD)
        stdscr.insnstr(5, 5, 'abc', 3)
        stdscr.insnstr(5, 5, 'abc', 3, curses.A_BOLD)
        stdscr.insstr('def')
        stdscr.insstr('def', curses.A_BOLD)
        stdscr.insstr(5, 5, 'def')
        stdscr.insstr(5, 5, 'def', curses.A_BOLD)
        stdscr.is_linetouched(0)
        stdscr.keypad(1)
        stdscr.leaveok(1)
        stdscr.move(3,3)
        win.mvwin(2,2)
        stdscr.nodelay(1)
        stdscr.notimeout(1)
        win2.overlay(win)
        win2.overwrite(win)
        win2.overlay(win, 1, 2, 2, 1, 3, 3)
        win2.overwrite(win, 1, 2, 2, 1, 3, 3)
        stdscr.redrawln(1,2)
        stdscr.scrollok(1)
        stdscr.scroll()
        stdscr.scroll(2)
        stdscr.scroll(-3)
        stdscr.move(12, 2)
        stdscr.setscrreg(10,15)
        win3 = stdscr.subwin(10,10)
        win3 = stdscr.subwin(10,10, 5,5)
        stdscr.syncok(1)
        stdscr.timeout(5)
        stdscr.touchline(5,5)
        stdscr.touchline(5,5,0)
        stdscr.vline('a', 3)
        stdscr.vline('a', 3, curses.A_STANDOUT)
        stdscr.chgat(5, 2, 3, curses.A_BLINK)
        stdscr.chgat(3, curses.A_BOLD)
        stdscr.chgat(5, 8, curses.A_UNDERLINE)
        stdscr.chgat(curses.A_BLINK)
        stdscr.refresh()
        stdscr.vline(1,1, 'a', 3)
        stdscr.vline(1,1, 'a', 3, curses.A_STANDOUT)
        # NOTE(review): curses has no module-level 'resize'/'enclose', so
        # these hasattr guards are always false — confirm whether the
        # window methods were meant instead.
        if hasattr(curses, 'resize'):
            stdscr.resize()
        if hasattr(curses, 'enclose'):
            stdscr.enclose()

    def test_module_funcs(self):
        "Test module-level functions"
        for func in [curses.baudrate, curses.beep, curses.can_change_color,
                     curses.cbreak, curses.def_prog_mode, curses.doupdate,
                     curses.filter, curses.flash, curses.flushinp,
                     curses.has_colors, curses.has_ic, curses.has_il,
                     curses.isendwin, curses.killchar, curses.longname,
                     curses.nocbreak, curses.noecho, curses.nonl,
                     curses.noqiflush, curses.noraw,
                     curses.reset_prog_mode, curses.termattrs,
                     curses.termname, curses.erasechar, curses.getsyx]:
            func()
        # Functions that actually need arguments
        if curses.tigetstr("cnorm"):
            curses.curs_set(1)
        curses.delay_output(1)
        curses.echo() ; curses.echo(1)
        with tempfile.TemporaryFile() as f:
            self.stdscr.putwin(f)
            f.seek(0)
            curses.getwin(f)
        curses.halfdelay(1)
        curses.intrflush(1)
        curses.meta(1)
        curses.napms(100)
        curses.newpad(50,50)
        win = curses.newwin(5,5)
        win = curses.newwin(5,5, 1,1)
        curses.nl() ; curses.nl(1)
        curses.putp(b'abc')
        curses.qiflush()
        curses.raw() ; curses.raw(1)
        curses.setsyx(5,5)
        curses.tigetflag('hc')
        curses.tigetnum('co')
        curses.tigetstr('cr')
        curses.tparm(b'cr')
        curses.typeahead(sys.__stdin__.fileno())
        curses.unctrl('a')
        curses.ungetch('a')
        curses.use_env(1)

    # Functions only available on a few platforms
    def test_colors_funcs(self):
        if not curses.has_colors():
            # BUG FIX: TestCase has no skip() method; skipTest() is the
            # documented API (unittest).
            self.skipTest('requires colors support')
        curses.start_color()
        curses.init_pair(2, 1,1)
        curses.color_content(1)
        curses.color_pair(2)
        curses.pair_content(curses.COLOR_PAIRS - 1)
        curses.pair_number(0)
        if hasattr(curses, 'use_default_colors'):
            curses.use_default_colors()

    @requires_curses_func('keyname')
    def test_keyname(self):
        curses.keyname(13)

    @requires_curses_func('has_key')
    def test_has_key(self):
        curses.has_key(13)

    @requires_curses_func('getmouse')
    def test_getmouse(self):
        (availmask, oldmask) = curses.mousemask(curses.BUTTON1_PRESSED)
        if availmask == 0:
            # BUG FIX: was self.skip(), which does not exist on TestCase.
            self.skipTest('mouse stuff not available')
        curses.mouseinterval(10)
        # just verify these don't cause errors
        curses.ungetmouse(0, 0, 0, 0, curses.BUTTON1_PRESSED)
        m = curses.getmouse()

    def test_userptr_without_set(self):
        w = curses.newwin(10, 10)
        p = curses.panel.new_panel(w)
        # try to access userptr() before calling set_userptr() -- segfaults
        with self.assertRaises(curses.panel.error,
                               msg='userptr should fail since not set'):
            p.userptr()

    def test_userptr_memory_leak(self):
        w = curses.newwin(10, 10)
        p = curses.panel.new_panel(w)
        obj = object()
        nrefs = sys.getrefcount(obj)
        for i in range(100):
            p.set_userptr(obj)
        p.set_userptr(None)
        self.assertEqual(sys.getrefcount(obj), nrefs,
                         "set_userptr leaked references")

    def test_userptr_segfault(self):
        panel = curses.panel.new_panel(self.stdscr)
        class A:
            def __del__(self):
                panel.set_userptr(None)
        panel.set_userptr(A())
        panel.set_userptr(None)

    def test_new_curses_panel(self):
        panel = curses.panel.new_panel(self.stdscr)
        self.assertRaises(TypeError, type(panel))

    @requires_curses_func('is_term_resized')
    def test_is_term_resized(self):
        curses.is_term_resized(*self.stdscr.getmaxyx())

    @requires_curses_func('resize_term')
    def test_resize_term(self):
        curses.resize_term(*self.stdscr.getmaxyx())

    @requires_curses_func('resizeterm')
    def test_resizeterm(self):
        stdscr = self.stdscr
        lines, cols = curses.LINES, curses.COLS
        new_lines = lines - 1
        new_cols = cols + 1
        curses.resizeterm(new_lines, new_cols)
        self.assertEqual(curses.LINES, new_lines)
        self.assertEqual(curses.COLS, new_cols)

    def test_issue6243(self):
        curses.ungetch(1025)
        self.stdscr.getkey()

    def test_issue10570(self):
        b = curses.tparm(curses.tigetstr("cup"), 5, 3)
        self.assertIs(type(b), bytes)
class TestAscii(unittest.TestCase):
    """Tests for the pure-Python curses.ascii helpers."""

    def test_unctrl(self):
        # (input, expected): printable characters pass through, control
        # characters become the caret form, and meta-bit characters are
        # prefixed with '!'.
        cases = [
            ('a', 'a'),
            ('A', 'A'),
            (';', ';'),
            (' ', ' '),
            ('\x7f', '^?'),
            ('\n', '^J'),
            ('\0', '^@'),
            # Meta-bit characters
            ('\x8a', '!^J'),
            ('\xc1', '!A'),
        ]
        for ch, expected in cases:
            self.assertEqual(curses.ascii.unctrl(ch), expected)
def test_main():
    # Entry point for the regrtest framework; the 'curses' resource check
    # already happened at import time via requires().
    run_unittest(TestCurses, TestAscii)
# Allow running the file directly as well.
if __name__ == "__main__":
    unittest.main()
| |
'''dossier.fc Feature Collections
.. This software is released under an MIT/X11 open source license.
Copyright 2012-2015 Diffeo, Inc.
.. autoclass:: StringCounter
'''
from __future__ import absolute_import, division, print_function
try:
from collections import Counter
except ImportError:
from backport_collections import Counter
from collections import Mapping, MutableMapping
from functools import wraps
from dossier.fc.exceptions import ReadOnlyException, uni
def mutates(f):
    '''Decorator for methods that mutate a :class:`StringCounter`.
    Raises :exc:`~dossier.fc.exceptions.ReadOnlyException` when the
    counter is read-only; otherwise bumps the generation counter and
    delegates to the wrapped method.
    '''
    @wraps(f)
    def guarded(self, *args, **kwargs):
        if not self.read_only:
            self.next_generation()
            return f(self, *args, **kwargs)
        raise ReadOnlyException()
    return guarded
class StringCounter(Counter):
    '''Simple counter based on exact string matching.

    This is a subclass of :class:`collections.Counter` that includes a
    generation counter so that it can be used in a cache.

    :class:`StringCounter` is the default feature type in a feature
    collection, so you typically don't have to instantiate a
    :class:`StringCounter` explicitly::

        fc = FeatureCollection()
        fc['NAME']['John Smith'] += 1

    But instantiating directly works too::

        sc = StringCounter()
        sc['John Smith'] += 1
        fc = FeatureCollection({'NAME': sc})
        fc['NAME']['John Smith'] += 1
        assert fc['NAME']['John Smith'] == 2

    Note that instances of this class support all the methods defined
    for a :class:`collections.Counter`, but only the ones unique to
    :class:`StringCounter` are listed here.

    .. automethod:: __init__
    .. automethod:: truncate_most_common

    .. attribute:: read_only

        Flag indicating whether this collection is read-only.
        This flag always begins as :const:`False`, it cannot be set
        via the constructor for compatibility with
        :class:`collections.Counter`. If this flag is set, then any
        operations that mutate it will raise
        :exc:`~dossier.fc.exceptions.ReadOnlyException`.

    .. attribute:: generation

        Generation number for this counter instance.
        This number is incremented by every operation that
        mutates the counter object. If two collections are the
        same object and have the same generation number, then
        they are identical.

        Having this property allows a pair of `id(sc)` and the
        generation to be an immutable hashable key for things like
        memoization operations, accounting for the possibility of the
        counter changing over time.

        >>> sc = StringCounter({'a': 1})
        >>> cache = {(id(sc), sc.generation): 1}
        >>> (id(sc), sc.generation) in cache
        True
        >>> sc['a']
        1
        >>> (id(sc), sc.generation) in cache
        True
        >>> sc['a'] += 1
        >>> sc['a']
        2
        >>> (id(sc), sc.generation) in cache
        False
    '''
    current_generation = 0
    '''Class-static generation number.
    Each mutation of a StringCounter increments this generation,
    and sets the counter's current generation to this value.
    See :meth:`next_generation` for details.
    '''

    def __init__(self, *args, **kwargs):
        '''Initialize a :class:`StringCounter` with existing counts::

            >>> sc = StringCounter(a=4, b=2, c=0)
            >>> sc['b']
            2

        See the documentation for :class:`collections.Counter` for more
        examples.
        '''
        self.read_only = False
        self.generation = self.current_generation
        super(StringCounter, self).__init__(*args, **kwargs)

    def next_generation(self):
        '''Increment the generation counter on this collection.'''
        # BUG FIX: ``self.current_generation += 1`` read the class
        # attribute but wrote an *instance* attribute, silently shadowing
        # the documented class-static counter from then on.  Write through
        # the class so generations stay unique across all instances.
        StringCounter.current_generation += 1
        self.generation = self.current_generation

    @mutates
    def __delitem__(self, key):
        return super(StringCounter, self).__delitem__(key)

    @staticmethod
    def _fix_key(key):
        '''Normalize keys to Unicode strings.'''
        if isinstance(key, unicode):
            return key
        if isinstance(key, str):
            # On my system, the default encoding is `ascii`, so let's
            # explicitly say UTF-8?
            return unicode(key, 'utf-8')
        raise TypeError(key)

    @mutates
    def __setitem__(self, key, value):
        key = self._fix_key(key)
        return super(StringCounter, self).__setitem__(key, value)

    @mutates
    def pop(self, key):
        return super(StringCounter, self).pop(key)

    @mutates
    def popitem(self):
        # BUG FIX: dict.popitem() takes no arguments; the previous
        # signature ``popitem(self, key)`` always raised TypeError when
        # delegating to the base class, so this method could never work.
        return super(StringCounter, self).popitem()

    @mutates
    def subtract(self, other):
        return super(StringCounter, self).subtract(other)

    @mutates
    def update(self, iterable=None, **kwargs):
        # Force all keys into Unicode strings before calling base
        # class implementation; if kwargs is non-empty then the base
        # class will call this method again
        if iterable:
            if isinstance(iterable, Mapping):
                new_iterable = {}
                for (k, v) in iterable.iteritems():
                    new_iterable[self._fix_key(k)] = v
                iterable = new_iterable
            else:
                iterable = (self._fix_key(k) for k in iterable)
        return super(StringCounter, self).update(iterable, **kwargs)

    def __add__(self, other):
        # Counter arithmetic returns a plain Counter; re-wrap so callers
        # keep getting StringCounter semantics.
        result = super(StringCounter, self).__add__(other)
        return StringCounter(result)

    def __sub__(self, other):
        result = super(StringCounter, self).__sub__(other)
        return StringCounter(result)

    @mutates
    def __imul__(self, coef):
        # Only values change, so iterating the live key view is safe.
        for k in self.keys():
            self[k] *= coef
        return self

    @mutates
    def truncate_most_common(self, truncation_length):
        '''
        Sorts the counter and keeps only the most common items up to
        ``truncation_length`` in place.

        :type truncation_length: int
        '''
        keep_keys = set(v[0] for v in self.most_common(truncation_length))
        # Iterate over a snapshot of the keys: we pop entries while
        # looping, which is unsafe on a live key view.
        for key in list(self.keys()):
            if key not in keep_keys:
                self.pop(key)
class StringCounterSerializer(object):
    '''(De)serialization hooks for :class:`StringCounter` features.'''

    def __init__(self):
        # Pure namespace of serializer hooks; never instantiated.
        raise NotImplementedError()

    # Deserialization: a plain mapping of counts builds a StringCounter.
    loads = StringCounter
    constructor = StringCounter

    @staticmethod
    def dumps(sc):
        '''Serialize *sc* to a plain ``dict`` of counts.'''
        return dict(sc)
class NestedStringCounter(MutableMapping):
    '''A mapping from string to :class:`StringCounter`.

    Missing keys are materialized as empty counters on first access.
    '''
    def __init__(self, data=None):
        # *data* is an optional nested ``{feature: {key: count}}`` mapping;
        # each inner mapping is copied into a fresh StringCounter.
        self.data = {}
        if data is not None:
            for fname, counter in data.items():
                if fname not in self.data:
                    self.data[fname] = StringCounter()
                for key, count in counter.iteritems():
                    self.data[fname][key] = count

    def to_nested_dict(self):
        '''Dump to a plain ``{feature: {key: count}}`` dict.'''
        dumped = {}
        for key, counter in self.data.iteritems():
            dumped[key] = dict(counter)
        return dumped

    @staticmethod
    def from_nested_dict(d):
        '''Inverse of :meth:`to_nested_dict`.'''
        return NestedStringCounter(data=d)

    def __getitem__(self, key):
        # BUG FIX: the previous ``self.data.get(uni(key)) or
        # self.__missing__(key)`` treated an existing-but-*empty* counter
        # as missing (empty counters are falsy) and replaced it with a
        # brand new one, invalidating any outstanding references to the
        # old counter.  Test membership explicitly instead.
        k = uni(key)
        if k in self.data:
            return self.data[k]
        return self.__missing__(key)

    def __missing__(self, key):
        # Auto-vivify an empty counter, dict.__missing__-style.
        v = StringCounter()
        self[uni(key)] = v
        return v

    def __contains__(self, key):
        # NOTE(review): unlike the other dunders this does not normalize
        # the key through uni(); byte-string keys may miss here — confirm
        # whether that is intended.
        return key in self.data

    def __setitem__(self, key, value):
        self.data[uni(key)] = value

    def __delitem__(self, key):
        del self.data[uni(key)]

    def __len__(self):
        return len(self.data)

    def __iter__(self):
        return iter(self.data)

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, repr(self.data))

    def __add__(self, other):
        '''Merge two nested counters into a new NestedStringCounter.'''
        new_nsc = NestedStringCounter(self.to_nested_dict())
        for key, counter in other.items():
            if key in new_nsc:
                new_nsc[key] += counter
            else:
                new_nsc[key] = counter
        return new_nsc
class NestedStringCounterSerializer(object):
    '''Serialize nested string counters.'''

    def __init__(self):
        # Namespace of serializer hooks only; not meant to be instantiated.
        raise NotImplementedError()

    @staticmethod
    def loads(d):
        '''Rebuild a :class:`NestedStringCounter` from nested dicts.'''
        return NestedStringCounter.from_nested_dict(d)

    dumps = NestedStringCounter.to_nested_dict
    constructor = NestedStringCounter
| |
## python script to crossfilter out and run Pade approximation on a database
## inputs : Pade script nls.R,
## and one of database source csv or path to crossfiltered named
## files
## the necessary details to fit a Pade approximation for a
## variety of functions.
import pandas as pd
import numpy as np
import os
from glob import glob
import sys
def crossfilters(database):
    """
    Crossfilter the full collection into one CSV per unique
    (code, structure, exchange, element, property) combination.

    Each CSV (written under ``Crossfilts/``) holds the k-points-per-atom
    (k**3) vs. property-value data needed to fit a Pade approximant.
    Returns a list of ``(filename, tag_dict)`` tuples, one per file
    written.
    """
    names = []
    # Make sure the output directory exists before writing any CSV.
    if not os.path.isdir('Crossfilts'):
        os.makedirs('Crossfilts')
    for c in np.unique(database['code']):
        code = database[database['code'] == c]
        for struct in np.unique(code['structure']):
            struct_code = code[code['structure'] == struct]
            for ex in np.unique(struct_code['exchange']):
                ex_struct_code = struct_code[struct_code['exchange'] == ex]
                for el in np.unique(ex_struct_code['element']):
                    el_sel = ex_struct_code[ex_struct_code['element'] == el]
                    # BUG FIX: the property loop previously iterated the
                    # raw column (one entry per row) instead of its unique
                    # values, rewriting the same file once per duplicate.
                    for pr in np.unique(el_sel['property']):
                        sel = el_sel[el_sel['property'] == pr]
                        prop = list(sel['value'])
                        # k-points per atom scales as the cube of the mesh.
                        k_atom = [k**3 for k in sel['k-point']]
                        Pade_df = pd.DataFrame({'Kpts_atom': k_atom,
                                                'P': prop})
                        TAG = {'element': el,
                               'structure': struct,
                               'exchange': ex,
                               'code': c,
                               'property': pr}
                        NAME = '_'.join([pr, el, ex, struct, c]) + '.csv'
                        names.append((NAME, TAG))
                        print("Writing {} ..".format(NAME))
                        Pade_df.to_csv('Crossfilts/' + NAME, index=False)
    return names
def read_crossfilts_from_file(filename):
    """
    Decompose a crossfilter csv path of the form
    ``Crossfilts/<property>_<element>_<exchange>_<structure>_<code>.csv``
    into its tag dictionary.  An exchange name may itself contain one
    underscore (six tokens total).  Returns ``(filename, tags)``.
    """
    stem = filename[11:-4]  # drop the 'Crossfilts/' prefix and '.csv'
    tokens = stem.split('_')
    if len(tokens) == 6:
        # The exchange spans two tokens, e.g. 'PBE_sol'.
        pr, el = tokens[0], tokens[1]
        ex = '_'.join(tokens[2:4])
        struct, c = tokens[4], tokens[5]
    else:
        pr, el, ex, struct, c = tokens
    tags = {'element': el,
            'property': pr,
            'exchange': ex,
            'code': c,
            'structure': struct}
    return filename, tags
def run_pade_through_R(rscript, crossfilt, tags):
    """
    Run the Pade fit for one crossfiltered data set by shelling out to R.

    - copies *crossfilt* to Rdata.csv, the fixed input name the R script
      expects
    - runs ``Rscript <rscript>`` and reads its Result.csv output back
      into pandas
    - returns a dict carrying the *tags* plus the fitted extrapolates,
      fit errors and Pade orders (and the best of each, by lowest error),
      or 'xxx' placeholders for all fit fields when the R step fails.
    """
    result = {'element': tags['element'],
              'structure': tags['structure'],
              'exchange': tags['exchange'],
              'code': tags['code'],
              'property': tags['property']}
    os.system('cp {} Rdata.csv'.format(crossfilt))
    print('copied {}'.format(crossfilt))
    try:
        os.system('Rscript {}'.format(rscript))
        print('R executed')
        R_result = pd.read_csv('Result.csv')
        errors = list(R_result['Error'])
        extrapolates = list(R_result['Extrapolate'])
        orders = list(R_result['Order'])
        # The best fit is the one with the smallest reported error.
        key = errors.index(min(errors))
        result['extrapolate'] = extrapolates
        result['best_extrapolate'] = extrapolates[key]
        result['best_error'] = errors[key]
        result['best_order'] = orders[key]
        result['fit_error'] = errors
        result['pade_order'] = orders
        print("R success")
    except Exception:
        # BUG FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.  Any R/IO/parse failure lands
        # here; mark every fit field with a placeholder.
        print("R failure")
        for field in ('best_extrapolate', 'best_error', 'best_order',
                      'extrapolate', 'fit_error', 'pade_order'):
            result[field] = 'xxx'
    return result
if __name__=='__main__':
    """
    calculate the fit for a given crossfiltered set
    for different Pade sets
    first Milestone - one crossfiltered set :
    Nb B for m+n orders (m, n =2-4) .. output file Pade.csv
    """
    rscript = 'hennig_nls.R'
    # Exactly one of database_path / crossfilts_path should be set:
    # a database triggers fresh crossfiltering; otherwise pre-written
    # crossfilter files are globbed from disk.
    database_path = None
    crossfilts_path = 'Crossfilts/*.csv'
    output_filename = 'Pade_extrapolates_v2.csv'
    if database_path:
        print("Performing crossfiltering on {}..".format(database_path))
        filetags = crossfilters(pd.read_csv(database_path))
    elif crossfilts_path:
        print("Reading crossfilters from {}..".format(crossfilts_path))
        filetags = [read_crossfilts_from_file(f)
                    for f in glob(crossfilts_path)]
    else:
        print('input not provided')
        sys.exit(0)
    # BUG FIX: length_crossfilts was assigned only in the elif branch,
    # raising NameError in the progress message whenever the
    # database_path branch ran.
    length_crossfilts = len(filetags)
    records = []
    print("Running Pade..")
    for n, (f, t) in enumerate(filetags):
        print("Running through {0} of {1}".format(n, length_crossfilts))
        records.append(run_pade_through_R(rscript, f, t))
    # Keep the original explicit column order for the output CSV.
    columns = ['element', 'structure', 'exchange', 'code', 'property',
               'best_extrapolate', 'best_error', 'best_order',
               'extrapolate', 'fit_error', 'pade_order']
    Pade_analysis = pd.DataFrame({col: [r[col] for r in records]
                                  for col in columns})
    print("Writing out Pade analysis... ")
    Pade_analysis.to_csv(output_filename)
    # Round-trip through the CSV to drop the saved index column and any
    # duplicate rows, then write the cleaned file back out.
    Pade_analysis = pd.read_csv(output_filename)
    del Pade_analysis['Unnamed: 0']
    Pade_analysis.drop_duplicates(inplace=True)
    Pade_analysis.to_csv(output_filename)
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import posixpath
import traceback
from app_yaml_helper import AppYamlHelper
from appengine_wrappers import IsDeadlineExceededError, logservice
from branch_utility import BranchUtility
from compiled_file_system import CompiledFileSystem
from data_source_registry import CreateDataSources
from environment import GetAppVersion, IsDevServer
from extensions_paths import EXAMPLES, PUBLIC_TEMPLATES, STATIC_DOCS
from file_system_util import CreateURLsFromPaths
from future import Future
from gcs_file_system_provider import CloudStorageFileSystemProvider
from github_file_system_provider import GithubFileSystemProvider
from host_file_system_provider import HostFileSystemProvider
from object_store_creator import ObjectStoreCreator
from render_servlet import RenderServlet
from server_instance import ServerInstance
from servlet import Servlet, Request, Response
from special_paths import SITE_VERIFICATION_FILE
from timer import Timer, TimerClosure
class _SingletonRenderServletDelegate(RenderServlet.Delegate):
  '''RenderServlet delegate that always hands back one fixed
  ServerInstance, instead of creating a new one per request.
  '''
  def __init__(self, server_instance):
    self._instance = server_instance

  def CreateServerInstance(self):
    return self._instance
class _CronLogger(object):
  '''Wraps the logging.* methods to prefix them with 'cron' and flush
  immediately. The flushing is important because often these cron runs time
  out and we lose the logs.
  '''

  def _log(self, logfn, msg, args):
    try:
      logfn('cron: %s' % msg, *args)
    finally:
      logservice.flush()

  def info(self, msg, *args):
    self._log(logging.info, msg, args)

  def warning(self, msg, *args):
    self._log(logging.warning, msg, args)

  def error(self, msg, *args):
    self._log(logging.error, msg, args)

_cronlog = _CronLogger()
def _RequestEachItem(title, items, request_callback):
  '''Runs a task |request_callback| named |title| for each item in |items|.
  |request_callback| must take an item and return a servlet response.
  Returns true if every item was successfully run, false if any return a
  non-200 response or raise an exception.
  '''
  _cronlog.info('%s: starting', title)
  succeeded, failed = 0, 0
  timer = Timer()
  try:
    for index, item in enumerate(items):
      def error_message(detail):
        return '%s: error rendering %s (%s of %s): %s' % (
            title, item, index + 1, len(items), detail)
      try:
        response = request_callback(item)
        if response.status == 200:
          succeeded += 1
        else:
          _cronlog.error(error_message('response status %s' % response.status))
          failed += 1
      except Exception as e:
        _cronlog.error(error_message(traceback.format_exc()))
        failed += 1
        # Deadline errors mean the whole cron is out of time; propagate.
        if IsDeadlineExceededError(e):
          raise
  finally:
    _cronlog.info('%s: rendered %s of %s with %s failures in %s',
                  title, succeeded, len(items), failed,
                  timer.Stop().FormatElapsed())
  return succeeded == len(items)
class CronServlet(Servlet):
'''Servlet which runs a cron job.
'''
  def __init__(self, request, delegate_for_test=None):
    # |delegate_for_test| lets tests inject a stub Delegate; production
    # callers fall through to the default CronServlet.Delegate.
    Servlet.__init__(self, request)
    self._delegate = delegate_for_test or CronServlet.Delegate()
  class Delegate(object):
    '''CronServlet's runtime dependencies. Override for testing.
    '''
    def CreateBranchUtility(self, object_store_creator):
      # Factory hook; tests can substitute a fake BranchUtility.
      return BranchUtility.Create(object_store_creator)
    def CreateHostFileSystemProvider(self,
                                     object_store_creator,
                                     max_trunk_revision=None):
      # |max_trunk_revision| pins the trunk file system to a revision;
      # None leaves it unpinned.
      return HostFileSystemProvider(object_store_creator,
                                    max_trunk_revision=max_trunk_revision)
    def CreateGithubFileSystemProvider(self, object_store_creator):
      return GithubFileSystemProvider(object_store_creator)
    def CreateGCSFileSystemProvider(self, object_store_creator):
      return CloudStorageFileSystemProvider(object_store_creator)
    def GetAppVersion(self):
      return GetAppVersion()
def Get(self):
# Crons often time out, and if they do we need to make sure to flush the
# logs before the process gets killed (Python gives us a couple of
# seconds).
#
# So, manually flush logs at the end of the cron run. However, sometimes
# even that isn't enough, which is why in this file we use _cronlog and
# make it flush the log every time its used.
logservice.AUTOFLUSH_ENABLED = False
try:
return self._GetImpl()
except BaseException:
_cronlog.error('Caught top-level exception! %s', traceback.format_exc())
finally:
logservice.flush()
def _GetImpl(self):
# Cron strategy:
#
# Find all public template files and static files, and render them. Most of
# the time these won't have changed since the last cron run, so it's a
# little wasteful, but hopefully rendering is really fast (if it isn't we
# have a problem).
_cronlog.info('starting')
# This is returned every time RenderServlet wants to create a new
# ServerInstance.
#
# TODO(kalman): IMPORTANT. This sometimes throws an exception, breaking
# everything. Need retry logic at the fetcher level.
server_instance = self._GetSafeServerInstance()
trunk_fs = server_instance.host_file_system_provider.GetTrunk()
def render(path):
request = Request(path, self._request.host, self._request.headers)
delegate = _SingletonRenderServletDelegate(server_instance)
return RenderServlet(request, delegate).Get()
def request_files_in_dir(path, prefix='', strip_ext=None):
'''Requests every file found under |path| in this host file system, with
a request prefix of |prefix|. |strip_ext| is an optional list of file
extensions that should be stripped from paths before requesting.
'''
def maybe_strip_ext(name):
if name == SITE_VERIFICATION_FILE or not strip_ext:
return name
base, ext = posixpath.splitext(name)
return base if ext in strip_ext else name
files = [maybe_strip_ext(name)
for name, _ in CreateURLsFromPaths(trunk_fs, path, prefix)]
return _RequestEachItem(path, files, render)
results = []
try:
# Start running the hand-written Cron methods first; they can be run in
# parallel. They are resolved at the end.
def run_cron_for_future(target):
title = target.__class__.__name__
future, init_timer = TimerClosure(target.Cron)
assert isinstance(future, Future), (
'%s.Cron() did not return a Future' % title)
def resolve():
resolve_timer = Timer()
try:
future.Get()
except Exception as e:
_cronlog.error('%s: error %s' % (title, traceback.format_exc()))
results.append(False)
if IsDeadlineExceededError(e): raise
finally:
resolve_timer.Stop()
_cronlog.info('%s took %s: %s to initialize and %s to resolve' %
(title,
init_timer.With(resolve_timer).FormatElapsed(),
init_timer.FormatElapsed(),
resolve_timer.FormatElapsed()))
return Future(callback=resolve)
targets = (CreateDataSources(server_instance).values() +
[server_instance.content_providers,
server_instance.platform_bundle])
title = 'initializing %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
cron_futures = [run_cron_for_future(target) for target in targets]
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
# Samples are too expensive to run on the dev server, where there is no
# parallel fetch.
#
# XXX(kalman): Currently samples are *always* too expensive to fetch, so
# disabling them for now. It won't break anything so long as we're still
# not enforcing that everything gets cached for normal instances.
if False: # should be "not IsDevServer()":
# Fetch each individual sample file.
results.append(request_files_in_dir(EXAMPLES,
prefix='extensions/examples'))
# Resolve the hand-written Cron method futures.
title = 'resolving %s parallel Cron targets' % len(targets)
_cronlog.info(title)
timer = Timer()
try:
for future in cron_futures:
future.Get()
finally:
_cronlog.info('%s took %s' % (title, timer.Stop().FormatElapsed()))
except:
results.append(False)
# This should never actually happen (each cron step does its own
# conservative error checking), so re-raise no matter what it is.
_cronlog.error('uncaught error: %s' % traceback.format_exc())
raise
finally:
success = all(results)
_cronlog.info('finished (%s)', 'success' if success else 'FAILED')
return (Response.Ok('Success') if success else
Response.InternalError('Failure'))
def _GetSafeServerInstance(self):
'''Returns a ServerInstance with a host file system at a safe revision,
meaning the last revision that the current running version of the server
existed.
'''
delegate = self._delegate
# IMPORTANT: Get a ServerInstance pinned to the most recent revision, not
# HEAD. These cron jobs take a while and run very frequently such that
# there is usually one running at any given time, and eventually a file
# that we're dealing with will change underneath it, putting the server in
# an undefined state.
server_instance_near_head = self._CreateServerInstance(
self._GetMostRecentRevision())
app_yaml_handler = AppYamlHelper(
server_instance_near_head.object_store_creator,
server_instance_near_head.host_file_system_provider)
if app_yaml_handler.IsUpToDate(delegate.GetAppVersion()):
return server_instance_near_head
# The version in app.yaml is greater than the currently running app's.
# The safe version is the one before it changed.
safe_revision = app_yaml_handler.GetFirstRevisionGreaterThan(
delegate.GetAppVersion()) - 1
_cronlog.info('app version %s is out of date, safe is %s',
delegate.GetAppVersion(), safe_revision)
return self._CreateServerInstance(safe_revision)
def _GetMostRecentRevision(self):
'''Gets the revision of the most recent patch submitted to the host file
system. This is similar to HEAD but it's a concrete revision so won't
change as the cron runs.
'''
head_fs = (
self._CreateServerInstance(None).host_file_system_provider.GetTrunk())
return head_fs.Stat('').version
def _CreateServerInstance(self, revision):
'''Creates a ServerInstance pinned to |revision|, or HEAD if None.
NOTE: If passed None it's likely that during the cron run patches will be
submitted at HEAD, which may change data underneath the cron run.
'''
object_store_creator = ObjectStoreCreator(start_empty=True)
branch_utility = self._delegate.CreateBranchUtility(object_store_creator)
host_file_system_provider = self._delegate.CreateHostFileSystemProvider(
object_store_creator, max_trunk_revision=revision)
github_file_system_provider = self._delegate.CreateGithubFileSystemProvider(
object_store_creator)
gcs_file_system_provider = self._delegate.CreateGCSFileSystemProvider(
object_store_creator)
return ServerInstance(object_store_creator,
CompiledFileSystem.Factory(object_store_creator),
branch_utility,
host_file_system_provider,
github_file_system_provider,
gcs_file_system_provider)
| |
# -*- test-case-name: foolscap.test.test_banana -*-
from __future__ import print_function
import six
from zope.interface import implementer
from twisted.internet.defer import Deferred
from foolscap import tokens
from foolscap.tokens import Violation, BananaError
from foolscap.slicer import BaseUnslicer, ReferenceSlicer
from foolscap.slicer import UnslicerRegistry, BananaUnslicerRegistry
from foolscap.slicers.vocab import ReplaceVocabularyTable, AddToVocabularyTable
from foolscap.util import ensure_tuple_str
from foolscap import copyable # does this create a cycle?
from twisted.python import log
from functools import reduce
@implementer(tokens.ISlicer, tokens.IRootSlicer)
class RootSlicer:
    # Bottom-most Slicer on the sending stack: queues objects handed to
    # send(), feeds them to the Banana protocol one at a time via the
    # iterator protocol, and picks a child Slicer for each object.
    # Default streamability applied to each newly dequeued object.
    streamableInGeneral = True
    # Deferred handed to the protocol when the queue is empty; fired by
    # send() to wake the protocol up.
    producingDeferred = None
    # Deferred for the object currently being serialized; fires/errbacks on
    # completion/abort.
    objectSentDeferred = None
    # type -> slicer-factory table, consulted after adapter lookups fail.
    slicerTable = {}
    debug = False
    def __init__(self, protocol):
        self.protocol = protocol
        self.sendQueue = []
    def allowStreaming(self, streamable):
        # Change the default streamability for subsequently sent objects.
        self.streamableInGeneral = streamable
    def registerRefID(self, refid, obj):
        # No reference tracking at the root; ScopedRootSlicer overrides this.
        pass
    def slicerForObject(self, obj):
        # could use a table here if you think it'd be faster than an
        # adapter lookup
        if self.debug: log.msg("slicerForObject(%s)" % type(obj))
        # do the adapter lookup first, so that registered adapters override
        # UnsafeSlicerTable's InstanceSlicer
        slicer = tokens.ISlicer(obj, None)
        if slicer:
            if self.debug: log.msg("got ISlicer %s" % slicer)
            return slicer
        # zope.interface doesn't do transitive adaptation, which is a shame
        # because we want to let people register ICopyable adapters for
        # third-party code, and there is an ICopyable->ISlicer adapter
        # defined in copyable.py, but z.i won't do the transitive
        # ThirdPartyClass -> ICopyable -> ISlicer
        # so instead we manually do it here
        copier = copyable.ICopyable(obj, None)
        if copier:
            s = tokens.ISlicer(copier)
            return s
        # Last resort: the explicit per-type factory table.
        slicerFactory = self.slicerTable.get(type(obj))
        if slicerFactory:
            if self.debug: log.msg(" got slicerFactory %s" % slicerFactory)
            return slicerFactory(obj)
        name = str(type(obj))
        if self.debug: log.msg("cannot serialize %s (%s)" % (obj, name))
        raise Violation("cannot serialize %s (%s)" % (obj, name))
    # Guards against slice() being invoked more than once per instance.
    sliceAlreadyCalled = False
    def slice(self):
        # this may only be called once
        assert not self.sliceAlreadyCalled
        self.sliceAlreadyCalled = True
        return iter(self)
    def __iter__(self):
        return self
    def __next__(self):
        # Called by the protocol for the next top-level object to serialize.
        if self.objectSentDeferred:
            # The previous object finished: notify its sender.
            self.objectSentDeferred.callback(None)
            self.objectSentDeferred = None
        if self.sendQueue:
            # NOTE(review): pop() takes from the *end* of sendQueue, so if
            # several objects are queued they are emitted LIFO -- confirm
            # this ordering is intended.
            (obj, self.objectSentDeferred) = self.sendQueue.pop()
            self.streamable = self.streamableInGeneral
            return obj
        if self.protocol.debugSend:
            print("LAST BAG")
        # Queue empty: give the protocol a Deferred that send() fires when
        # new work arrives.
        self.producingDeferred = Deferred()
        self.streamable = True
        return self.producingDeferred
    # Python 2 iterator-protocol alias.
    next = __next__
    def childAborted(self, f):
        # Serialization of the current object failed: propagate the failure
        # to whoever called send().
        assert self.objectSentDeferred
        self.objectSentDeferred.errback(f)
        self.objectSentDeferred = None
        return None
    def send(self, obj):
        # obj can also be a Slicer, say, a CallSlicer. We return a Deferred
        # which fires when the object has been fully serialized.
        idle = (len(self.protocol.slicerStack) == 1) and not self.sendQueue
        objectSentDeferred = Deferred()
        self.sendQueue.append((obj, objectSentDeferred))
        if idle:
            # wake up
            if self.protocol.debugSend:
                print(" waking up to send")
            if self.producingDeferred:
                d = self.producingDeferred
                self.producingDeferred = None
                # TODO: consider reactor.callLater(0, d.callback, None)
                # I'm not sure it's actually necessary, though
                d.callback(None)
        return objectSentDeferred
    def describe(self):
        return "<RootSlicer>"
    def connectionLost(self, why):
        # abandon everything we wanted to send
        if self.objectSentDeferred:
            self.objectSentDeferred.errback(why)
            self.objectSentDeferred = None
        for obj, d in self.sendQueue:
            d.errback(why)
        self.sendQueue = []
class ScopedRootSlicer(RootSlicer):
    # this combines RootSlicer with foolscap.slicer.ScopedSlicer . The funny
    # self-delegation of slicerForObject() means we can't just inherit from
    # both. It would be nice to refactor everything to make this cleaner.
    def __init__(self, obj):
        RootSlicer.__init__(self, obj)
        # id(obj) -> (obj, refid); holding obj keeps its id() stable.
        self.references = {}
    def registerRefID(self, refid, obj):
        # Remember the object so later sends can emit a back-reference
        # instead of re-serializing it.
        self.references[id(obj)] = (obj, refid)
    def slicerForObject(self, obj):
        # If this object's serialization already started in this scope,
        # emit a reference token; otherwise fall back to the normal lookup.
        entry = self.references.get(id(obj))
        if entry is None:
            return RootSlicer.slicerForObject(self, obj)
        _kept_alive, refid = entry
        return ReferenceSlicer(refid)
class RootUnslicer(BaseUnslicer):
    # Bottom-most Unslicer on the receiving stack: validates OPEN index
    # tokens, dispatches opentypes to registered Unslicer factories, and
    # delivers each finished top-level object to the protocol.
    # topRegistries is used for top-level objects
    topRegistries = [UnslicerRegistry, BananaUnslicerRegistry]
    # openRegistries is used for everything at lower levels
    openRegistries = [UnslicerRegistry]
    # Optional constraint applied to top-level objects only.
    constraint = None
    openCount = None
    def __init__(self, protocol):
        self.protocol = protocol
        self.objects = {}
        # Precompute the longest registered opentype index string so that
        # openerCheckToken can reject oversized STRING tokens early.
        keys = []
        for r in self.topRegistries + self.openRegistries:
            for k in list(r.keys()):
                keys.append(len(k[0]))
        self.maxIndexLength = reduce(max, keys)
    def start(self, count):
        pass
    def setConstraint(self, constraint):
        # this constraints top-level objects. E.g., if this is an
        # IntegerConstraint, then only integers will be accepted.
        self.constraint = constraint
    def checkToken(self, typebyte, size):
        if self.constraint:
            self.constraint.checkToken(typebyte, size)
    def openerCheckToken(self, typebyte, size, opentype):
        # Validate a token that forms part of an OPEN index tuple.
        if typebyte == tokens.STRING:
            if size > self.maxIndexLength:
                why = "STRING token is too long, %d>%d" % \
                      (size, self.maxIndexLength)
                raise Violation(why)
        elif typebyte == tokens.VOCAB:
            return
        else:
            # TODO: hack for testing
            raise Violation("index token 0x%02x not STRING or VOCAB" % \
                            six.byte2int(typebyte))
            # NOTE(review): unreachable -- the Violation above always fires
            # first, so this BananaError raise is dead code.
            raise BananaError("index token 0x%02x not STRING or VOCAB" % \
                              six.byte2int(typebyte))
    def open(self, opentype):
        # called (by delegation) by the top Unslicer on the stack, regardless
        # of what kind of unslicer it is. This is only used for "internal"
        # objects: non-top-level nodes
        assert len(self.protocol.receiveStack) > 1
        opentype = ensure_tuple_str(opentype)
        if opentype[0] == 'copyable':
            if len(opentype) > 1:
                copyablename = opentype[1]
                try:
                    factory = copyable.CopyableRegistry[copyablename]
                except KeyError:
                    raise Violation("unknown RemoteCopy name '%s'" \
                                    % copyablename)
                child = factory()
                return child
            return None # still waiting for copyablename
        for reg in self.openRegistries:
            opener = reg.get(opentype)
            if opener is not None:
                child = opener()
                return child
        raise Violation("unknown OPEN type %s" % (opentype,))
    def doOpen(self, opentype):
        # this is only called for top-level objects
        assert len(self.protocol.receiveStack) == 1
        opentype = ensure_tuple_str(opentype)
        if self.constraint:
            self.constraint.checkOpentype(opentype)
        for reg in self.topRegistries:
            opener = reg.get(opentype)
            if opener is not None:
                child = opener()
                break
        else:
            raise Violation("unknown top-level OPEN type %s" % (opentype,))
        if self.constraint:
            child.setConstraint(self.constraint)
        return child
    def receiveChild(self, obj, ready_deferred=None):
        assert not isinstance(obj, Deferred)
        assert ready_deferred is None
        if self.protocol.debugReceive:
            print("RootUnslicer.receiveChild(%s)" % (obj,))
        # Drop per-message object references now that the top-level object
        # is complete.
        self.objects = {}
        if obj in (ReplaceVocabularyTable, AddToVocabularyTable):
            # the unslicer has already changed the vocab table
            return
        if self.protocol.exploded:
            print("protocol exploded, can't deliver object")
            print(self.protocol.exploded)
            self.protocol.receivedObject(self.protocol.exploded)
            return
        self.protocol.receivedObject(obj) # give finished object to Banana
    def receiveClose(self):
        raise BananaError("top-level should never receive CLOSE tokens")
    def reportViolation(self, why):
        return self.protocol.reportViolation(why)
    def describe(self):
        return "<RootUnslicer>"
    def setObject(self, counter, obj):
        # Reference tracking is a no-op at the root; see ScopedRootUnslicer.
        pass
    def getObject(self, counter):
        return None
class ScopedRootUnslicer(RootUnslicer):
    # combines RootUnslicer and ScopedUnslicer: reference counters seen
    # during one deserialization resolve to the objects they introduced.
    def __init__(self, protocol):
        RootUnslicer.__init__(self, protocol)
        self.references = {}
    def setObject(self, counter, obj):
        # Record |obj| so later reference tokens can resolve |counter|.
        self.references[counter] = obj
    def getObject(self, counter):
        # Returns None when |counter| was never registered.
        return self.references.get(counter)
| |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from django.core.management.base import BaseCommand, CommandParser
from django.utils import timezone
from zerver.models import Message, UserProfile, Stream, Recipient, UserPresence, \
Subscription, get_huddle, Realm, UserMessage, RealmAlias, \
clear_database, get_client, get_user_profile_by_id, \
email_to_username
from zerver.lib.actions import STREAM_ASSIGNMENT_COLORS, do_send_messages, \
do_change_is_admin
from django.conf import settings
from zerver.lib.bulk_create import bulk_create_clients, \
bulk_create_streams, bulk_create_users, bulk_create_huddles
from zerver.models import DefaultStream, get_stream, get_realm
import random
import os
from optparse import make_option
from six.moves import range
from typing import Any, Callable, Dict, List, Iterable, Mapping, Sequence, Set, Tuple, Text
# Send messages in-process rather than routing them through Tornado.
settings.TORNADO_SERVER = None
# Disable using memcached caches to avoid 'unsupported pickle
# protocol' errors if `populate_db` is run with a different Python
# from `run-dev.py`.
settings.CACHES['default'] = {
    'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'
}
def create_users(realm, name_list, bot_type=None):
    # type: (Realm, Iterable[Tuple[Text, Text]], int) -> None
    # Bulk-create active users in |realm| from (full_name, email) pairs.
    user_set = {
        (email, full_name, email_to_username(email), True)
        for full_name, email in name_list
    } # type: Set[Tuple[Text, Text, Text, bool]]
    # Humans get the configured ToS version; bots don't agree to ToS.
    if bot_type is None:
        tos_version = settings.TOS_VERSION
    else:
        tos_version = None
    bulk_create_users(realm, user_set, bot_type=bot_type, tos_version=tos_version)
class Command(BaseCommand):
    help = "Populate a test database"
    def add_arguments(self, parser):
        # type: (CommandParser) -> None
        # Knobs controlling how much data is generated and of what kind.
        parser.add_argument('-n', '--num-messages',
                            dest='num_messages',
                            type=int,
                            default=600,
                            help='The number of messages to create.')
        parser.add_argument('--extra-users',
                            dest='extra_users',
                            type=int,
                            default=0,
                            help='The number of extra users to create')
        parser.add_argument('--extra-bots',
                            dest='extra_bots',
                            type=int,
                            default=0,
                            help='The number of extra bots to create')
        parser.add_argument('--huddles',
                            dest='num_huddles',
                            type=int,
                            default=3,
                            help='The number of huddles to create.')
        parser.add_argument('--personals',
                            dest='num_personals',
                            type=int,
                            default=6,
                            help='The number of personal pairs to create.')
        parser.add_argument('--threads',
                            dest='threads',
                            type=int,
                            default=10,
                            help='The number of threads to use.')
        parser.add_argument('--percent-huddles',
                            dest='percent_huddles',
                            type=float,
                            default=15,
                            help='The percent of messages to be huddles.')
        parser.add_argument('--percent-personals',
                            dest='percent_personals',
                            type=float,
                            default=15,
                            help='The percent of messages to be personals.')
        parser.add_argument('--stickyness',
                            dest='stickyness',
                            type=float,
                            default=20,
                            help='The percent of messages to repeat recent folks.')
        parser.add_argument('--nodelete',
                            action="store_false",
                            default=True,
                            dest='delete',
                            help='Whether to delete all the existing messages.')
        # NOTE(review): this help text is identical to --nodelete's; it looks
        # copy-pasted and probably should describe test-suite mode instead.
        parser.add_argument('--test-suite',
                            default=False,
                            action="store_true",
                            help='Whether to delete all the existing messages.')
    def handle(self, **options):
        # type: (**Any) -> None
        # Entry point: wipes the database (unless --nodelete), creates the
        # realms/users/streams/subscriptions, then generates messages.
        if options["percent_huddles"] + options["percent_personals"] > 100:
            self.stderr.write("Error! More than 100% of messages allocated.\n")
            return
        if options["delete"]:
            # Start by clearing all the data in our database
            clear_database()
            # Create our two default realms
            # Could in theory be done via zerver.lib.actions.do_create_realm, but
            # welcome-bot (needed for do_create_realm) hasn't been created yet
            zulip_realm = Realm.objects.create(
                string_id="zulip", name="Zulip Dev", restricted_to_domain=True,
                invite_required=False, org_type=Realm.CORPORATE, domain="zulip.com")
            RealmAlias.objects.create(realm=zulip_realm, domain="zulip.com")
            if options["test_suite"]:
                # The MIT realm exercises the Zephyr mirroring codepaths.
                mit_realm = Realm.objects.create(
                    string_id="zephyr", name="MIT", restricted_to_domain=True,
                    invite_required=False, org_type=Realm.CORPORATE, domain="mit.edu")
                RealmAlias.objects.create(realm=mit_realm, domain="mit.edu")
            # Create test Users (UserProfiles are automatically created,
            # as are subscriptions to the ability to receive personals).
            names = [
                ("Zoe", "ZOE@zulip.com"),
                ("Othello, the Moor of Venice", "othello@zulip.com"),
                ("Iago", "iago@zulip.com"),
                ("Prospero from The Tempest", "prospero@zulip.com"),
                ("Cordelia Lear", "cordelia@zulip.com"),
                ("King Hamlet", "hamlet@zulip.com"),
                ("aaron", "AARON@zulip.com"),
            ]
            for i in range(options["extra_users"]):
                names.append(('Extra User %d' % (i,), 'extrauser%d@zulip.com' % (i,)))
            create_users(zulip_realm, names)
            # Iago is the designated realm administrator in tests.
            iago = UserProfile.objects.get(email="iago@zulip.com")
            do_change_is_admin(iago, True)
            # Create public streams.
            stream_list = ["Verona", "Denmark", "Scotland", "Venice", "Rome"]
            stream_dict = {
                "Verona": {"description": "A city in Italy", "invite_only": False},
                "Denmark": {"description": "A Scandinavian country", "invite_only": False},
                "Scotland": {"description": "Located in the United Kingdom", "invite_only": False},
                "Venice": {"description": "A northeastern Italian city", "invite_only": False},
                "Rome": {"description": "Yet another Italian city", "invite_only": False}
            } # type: Dict[Text, Dict[Text, Any]]
            bulk_create_streams(zulip_realm, stream_dict)
            recipient_streams = [Stream.objects.get(name=name, realm=zulip_realm).id
                                 for name in stream_list] # type: List[int]
            # Create subscriptions to streams. The following
            # algorithm will give each of the users a different but
            # deterministic subset of the streams (given a fixed list
            # of users).
            subscriptions_to_add = [] # type: List[Subscription]
            profiles = UserProfile.objects.select_related().all().order_by("email") # type: Sequence[UserProfile]
            for i, profile in enumerate(profiles):
                # Subscribe to some streams.
                for type_id in recipient_streams[:int(len(recipient_streams) *
                                                      float(i)/len(profiles)) + 1]:
                    r = Recipient.objects.get(type=Recipient.STREAM, type_id=type_id)
                    s = Subscription(
                        recipient=r,
                        user_profile=profile,
                        color=STREAM_ASSIGNMENT_COLORS[i % len(STREAM_ASSIGNMENT_COLORS)])
                    subscriptions_to_add.append(s)
            Subscription.objects.bulk_create(subscriptions_to_add)
        else:
            # --nodelete: reuse the existing realm and stream recipients.
            zulip_realm = get_realm("zulip")
            recipient_streams = [klass.type_id for klass in
                                 Recipient.objects.filter(type=Recipient.STREAM)]
        # Extract a list of all users
        user_profiles = list(UserProfile.objects.all()) # type: List[UserProfile]
        if not options["test_suite"]:
            # Populate users with some bar data
            for user in user_profiles:
                status = UserPresence.ACTIVE # type: int
                date = timezone.now()
                client = get_client("website")
                # Split users between web and Android clients by name.
                if user.full_name[0] <= 'H':
                    client = get_client("ZulipAndroid")
                UserPresence.objects.get_or_create(user_profile=user, client=client, timestamp=date, status=status)
        user_profiles_ids = [user_profile.id for user_profile in user_profiles]
        # Create several initial huddles
        for i in range(options["num_huddles"]):
            get_huddle(random.sample(user_profiles_ids, random.randint(3, 4)))
        # Create several initial pairs for personals
        personals_pairs = [random.sample(user_profiles_ids, 2)
                           for i in range(options["num_personals"])]
        # Split the message count evenly across "threads" (run serially
        # below), distributing the remainder one message at a time.
        threads = options["threads"]
        jobs = [] # type: List[Tuple[int, List[List[int]], Dict[str, Any], Callable[[str], int]]]
        for i in range(threads):
            count = options["num_messages"] // threads
            if i < options["num_messages"] % threads:
                count += 1
            jobs.append((count, personals_pairs, options, self.stdout.write))
        for job in jobs:
            send_messages(job)
        if options["delete"]:
            # Create the "website" and "API" clients; if we don't, the
            # default values in zerver/decorators.py will not work
            # with the Django test suite.
            get_client("website")
            get_client("API")
            if options["test_suite"]:
                # Create test users; the MIT ones are needed to test
                # the Zephyr mirroring codepaths.
                testsuite_mit_users = [
                    ("Fred Sipb (MIT)", "sipbtest@mit.edu"),
                    ("Athena Consulting Exchange User (MIT)", "starnine@mit.edu"),
                    ("Esp Classroom (MIT)", "espuser@mit.edu"),
                ]
                create_users(mit_realm, testsuite_mit_users)
            # These bots are directly referenced from code and thus
            # are needed for the test suite.
            all_realm_bots = [(bot['name'], bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,))
                              for bot in settings.INTERNAL_BOTS]
            zulip_realm_bots = [
                ("Zulip New User Bot", "new-user-bot@zulip.com"),
                ("Zulip Error Bot", "error-bot@zulip.com"),
                ("Zulip Default Bot", "default-bot@zulip.com"),
            ]
            for i in range(options["extra_bots"]):
                zulip_realm_bots.append(('Extra Bot %d' % (i,), 'extrabot%d@zulip.com' % (i,)))
            zulip_realm_bots.extend(all_realm_bots)
            create_users(zulip_realm, zulip_realm_bots, bot_type=UserProfile.DEFAULT_BOT)
            zulip_webhook_bots = [
                ("Zulip Webhook Bot", "webhook-bot@zulip.com"),
            ]
            create_users(zulip_realm, zulip_webhook_bots, bot_type=UserProfile.INCOMING_WEBHOOK_BOT)
            create_simple_community_realm()
            if not options["test_suite"]:
                # Initialize the email gateway bot as an API Super User
                email_gateway_bot = UserProfile.objects.get(email__iexact=settings.EMAIL_GATEWAY_BOT)
                email_gateway_bot.is_api_super_user = True
                email_gateway_bot.save()
                # To keep the messages.json fixtures file for the test
                # suite fast, don't add these users and subscriptions
                # when running populate_db for the test suite
                zulip_stream_dict = {
                    "devel": {"description": "For developing", "invite_only": False},
                    "all": {"description": "For everything", "invite_only": False},
                    "announce": {"description": "For announcements", "invite_only": False},
                    "design": {"description": "For design", "invite_only": False},
                    "support": {"description": "For support", "invite_only": False},
                    "social": {"description": "For socializing", "invite_only": False},
                    "test": {"description": "For testing", "invite_only": False},
                    "errors": {"description": "For errors", "invite_only": False},
                    "sales": {"description": "For sales discussion", "invite_only": False}
                } # type: Dict[Text, Dict[Text, Any]]
                bulk_create_streams(zulip_realm, zulip_stream_dict)
                # Now that we've created the notifications stream, configure it properly.
                zulip_realm.notifications_stream = get_stream("announce", zulip_realm)
                zulip_realm.save(update_fields=['notifications_stream'])
                # Add a few default streams
                for default_stream_name in ["design", "devel", "social", "support"]:
                    DefaultStream.objects.create(realm=zulip_realm,
                                                 stream=get_stream(default_stream_name, zulip_realm))
                # Now subscribe everyone to these streams
                subscriptions_to_add = []
                profiles = UserProfile.objects.select_related().filter(realm=zulip_realm)
                for i, stream_name in enumerate(zulip_stream_dict):
                    stream = Stream.objects.get(name=stream_name, realm=zulip_realm)
                    recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id)
                    for profile in profiles:
                        # Subscribe to some streams.
                        s = Subscription(
                            recipient=recipient,
                            user_profile=profile,
                            color=STREAM_ASSIGNMENT_COLORS[i % len(STREAM_ASSIGNMENT_COLORS)])
                        subscriptions_to_add.append(s)
                Subscription.objects.bulk_create(subscriptions_to_add)
                # These bots are not needed by the test suite
                internal_zulip_users_nosubs = [
                    ("Zulip Commit Bot", "commit-bot@zulip.com"),
                    ("Zulip Trac Bot", "trac-bot@zulip.com"),
                    ("Zulip Nagios Bot", "nagios-bot@zulip.com"),
                ]
                create_users(zulip_realm, internal_zulip_users_nosubs, bot_type=UserProfile.DEFAULT_BOT)
                zulip_cross_realm_bots = [
                    ("Zulip Feedback Bot", "feedback@zulip.com"),
                ]
                create_users(zulip_realm, zulip_cross_realm_bots, bot_type=UserProfile.DEFAULT_BOT)
            # Mark all messages as read
            UserMessage.objects.all().update(flags=UserMessage.flags.read)
            self.stdout.write("Successfully populated test database.\n")
# Cache of Recipient rows keyed by id, shared by all send_messages() calls.
recipient_hash = {} # type: Dict[int, Recipient]

def get_recipient_by_id(rid):
    # type: (int) -> Recipient
    # Fetch a Recipient by id, memoizing results in recipient_hash.
    #
    # BUG FIX: the cache was consulted but never populated, so every miss
    # went to the database; now store the fetched row for reuse.
    if rid in recipient_hash:
        return recipient_hash[rid]
    recipient = Recipient.objects.get(id=rid)
    recipient_hash[rid] = recipient
    return recipient
# Create some test messages, including:
# - multiple streams
# - multiple subjects per stream
# - multiple huddles
# - multiple personals conversations
# - multiple messages per subject
# - both single and multi-line content
def send_messages(data):
    # type: (Tuple[int, Sequence[Sequence[int]], Mapping[str, Any], Callable[[str], Any]]) -> int
    # Generate and send |tot_messages| random messages, split between
    # streams, personals and huddles according to the percentages in
    # |options|. With probability options["stickyness"] a message reuses
    # the previous message's recipient to simulate conversation threads.
    # Returns the number of messages requested.
    (tot_messages, personals_pairs, options, output) = data
    # Seed per process so parallel jobs generate different messages.
    random.seed(os.getpid())
    # BUG FIX: the fixture file was opened without ever being closed; use a
    # context manager so the handle is released promptly.
    with open("zilencer/management/commands/test_messages.txt", "r") as f:
        texts = f.readlines()
    # BUG FIX: random.randint's upper bound is inclusive, so len(texts) was
    # a possible (out-of-range) starting offset that produced an empty
    # first message.
    offset = random.randint(0, len(texts) - 1)
    recipient_streams = [klass.id for klass in
                         Recipient.objects.filter(type=Recipient.STREAM)] # type: List[int]
    recipient_huddles = [h.id for h in Recipient.objects.filter(type=Recipient.HUDDLE)] # type: List[int]
    huddle_members = {} # type: Dict[int, List[int]]
    for h in recipient_huddles:
        huddle_members[h] = [s.user_profile.id for s in
                             Subscription.objects.filter(recipient_id=h)]
    num_messages = 0
    random_max = 1000000
    recipients = {} # type: Dict[int, Tuple[int, int, Dict[str, Any]]]
    while num_messages < tot_messages:
        saved_data = {} # type: Dict[str, Any]
        message = Message()
        message.sending_client = get_client('populate_db')
        # Pull 1-5 consecutive sample lines, wrapping around the file.
        length = random.randint(1, 5)
        lines = (t.strip() for t in texts[offset: offset + length])
        message.content = '\n'.join(lines)
        offset += length
        offset = offset % len(texts)
        randkey = random.randint(1, random_max)
        if (num_messages > 0 and
                random.randint(1, random_max) * 100. / random_max < options["stickyness"]):
            # Use an old recipient
            message_type, recipient_id, saved_data = recipients[num_messages - 1]
            if message_type == Recipient.PERSONAL:
                personals_pair = saved_data['personals_pair']
                random.shuffle(personals_pair)
            elif message_type == Recipient.STREAM:
                message.subject = saved_data['subject']
                message.recipient = get_recipient_by_id(recipient_id)
            elif message_type == Recipient.HUDDLE:
                message.recipient = get_recipient_by_id(recipient_id)
        elif (randkey <= random_max * options["percent_huddles"] / 100.):
            message_type = Recipient.HUDDLE
            message.recipient = get_recipient_by_id(random.choice(recipient_huddles))
        elif (randkey <= random_max * (options["percent_huddles"] + options["percent_personals"]) / 100.):
            message_type = Recipient.PERSONAL
            personals_pair = random.choice(personals_pairs)
            random.shuffle(personals_pair)
        elif (randkey <= random_max * 1.0):
            # Everything else becomes a stream message.
            message_type = Recipient.STREAM
            message.recipient = get_recipient_by_id(random.choice(recipient_streams))
        # Fill in the sender (and remaining recipient details) by type.
        if message_type == Recipient.HUDDLE:
            sender_id = random.choice(huddle_members[message.recipient.id])
            message.sender = get_user_profile_by_id(sender_id)
        elif message_type == Recipient.PERSONAL:
            message.recipient = Recipient.objects.get(type=Recipient.PERSONAL,
                                                      type_id=personals_pair[0])
            message.sender = get_user_profile_by_id(personals_pair[1])
            saved_data['personals_pair'] = personals_pair
        elif message_type == Recipient.STREAM:
            stream = Stream.objects.get(id=message.recipient.type_id)
            # Pick a random subscriber to the stream
            message.sender = random.choice(Subscription.objects.filter(
                recipient=message.recipient)).user_profile
            message.subject = stream.name + Text(random.randint(1, 3))
            saved_data['subject'] = message.subject
        message.pub_date = timezone.now()
        do_send_messages([{'message': message}])
        recipients[num_messages] = (message_type, message.recipient.id, saved_data)
        num_messages += 1
    return tot_messages
def create_simple_community_realm():
    # type: () -> None
    # Create the open-enrollment "simple" community realm with three users
    # and initial presence rows for each of them.
    community_members = [
        ("alice", "alice@example.com"),
        ("bob", "bob@foo.edu"),
        ("cindy", "cindy@foo.tv"),
    ]
    simple_realm = Realm.objects.create(
        string_id="simple", name="Simple Realm", restricted_to_domain=False,
        invite_required=False, org_type=Realm.COMMUNITY, domain="simple.com")
    create_users(simple_realm, community_members)
    create_user_presences(UserProfile.objects.filter(realm__string_id='simple'))
def create_user_presences(user_profiles):
    # type: (Iterable[UserProfile]) -> None
    # Ensure each profile has a presence row stamped "now" for the website
    # client, with status 1 (active).
    for profile in user_profiles:
        UserPresence.objects.get_or_create(
            user_profile=profile,
            client=get_client("website"),
            timestamp=timezone.now(),
            status=1)
| |
import os
import random
import time
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.operations.volume_operations as vol_ops
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstackwoodpecker.operations.image_operations as img_ops
import zstackwoodpecker.operations.resource_operations as res_ops
import zstackwoodpecker.operations.account_operations as acc_ops
import zstackwoodpecker.zstack_test.zstack_test_vm as test_vm_header
import zstackwoodpecker.zstack_test.zstack_test_image as zstack_image_header
import zstackwoodpecker.zstack_test.zstack_test_volume as zstack_volume_header
import zstackwoodpecker.zstack_test.zstack_test_snapshot as zstack_sp_header
import zstackwoodpecker.operations.scenario_operations as sce_ops
import zstackwoodpecker.header.host as host_header
import zstackwoodpecker.header.volume as volume_header
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
# Path logs every executed operation sequence; a new sub-list is started
# each time the `tag` operation (a VM reboot) occurs, and `index` points
# at the sub-list currently being appended to (see record()).
Path = [[]]
index = 0
tag = "VM_TEST_REBOOT"
# Shared fixtures populated by test(): the helper VM used for snapshot
# verification, the most recent volume backup, and all backups so far.
utility_vm = None
backup = None
backup_list = []
# Maps the CASE_FLAVOR environment value to the data-volume operation to
# run (vm_op) and the VM power-state operation interleaved with it (state_op).
case_flavor = dict(snapshot_running= dict(vm_op=['DVOL_TEST_SNAPSHOT'], state_op=['VM_TEST_NONE']),
                   create_img_running= dict(vm_op=['DVOL_TEST_CREATE_IMG'], state_op=['VM_TEST_NONE']),
                   resize_running= dict(vm_op=['DVOL_TEST_RESIZE'], state_op=['VM_TEST_NONE']),
                   create_img_from_backup_running= dict(vm_op=['VM_TEST_BACKUP_IMAGE'], state_op=['VM_TEST_NONE']),
                   delete_snapshot_running= dict(vm_op=['DVOL_DEL_SNAPSHOT'], state_op=['VM_TEST_NONE']),
                   snapshot_stopped= dict(vm_op=['DVOL_TEST_SNAPSHOT'], state_op=['VM_TEST_STOP']),
                   create_img_stopped= dict(vm_op=['DVOL_TEST_CREATE_IMG'], state_op=['VM_TEST_STOP']),
                   resize_stopped= dict(vm_op=['DVOL_TEST_RESIZE'], state_op=['VM_TEST_STOP']),
                   create_img_from_backup_stopped= dict(vm_op=['VM_TEST_BACKUP_IMAGE'], state_op=['VM_TEST_STOP']),
                   delete_snapshot_stopped= dict(vm_op=['DVOL_DEL_SNAPSHOT'], state_op=['VM_TEST_STOP']),
                   revert_backup_stopped= dict(vm_op=['VM_TEST_REVERT_BACKUP'], state_op=['VM_TEST_STOP']),
                   )
def record(fun):
    """Decorator that logs every dispatched op into the global Path lists.

    Non-tag ops are appended to the current sub-list.  When the tag op
    (VM reboot) is seen, a fresh sub-list seeded with the tag is started
    and `index` advances to it.
    """
    def recorder(vm, dvol, op):
        global index
        if op != tag:
            Path[index].append(op)
        elif op == tag:
            # NOTE(review): the tag op is recorded both in the sub-list
            # being closed and as the seed of the new one — presumably
            # intentional so each printed path shows the reboot boundary;
            # confirm against print_path output expectations.
            Path.append([op])
            Path[index].append(op)
            index += 1
        return fun(vm, dvol, op)
    return recorder
# Volume-level operations this suite can exercise.
VOL_OPS = [
    "DVOL_TEST_CREATE_IMG",
    "DVOL_TEST_SNAPSHOT",
    "DVOL_DEL_SNAPSHOT",
    "DVOL_TEST_RESIZE",
    # Bug fix: a missing comma here used to fuse the next two strings into
    # a single bogus entry "VM_TEST_BACKUP_IMAGEVM_TEST_REVERT_BACKUP"
    # via implicit string concatenation.
    "VM_TEST_BACKUP_IMAGE",
    "VM_TEST_REVERT_BACKUP"
]
# VM power-state operations interleaved between the volume operations.
VM_STATE_OPS = ["VM_TEST_STOP", "VM_TEST_REBOOT", "VM_TEST_NONE"]
@record
def vm_op_test(vm, dvol, op):
    """Log the requested operation and dispatch it to its handler.

    Every handler takes (vm, dvol); an unknown op raises KeyError.
    """
    test_util.test_logger(vm.vm.name + "-------" + op)
    dispatch = {
        "VM_TEST_STOP": stop,
        "VM_TEST_REBOOT": reboot,
        "VM_TEST_NONE": do_nothing,
        "DVOL_TEST_SNAPSHOT": create_snapshot,
        "DVOL_DEL_SNAPSHOT": delete_snapshot,
        "DVOL_TEST_CREATE_IMG": create_image,
        "DVOL_TEST_RESIZE": resize_dvol,
        "DVOL_BACKUP": back_up,
        "VM_TEST_REVERT_BACKUP": revert_backup,
        "VM_TEST_BACKUP_IMAGE": backup_image,
    }
    dispatch[op](vm, dvol)
def stop(vm, dvol):
    """Power off the VM; the data volume argument is unused."""
    vm.stop()
def reboot(vm, dvol):
    """Reboot the VM; the data volume argument is unused."""
    vm.reboot()
def do_nothing(vm, dvol):
    """No-op handler backing the VM_TEST_NONE state operation."""
    return None
def create_snapshot(vm_obj, dvol):
    """Take a snapshot of the data volume, verified via the utility VM."""
    sp_root = zstack_sp_header.ZstackVolumeSnapshot()
    sp_root.set_utility_vm(utility_vm)
    sp_root.set_target_volume(dvol)
    sp_root.create_snapshot('create_data_snapshot1')
def delete_snapshot(vm_obj, dvol):
    """Delete one randomly chosen snapshot of the data volume, if any exist."""
    sp_root = zstack_sp_header.ZstackVolumeSnapshot()
    sp_root.set_utility_vm(utility_vm)
    sp_root.set_target_volume(dvol)
    candidates = sp_root.get_snapshot_list()
    if candidates:
        sp_root.delete_snapshot(random.choice(candidates))
def create_image(vm_obj, dvol):
    """Create a data-volume template image from dvol, verify it, then remove it."""
    bs_list = test_lib.lib_get_backup_storage_list_by_vm(vm_obj.vm)
    option = test_util.ImageOption()
    option.set_data_volume_uuid(dvol.volume.uuid)
    option.set_name('image_resize_template')
    option.set_backup_storage_uuid_list([bs_list[0].uuid])
    created = img_ops.create_data_volume_template(option)
    wrapper = zstack_image_header.ZstackTestImage()
    wrapper.set_creation_option(option)
    wrapper.set_image(created)
    # Check the template is usable, then clean it up completely.
    wrapper.check()
    wrapper.delete()
    wrapper.expunge()
def revert_backup(vm_obj, dvol):
    """Revert the volume from a random stashed backup, consuming it from backup_list."""
    chosen = backup_list.pop(random.randint(0, len(backup_list) - 1))
    vol_ops.revert_volume_from_backup(chosen.uuid)
def backup_image(vm_obj, dvol):
    """Create a data template on the image store from a random stashed backup.

    The returned template object is intentionally discarded (it was
    previously bound to an unused local).
    """
    cond = res_ops.gen_query_conditions("type", '=', "ImageStoreBackupStorage")
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0]
    chosen = random.choice(backup_list)
    # NOTE(review): the created template is not registered for cleanup —
    # confirm teardown elsewhere handles it.
    img_ops.create_data_template_from_backup(bs.uuid, chosen.uuid)
def resize_dvol(vm_obj, dvol):
    """Grow the data volume by exactly 1 GiB, then refresh the VM object."""
    target_size = 1024 * 1024 * 1024 + int(dvol.volume.size)
    vol_ops.resize_data_volume(dvol.volume.uuid, target_size)
    vm_obj.update()
def back_up(vm_obj, dvol):
    """Back up the data volume onto the image store backup storage.

    The new backup becomes the module-global `backup` and is also stashed
    in backup_list for later revert/template operations.
    """
    global backup
    cond = res_ops.gen_query_conditions("type", '=', "ImageStoreBackupStorage")
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0]
    option = test_util.BackupOption()
    option.set_name("test_compare")
    option.set_volume_uuid(dvol.volume.uuid)
    option.set_backupStorage_uuid(bs.uuid)
    backup = vol_ops.create_backup(option)
    backup_list.append(backup)
def print_path(Path):
    """Pretty-print each recorded op sequence as 'op1 --> op2 --> ...'."""
    print("=" * 43 + "PATH" + "=" * 43)
    for ops in Path:
        print(" --> ".join(ops))
    print("=" * 90)
def test():
    """Entry point: run the flavor-selected volume-op / power-state sequence.

    Creates a test VM with a small data volume plus a co-located utility
    VM, executes the ops from case_flavor[CASE_FLAVOR], then verifies a
    final backup against the live volume bit-for-bit.
    """
    global test_obj_dict, VOL_OPS, VM_STATE_OPS, utility_vm, backup
    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    DVOL_OP = flavor['vm_op']
    STATE_OP = flavor['state_op']
    ps = res_ops.query_resource(res_ops.PRIMARY_STORAGE)[0]
    if ps.type == "AliyunNAS":
        test_util.test_skip("VolumeBackup does not support AliyunNAS for now")
    vm_name = "test_vm"
    utility_vm_name = "utility_vm"
    # Pick any non-system Linux root-volume template with the sg-test name.
    cond = res_ops.gen_query_conditions("system", '=', "false")
    cond = res_ops.gen_query_conditions("mediaType", '=', "RootVolumeTemplate", cond)
    cond = res_ops.gen_query_conditions("platform", '=', "Linux", cond)
    cond = res_ops.gen_query_conditions("name", '=', "image_for_sg_test", cond)
    img_name = res_ops.query_resource(res_ops.IMAGE, cond)[0].name
    cond = res_ops.gen_query_conditions("category", '=', "Private")
    l3_name = res_ops.query_resource(res_ops.L3_NETWORK, cond)[0].name
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    disk_offering_uuids = [disk_offering.uuid]
    vm = test_stub.create_vm(vm_name, img_name, l3_name, disk_offering_uuids=disk_offering_uuids)
    vm.check()
    test_obj_dict.add_vm(vm)
    # The utility VM must share a host with the test VM (used for snapshot checks).
    hostuuid = vm.get_vm().hostUuid
    utility_vm = test_stub.create_vm(utility_vm_name, img_name, l3_name, host_uuid=hostuuid)
    utility_vm.check()
    test_obj_dict.add_vm(utility_vm)
    dvol = zstack_volume_header.ZstackTestVolume()
    dvol.set_volume(test_lib.lib_get_data_volumes(vm.get_vm())[0])
    dvol.set_state(volume_header.ATTACHED)
    dvol.set_target_vm(vm)
    test_obj_dict.add_volume(dvol)
    # Backup/revert flavors need a backup in place before the main loop.
    if "VM_TEST_BACKUP_IMAGE" in DVOL_OP or "VM_TEST_REVERT_BACKUP" in DVOL_OP:
        vm_op_test(vm, dvol, "DVOL_BACKUP")
    # Snapshot-deletion flavors need an existing snapshot to delete.
    if "DVOL_DEL_SNAPSHOT" in DVOL_OP:
        vm_op_test(vm, dvol, "DVOL_TEST_SNAPSHOT")
    for i in DVOL_OP:
        # Interleave a power-state op before each volume op.
        vm_op_test(vm, dvol, random.choice(STATE_OP))
        if not backup_list and "VM_TEST_BACKUP_IMAGE" == i:
            # No backup available to build an image from; skip gracefully.
            i = "VM_TEST_NONE"
        vm_op_test(vm, dvol, i)
    if vm.state == "Stopped":
        vm.start()
        vm.check()
    if test_lib.lib_is_vm_l3_has_vr(vm.vm):
        test_lib.TestHarness = test_lib.TestHarnessVR
    # Dirty the data volume, then back it up while the VM is suspended and
    # compare the backup against the live volume.
    cmd = "dd if=/dev/urandom of=/dev/vdb bs=512k count=1"
    test_lib.lib_execute_command_in_vm(vm.vm,cmd)
    vm.suspend()
    vm_op_test(vm, dvol, "DVOL_BACKUP")
    if ps.type != inventory.CEPH_PRIMARY_STORAGE_TYPE:
        compare(ps, vm, dvol, backup)
    vm.resume()
    print_path(Path)
    test_lib.lib_error_cleanup(test_obj_dict)
def error_cleanup():
    """Dump the recorded op paths, then tear down every tracked test object."""
    print_path(Path)
    test_lib.lib_error_cleanup(test_obj_dict)
def compare(ps, vm, dvol, backup):
    """Verify the backup matches the live data volume bit-for-bit.

    Pulls the backup image from the image store onto the VM's host via
    zstcli, then runs `qemu-img compare` against the volume's install
    path; fails the test on any mismatch.
    """
    test_util.test_logger("-----------------compare----------------")
    # find vm_host
    host = test_lib.lib_find_host_by_vm(vm.vm)
    cond = res_ops.gen_query_conditions("type", '=', "ImageStoreBackupStorage")
    bs = res_ops.query_resource(res_ops.BACKUP_STORAGE, cond)[0]
    cond = res_ops.gen_query_conditions("uuid", '=', dvol.volume.uuid)
    current_volume = res_ops.query_resource(res_ops.VOLUME, cond)[0]
    vol_path = current_volume.installPath
    # SharedBlock volumes live on LVM: rewrite the path as /dev/<vg>/<lv>.
    if ps.type == "SharedBlock":
        vol_path = "/dev/" + current_volume.installPath.split("/")[2] + "/" + current_volume.installPath.split("/")[3]
    test_util.test_logger(vol_path)
    # Image-store install paths look like <proto>://<bs>/<name>/<id>.
    name = backup.backupStorageRefs[0].installPath.split("/")[2]
    id = backup.backupStorageRefs[0].installPath.split("/")[3]
    # compare vm_root_volume & image
    cmd = "mkdir /root/%s;" \
          "/usr/local/zstack/imagestore/bin/zstcli " \
          "-rootca=/var/lib/zstack/imagestorebackupstorage/package/certs/ca.pem " \
          "-url=%s:8000 " \
          "pull -installpath /root/%s/old.qcow2 %s:%s;" \
          "qemu-img compare %s /root/%s/old.qcow2;" % (id, bs.hostname, id, name, id, vol_path, id)
    # clean image
    result = test_lib.lib_execute_ssh_cmd(host.managementIp, "root", "password", cmd, timeout=300)
    if result != "Images are identical.\n":
        test_util.test_fail("compare vm_root_volume & image created by backup")
| |
"""SCons.Defaults
Builders and other things for the local site. Here's where we'll
duplicate the functionality of autoconf until we move it into the
installation procedure or use something like qmconf.
The code that reads the registry to find MSVC components was borrowed
from distutils.msvccompiler.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Defaults.py 3897 2009/01/13 06:45:54 scons"
import os
import os.path
import errno
import shutil
import stat
import string
import time
import types
import sys
import SCons.Action
import SCons.Builder
import SCons.CacheDir
import SCons.Environment
import SCons.PathList
import SCons.Subst
import SCons.Tool
# A placeholder for a default Environment (for fetching source files
# from source code management systems and the like). This must be
# initialized later, after the top-level directory is set by the calling
# interface.
_default_env = None
# Lazily instantiate the default environment so the overhead of creating
# it doesn't apply when it's not needed.
def _fetch_DefaultEnvironment(*args, **kw):
    """
    Returns the already-created default construction environment.

    All arguments are accepted and ignored so this can transparently
    replace DefaultEnvironment() once the environment exists.
    """
    global _default_env
    return _default_env
def DefaultEnvironment(*args, **kw):
    """
    Initial public entry point for creating the default construction
    Environment.
    After creating the environment, we overwrite our name
    (DefaultEnvironment) with the _fetch_DefaultEnvironment() function,
    which more efficiently returns the initialized default construction
    environment without checking for its existence.
    (This function still exists with its _default_check because someone
    else (*cough* Script/__init__.py *cough*) may keep a reference
    to this function. So we can't use the fully functional idiom of
    having the name originally be a something that *only* creates the
    construction environment and then overwrites the name.)
    """
    global _default_env
    if not _default_env:
        import SCons.Util
        # Python 2 idiom: apply(f, args, kw) == f(*args, **kw).
        _default_env = apply(SCons.Environment.Environment, args, kw)
        # Default decider: content (MD5) when hashing is available,
        # otherwise fall back to timestamps.
        if SCons.Util.md5:
            _default_env.Decider('MD5')
        else:
            _default_env.Decider('timestamp-match')
        global DefaultEnvironment
        # Rebind the public name to the cheap fetcher for later calls.
        DefaultEnvironment = _fetch_DefaultEnvironment
        _default_env._CacheDir_path = None
    return _default_env
# Emitters for setting the shared attribute on object files,
# and an action for checking that all of the source files
# going into a shared library are, in fact, shared.
def StaticObjectEmitter(target, source, env):
    """Emitter for static objects: flag every target node as not shared."""
    for node in target:
        node.attributes.shared = None
    return (target, source)
def SharedObjectEmitter(target, source, env):
    """Emitter for shared objects: flag every target node as shared."""
    for node in target:
        node.attributes.shared = 1
    return (target, source)
def SharedFlagChecker(source, target, env):
    """Pre-link check: every source of a shared target must itself be shared.

    Skipped entirely when $STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME is set
    to anything other than '0'/''/'False'.
    """
    same = env.subst('$STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME')
    if same == '0' or same == '' or same == 'False':
        for src in source:
            try:
                shared = src.attributes.shared
            except AttributeError:
                # Nodes without the attribute are treated as static.
                shared = None
            if not shared:
                raise SCons.Errors.UserError, "Source file: %s is static and is not compatible with shared target: %s" % (src, target[0])
# Action wrapper with no display string (the check itself prints nothing).
SharedCheck = SCons.Action.Action(SharedFlagChecker, None)
# Some people were using these variable name before we made
# SourceFileScanner part of the public interface. Don't break their
# SConscript files until we've given them some fair warning and a
# transition period.
CScan = SCons.Tool.CScanner
DScan = SCons.Tool.DScanner
LaTeXScan = SCons.Tool.LaTeXScanner
ObjSourceScan = SCons.Tool.SourceFileScanner
ProgScan = SCons.Tool.ProgramScanner
# These aren't really tool scanners, so they don't quite belong with
# the rest of those in Tool/__init__.py, but I'm not sure where else
# they should go. Leave them here for now.
import SCons.Scanner.Dir
DirScanner = SCons.Scanner.Dir.DirScanner()
DirEntryScanner = SCons.Scanner.Dir.DirEntryScanner()
# Actions for common languages.  Each Action pairs a command variable
# with its optional *COMSTR display override (shown instead of the raw
# command line when the user sets it).
CAction = SCons.Action.Action("$CCCOM", "$CCCOMSTR")
ShCAction = SCons.Action.Action("$SHCCCOM", "$SHCCCOMSTR")
CXXAction = SCons.Action.Action("$CXXCOM", "$CXXCOMSTR")
ShCXXAction = SCons.Action.Action("$SHCXXCOM", "$SHCXXCOMSTR")
ASAction = SCons.Action.Action("$ASCOM", "$ASCOMSTR")
ASPPAction = SCons.Action.Action("$ASPPCOM", "$ASPPCOMSTR")
LinkAction = SCons.Action.Action("$LINKCOM", "$LINKCOMSTR")
ShLinkAction = SCons.Action.Action("$SHLINKCOM", "$SHLINKCOMSTR")
LdModuleLinkAction = SCons.Action.Action("$LDMODULECOM", "$LDMODULECOMSTR")
# Common tasks that we allow users to perform in platform-independent
# ways by creating ActionFactory instances.
ActionFactory = SCons.Action.ActionFactory
def get_paths_str(dest):
    """Format dest for display: '"x"' for a scalar, '["a", "b"]' for a list."""
    if SCons.Util.is_List(dest):
        # If dest is a list, we need to manually call str() on each element
        quoted = ['"' + str(element) + '"' for element in dest]
        return '[' + string.join(quoted, ', ') + ']'
    else:
        return '"' + str(dest) + '"'
def chmod_func(dest, mode):
    """Apply os.chmod(mode) to each path in dest (scalar or list of nodes/strings)."""
    # Cached Node info becomes stale once the file changes on disk.
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for element in dest:
        os.chmod(str(element), mode)
def chmod_strfunc(dest, mode):
    """Display form for the Chmod action; mode shown in octal."""
    return 'Chmod(%s, 0%o)' % (get_paths_str(dest), mode)
Chmod = ActionFactory(chmod_func, chmod_strfunc)
def copy_func(dest, src):
    """Copy src to dest: a list of files into a directory, a single file,
    or an entire directory tree (with symlinks preserved)."""
    SCons.Node.FS.invalidate_node_memos(dest)
    if SCons.Util.is_List(src) and os.path.isdir(dest):
        # Multiple sources into an existing directory.
        for name in src:
            shutil.copy2(name, dest)
        return 0
    if os.path.isfile(src):
        return shutil.copy2(src, dest)
    return shutil.copytree(src, dest, 1)
Copy = ActionFactory(copy_func,
                     lambda dest, src: 'Copy("%s", "%s")' % (dest, src),
                     convert=str)
def delete_func(dest, must_exist=0):
    """Remove each entry in dest (file via unlink, directory via rmtree).

    Missing entries are silently skipped unless must_exist is true, in
    which case os.unlink raises for them.
    """
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for entry in dest:
        path = str(entry)
        if not must_exist and not os.path.exists(path):
            continue
        if not os.path.exists(path) or os.path.isfile(path):
            os.unlink(path)
        else:
            shutil.rmtree(path, 1)
def delete_strfunc(dest, must_exist=0):
    """Display form for the Delete action."""
    return 'Delete(%s)' % get_paths_str(dest)
Delete = ActionFactory(delete_func, delete_strfunc)
def mkdir_func(dest):
    """Create each directory in dest, including intermediate directories.

    An already-existing directory is not an error; any other makedirs
    failure propagates.
    """
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for entry in dest:
        try:
            os.makedirs(str(entry))
        except os.error, e:
            # NOTE(review): `p` is assigned but never used.
            p = str(entry)
            if e[0] == errno.EEXIST and os.path.isdir(str(entry)):
                pass # not an error if already exists
            else:
                raise
Mkdir = ActionFactory(mkdir_func,
                      lambda dir: 'Mkdir(%s)' % get_paths_str(dir))
def move_func(dest, src):
    """Rename src to dest, invalidating cached Node info for both paths."""
    for path in (dest, src):
        SCons.Node.FS.invalidate_node_memos(path)
    os.rename(src, dest)
Move = ActionFactory(move_func,
                     lambda dest, src: 'Move("%s", "%s")' % (dest, src),
                     convert=str)
def touch_func(dest):
    """Update atime/mtime on each entry in dest, creating missing files.

    Mirrors the shell `touch` utility: an existing file keeps its current
    atime; a newly created file gets atime == mtime == now.
    """
    SCons.Node.FS.invalidate_node_memos(dest)
    if not SCons.Util.is_List(dest):
        dest = [dest]
    for file in dest:
        file = str(file)
        mtime = int(time.time())
        if os.path.exists(file):
            atime = os.path.getatime(file)
        else:
            # Bug fix: close the handle immediately instead of leaking the
            # open file descriptor until garbage collection.
            open(file, 'w').close()
            atime = mtime
        os.utime(file, (atime, mtime))
Touch = ActionFactory(touch_func,
                      lambda file: 'Touch(%s)' % get_paths_str(file))
# Internal utility functions
def _concat(prefix, list, suffix, env, f=lambda x: x, target=None, source=None):
    """
    Creates a new list from 'list' by first interpolating each element
    in the list using the 'env' dictionary and then calling f on the
    list, and finally calling _concat_ixes to concatenate 'prefix' and
    'suffix' onto each element of the list.

    `f` defaults to the identity; construction variables pass RDirs etc.
    here to rewrite paths relative to target/source.
    """
    if not list:
        return list
    l = f(SCons.PathList.PathList(list).subst_path(env, target, source))
    # A filter returning None means "leave the list as-is".
    if not l is None:
        list = l
    return _concat_ixes(prefix, list, suffix, env)
def _concat_ixes(prefix, list, suffix, env):
    """
    Creates a new list from 'list' by concatenating the 'prefix' and
    'suffix' arguments onto each element of the list. A trailing space
    on 'prefix' or leading space on 'suffix' will cause them to be put
    into separate list elements rather than being concatenated.
    """
    result = []
    # ensure that prefix and suffix are strings
    prefix = str(env.subst(prefix, SCons.Subst.SUBST_RAW))
    suffix = str(env.subst(suffix, SCons.Subst.SUBST_RAW))
    for x in list:
        # File nodes pass through untouched so later path manipulation
        # still sees real nodes, not strings.
        if isinstance(x, SCons.Node.FS.File):
            result.append(x)
            continue
        x = str(x)
        if x:
            if prefix:
                if prefix[-1] == ' ':
                    # Trailing space: emit the prefix as its own element.
                    result.append(prefix[:-1])
                elif x[:len(prefix)] != prefix:
                    # Only prepend when the element doesn't already carry it.
                    x = prefix + x
            result.append(x)
            if suffix:
                if suffix[0] == ' ':
                    result.append(suffix[1:])
                elif x[-len(suffix):] != suffix:
                    # Append the suffix onto the element just pushed.
                    result[-1] = result[-1]+suffix
    return result
def _stripixes(prefix, list, suffix, stripprefixes, stripsuffixes, env, c=None):
    """
    This is a wrapper around _concat()/_concat_ixes() that checks for the
    existence of prefixes or suffixes on list elements and strips them
    where it finds them. This is used by tools (like the GNU linker)
    that need to turn something like 'libfoo.a' into '-lfoo'.
    """
    if not list:
        return list
    if not callable(c):
        env_c = env['_concat']
        if env_c != _concat and callable(env_c):
            # There's a custom _concat() method in the construction
            # environment, and we've allowed people to set that in
            # the past (see test/custom-concat.py), so preserve the
            # backwards compatibility.
            c = env_c
        else:
            c = _concat_ixes
    stripprefixes = map(env.subst, SCons.Util.flatten(stripprefixes))
    stripsuffixes = map(env.subst, SCons.Util.flatten(stripsuffixes))
    stripped = []
    for l in SCons.PathList.PathList(list).subst_path(env, None, None):
        # File nodes pass through untouched.
        if isinstance(l, SCons.Node.FS.File):
            stripped.append(l)
            continue
        if not SCons.Util.is_String(l):
            l = str(l)
        for stripprefix in stripprefixes:
            lsp = len(stripprefix)
            if l[:lsp] == stripprefix:
                l = l[lsp:]
                # Do not strip more than one prefix
                break
        for stripsuffix in stripsuffixes:
            lss = len(stripsuffix)
            if l[-lss:] == stripsuffix:
                l = l[:-lss]
                # Do not strip more than one suffix
                break
        stripped.append(l)
    return c(prefix, stripped, suffix, env)
def _defines(prefix, defs, suffix, env, c=_concat_ixes):
    """A wrapper around _concat_ixes that turns a list or string
    into a list of C preprocessor command-line definitions.

    Accepts a list (elements may be (name, value) pairs), a dict, or a
    single scalar.
    """
    if SCons.Util.is_List(defs):
        l = []
        for d in defs:
            # (name, value) pairs become NAME=VALUE.
            if SCons.Util.is_List(d) or type(d) is types.TupleType:
                l.append(str(d[0]) + '=' + str(d[1]))
            else:
                l.append(str(d))
    elif SCons.Util.is_Dict(defs):
        # The items in a dictionary are stored in random order, but
        # if the order of the command-line options changes from
        # invocation to invocation, then the signature of the command
        # line will change and we'll get random unnecessary rebuilds.
        # Consequently, we have to sort the keys to ensure a
        # consistent order...
        l = []
        keys = defs.keys()
        keys.sort()
        for k in keys:
            v = defs[k]
            if v is None:
                # A bare key defines the macro with no value.
                l.append(str(k))
            else:
                l.append(str(k) + '=' + str(v))
    else:
        l = [str(defs)]
    return c(prefix, env.subst_path(l), suffix, env)
class NullCmdGenerator:
    """A do-nothing command generator.

    An instance is constructed with a command string and simply hands it
    back unchanged whenever it is called, making it a drop-in stand-in
    for real command generators.

    Example usage:
    env["DO_NOTHING"] = NullCmdGenerator
    env["LINKCOM"] = "${DO_NOTHING('$LINK $SOURCES $TARGET')}"
    """
    def __init__(self, cmd):
        # Remember the command verbatim; __call__ returns it as-is.
        self.cmd = cmd
    def __call__(self, target, source, env, for_signature=None):
        return self.cmd
class Variable_Method_Caller:
    """A class for finding a construction variable on the stack and
    calling one of its methods.
    We use this to support "construction variables" in our string
    eval()s that actually stand in for methods--specifically, use
    of "RDirs" in call to _concat that should actually execute the
    "TARGET.RDirs" method. (We used to support this by creating a little
    "build dictionary" that mapped RDirs to the method, but this got in
    the way of Memoizing construction environments, because we had to
    create new environment objects to hold the variables.)
    """
    def __init__(self, variable, method):
        # variable: name to search for in callers' frame locals;
        # method: attribute to invoke on the value found.
        self.variable = variable
        self.method = method
    def __call__(self, *args, **kw):
        # Deliberately raise and catch a dummy exception purely to obtain
        # the calling frame via the traceback, avoiding sys._getframe.
        try: 1/0
        except ZeroDivisionError:
            # Don't start iterating with the current stack-frame to
            # prevent creating reference cycles (f_back is safe).
            frame = sys.exc_info()[2].tb_frame.f_back
        variable = self.variable
        # Walk up the stack until a frame with a truthy binding is found.
        while frame:
            if frame.f_locals.has_key(variable):
                v = frame.f_locals[variable]
                if v:
                    method = getattr(v, self.method)
                    return apply(method, args, kw)
            frame = frame.f_back
        return None
# The base construction-variable dictionary every new Environment starts
# from; tools and user settings layer their values on top of these.
ConstructionEnvironment = {
    'BUILDERS'      : {},
    'SCANNERS'      : [],
    'CONFIGUREDIR'  : '#/.sconf_temp',
    'CONFIGURELOG'  : '#/config.log',
    'CPPSUFFIXES'   : SCons.Tool.CSuffixes,
    'DSUFFIXES'     : SCons.Tool.DSuffixes,
    'ENV'           : {},
    'IDLSUFFIXES'   : SCons.Tool.IDLSuffixes,
    'LATEXSUFFIXES' : SCons.Tool.LaTeXSuffixes,
    '_concat'       : _concat,
    '_defines'      : _defines,
    '_stripixes'    : _stripixes,
    # Flag-expansion helpers; the $( $) markers exclude path flags from
    # command signatures so moving a source tree doesn't force rebuilds.
    '_LIBFLAGS'     : '${_concat(LIBLINKPREFIX, LIBS, LIBLINKSUFFIX, __env__)}',
    '_LIBDIRFLAGS'  : '$( ${_concat(LIBDIRPREFIX, LIBPATH, LIBDIRSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
    '_CPPINCFLAGS'  : '$( ${_concat(INCPREFIX, CPPPATH, INCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)',
    '_CPPDEFFLAGS'  : '${_defines(CPPDEFPREFIX, CPPDEFINES, CPPDEFSUFFIX, __env__)}',
    'TEMPFILE'      : NullCmdGenerator,
    # Stack-walking proxies that forward to the TARGET node's methods.
    'Dir'           : Variable_Method_Caller('TARGET', 'Dir'),
    'Dirs'          : Variable_Method_Caller('TARGET', 'Dirs'),
    'File'          : Variable_Method_Caller('TARGET', 'File'),
    'RDirs'         : Variable_Method_Caller('TARGET', 'RDirs'),
}
| |
"""A basic extended attributes (xattr) implementation for Linux and MacOS X
"""
import errno
import os
import sys
import tempfile
from ctypes import CDLL, create_string_buffer, c_ssize_t, c_size_t, c_char_p, c_int, c_uint32, get_errno
from ctypes.util import find_library
def is_enabled():
    """Determine if xattr is enabled on the filesystem

    Probes by writing and reading back a 'user.name' attribute on a
    temporary file.
    """
    with tempfile.NamedTemporaryFile() as tmp:
        try:
            setxattr(tmp.fileno(), 'user.name', b'value')
        except OSError:
            return False
        else:
            return getxattr(tmp.fileno(), 'user.name') == b'value'
def get_all(path, follow_symlinks=True):
    """Return a {name: value} dict of all xattrs on path.

    Filesystems without xattr support (ENOTSUP) and permission problems
    (EPERM) yield an empty dict; any other OSError propagates.
    """
    try:
        return dict((name, getxattr(path, name, follow_symlinks=follow_symlinks))
                    for name in listxattr(path, follow_symlinks=follow_symlinks))
    except OSError as e:
        if e.errno in (errno.ENOTSUP, errno.EPERM):
            return {}
        # Bug fix: other errors used to be swallowed here, making the
        # function fall through and implicitly return None.
        raise
libc = CDLL(find_library('c'), use_errno=True)
def _check(rv, path=None):
if rv < 0:
raise OSError(get_errno(), path)
return rv
if sys.platform.startswith('linux'):
    # Prototype the fd-based (f*) and no-follow (l*) variants so ctypes
    # marshals buffer pointers and sizes correctly on 64-bit platforms.
    libc.llistxattr.argtypes = (c_char_p, c_char_p, c_size_t)
    libc.llistxattr.restype = c_ssize_t
    libc.flistxattr.argtypes = (c_int, c_char_p, c_size_t)
    libc.flistxattr.restype = c_ssize_t
    libc.lsetxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t, c_int)
    libc.lsetxattr.restype = c_int
    libc.fsetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t, c_int)
    libc.fsetxattr.restype = c_int
    libc.lgetxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t)
    libc.lgetxattr.restype = c_ssize_t
    libc.fgetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t)
    libc.fgetxattr.restype = c_ssize_t
    def listxattr(path, *, follow_symlinks=True):
        """Return xattr names on path (str/bytes path or int fd) as list of str.

        POSIX ACL attributes (system.posix_acl_*) are filtered out.
        """
        if isinstance(path, str):
            path = os.fsencode(path)
        # Select the variant matching the path type / symlink policy.
        if isinstance(path, int):
            func = libc.flistxattr
        elif follow_symlinks:
            func = libc.listxattr
        else:
            func = libc.llistxattr
        # First call sizes the buffer; second call fills it.
        n = _check(func(path, None, 0), path)
        if n == 0:
            return []
        namebuf = create_string_buffer(n)
        n2 = _check(func(path, namebuf, n), path)
        if n2 != n:
            # The attribute list changed between the two calls.
            raise Exception('listxattr failed')
        return [os.fsdecode(name) for name in namebuf.raw.split(b'\0')[:-1] if not name.startswith(b'system.posix_acl_')]
    def getxattr(path, name, *, follow_symlinks=True):
        """Return the value of xattr `name` as bytes (None when empty)."""
        name = os.fsencode(name)
        if isinstance(path, str):
            path = os.fsencode(path)
        if isinstance(path, int):
            func = libc.fgetxattr
        elif follow_symlinks:
            func = libc.getxattr
        else:
            func = libc.lgetxattr
        n = _check(func(path, name, None, 0))
        if n == 0:
            return
        valuebuf = create_string_buffer(n)
        n2 = _check(func(path, name, valuebuf, n), path)
        if n2 != n:
            raise Exception('getxattr failed')
        return valuebuf.raw
    def setxattr(path, name, value, *, follow_symlinks=True):
        """Set xattr `name` to `value` on path (None/empty stores a zero-length value)."""
        name = os.fsencode(name)
        value = value and os.fsencode(value)
        if isinstance(path, str):
            path = os.fsencode(path)
        if isinstance(path, int):
            func = libc.fsetxattr
        elif follow_symlinks:
            func = libc.setxattr
        else:
            func = libc.lsetxattr
        _check(func(path, name, value, len(value) if value else 0, 0), path)
elif sys.platform == 'darwin':
    # macOS uses one family of calls taking extra (position, options)
    # parameters; symlink handling is selected via XATTR_NOFOLLOW.
    libc.listxattr.argtypes = (c_char_p, c_char_p, c_size_t, c_int)
    libc.listxattr.restype = c_ssize_t
    # NOTE(review): flistxattr is prototyped without the options arg but
    # is called with one below — confirm against the macOS headers.
    libc.flistxattr.argtypes = (c_int, c_char_p, c_size_t)
    libc.flistxattr.restype = c_ssize_t
    libc.setxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t, c_uint32, c_int)
    libc.setxattr.restype = c_int
    libc.fsetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t, c_uint32, c_int)
    libc.fsetxattr.restype = c_int
    libc.getxattr.argtypes = (c_char_p, c_char_p, c_char_p, c_size_t, c_uint32, c_int)
    libc.getxattr.restype = c_ssize_t
    libc.fgetxattr.argtypes = (c_int, c_char_p, c_char_p, c_size_t, c_uint32, c_int)
    libc.fgetxattr.restype = c_ssize_t
    XATTR_NOFOLLOW = 0x0001
    def listxattr(path, *, follow_symlinks=True):
        """Return xattr names on path (str/bytes path or int fd) as list of str."""
        func = libc.listxattr
        flags = 0
        if isinstance(path, str):
            path = os.fsencode(path)
        if isinstance(path, int):
            func = libc.flistxattr
        elif not follow_symlinks:
            flags = XATTR_NOFOLLOW
        # Size query, then fill; a mismatch means the list changed in between.
        n = _check(func(path, None, 0, flags), path)
        if n == 0:
            return []
        namebuf = create_string_buffer(n)
        n2 = _check(func(path, namebuf, n, flags), path)
        if n2 != n:
            raise Exception('listxattr failed')
        return [os.fsdecode(name) for name in namebuf.raw.split(b'\0')[:-1]]
    def getxattr(path, name, *, follow_symlinks=True):
        """Return the value of xattr `name` as bytes (None when empty)."""
        name = os.fsencode(name)
        func = libc.getxattr
        flags = 0
        if isinstance(path, str):
            path = os.fsencode(path)
        if isinstance(path, int):
            func = libc.fgetxattr
        elif not follow_symlinks:
            flags = XATTR_NOFOLLOW
        # The extra 0 is the `position` parameter (resource forks); always 0 here.
        n = _check(func(path, name, None, 0, 0, flags))
        if n == 0:
            return
        valuebuf = create_string_buffer(n)
        n2 = _check(func(path, name, valuebuf, n, 0, flags), path)
        if n2 != n:
            raise Exception('getxattr failed')
        return valuebuf.raw
    def setxattr(path, name, value, *, follow_symlinks=True):
        """Set xattr `name` to `value` on path."""
        name = os.fsencode(name)
        value = value and os.fsencode(value)
        func = libc.setxattr
        flags = 0
        if isinstance(path, str):
            path = os.fsencode(path)
        if isinstance(path, int):
            func = libc.fsetxattr
        elif not follow_symlinks:
            flags = XATTR_NOFOLLOW
        _check(func(path, name, value, len(value) if value else 0, 0, flags), path)
elif sys.platform.startswith('freebsd'):
    # FreeBSD's extattr API takes an explicit namespace argument and
    # returns names length-prefixed rather than NUL-separated.
    EXTATTR_NAMESPACE_USER = 0x0001
    libc.extattr_list_fd.argtypes = (c_int, c_int, c_char_p, c_size_t)
    libc.extattr_list_fd.restype = c_ssize_t
    libc.extattr_list_link.argtypes = (c_char_p, c_int, c_char_p, c_size_t)
    libc.extattr_list_link.restype = c_ssize_t
    libc.extattr_list_file.argtypes = (c_char_p, c_int, c_char_p, c_size_t)
    libc.extattr_list_file.restype = c_ssize_t
    libc.extattr_get_fd.argtypes = (c_int, c_int, c_char_p, c_char_p, c_size_t)
    libc.extattr_get_fd.restype = c_ssize_t
    libc.extattr_get_link.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t)
    libc.extattr_get_link.restype = c_ssize_t
    libc.extattr_get_file.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t)
    libc.extattr_get_file.restype = c_ssize_t
    libc.extattr_set_fd.argtypes = (c_int, c_int, c_char_p, c_char_p, c_size_t)
    libc.extattr_set_fd.restype = c_int
    libc.extattr_set_link.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t)
    libc.extattr_set_link.restype = c_int
    libc.extattr_set_file.argtypes = (c_char_p, c_int, c_char_p, c_char_p, c_size_t)
    libc.extattr_set_file.restype = c_int
    def listxattr(path, *, follow_symlinks=True):
        """Return user-namespace xattr names on path as a list of str."""
        ns = EXTATTR_NAMESPACE_USER
        if isinstance(path, str):
            path = os.fsencode(path)
        if isinstance(path, int):
            func = libc.extattr_list_fd
        elif follow_symlinks:
            func = libc.extattr_list_file
        else:
            func = libc.extattr_list_link
        # Size query, then fill.
        n = _check(func(path, ns, None, 0), path)
        if n == 0:
            return []
        namebuf = create_string_buffer(n)
        n2 = _check(func(path, ns, namebuf, n), path)
        if n2 != n:
            raise Exception('listxattr failed')
        names = []
        # Decode the length-prefixed list: one length byte, then the name.
        mv = memoryview(namebuf.raw)
        while mv:
            length = mv[0]
            # Python < 3.3 returns bytes instead of int
            if isinstance(length, bytes):
                length = ord(length)
            names.append(os.fsdecode(bytes(mv[1:1+length])))
            mv = mv[1+length:]
        return names
    def getxattr(path, name, *, follow_symlinks=True):
        """Return the value of user-namespace xattr `name` (None when empty)."""
        name = os.fsencode(name)
        if isinstance(path, str):
            path = os.fsencode(path)
        if isinstance(path, int):
            func = libc.extattr_get_fd
        elif follow_symlinks:
            func = libc.extattr_get_file
        else:
            func = libc.extattr_get_link
        n = _check(func(path, EXTATTR_NAMESPACE_USER, name, None, 0))
        if n == 0:
            return
        valuebuf = create_string_buffer(n)
        n2 = _check(func(path, EXTATTR_NAMESPACE_USER, name, valuebuf, n), path)
        if n2 != n:
            raise Exception('getxattr failed')
        return valuebuf.raw
    def setxattr(path, name, value, *, follow_symlinks=True):
        """Set user-namespace xattr `name` to `value` on path."""
        name = os.fsencode(name)
        value = value and os.fsencode(value)
        if isinstance(path, str):
            path = os.fsencode(path)
        if isinstance(path, int):
            func = libc.extattr_set_fd
        elif follow_symlinks:
            func = libc.extattr_set_file
        else:
            func = libc.extattr_set_link
        _check(func(path, EXTATTR_NAMESPACE_USER, name, value, len(value) if value else 0), path)
else:
    # Unsupported platform: degrade to no-op implementations so callers
    # can still import and call these functions unconditionally.
    def listxattr(path, *, follow_symlinks=True):
        return []
    def getxattr(path, name, *, follow_symlinks=True):
        pass
    def setxattr(path, name, value, *, follow_symlinks=True):
        pass
| |
#!/usr/bin/python
#
# This file is part of Mitsuba, a physically based rendering system.
#
# Copyright (c) 2007-2014 by Wenzel Jakob and others.
#
# Mitsuba is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License Version 3
# as published by the Free Software Foundation.
#
# Mitsuba is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from pprint import pprint
from boto.ec2.connection import EC2Connection
import os, sys, time, re, datetime, subprocess, base64, boto
# Configure this with the authentication data of your account
AWS_ACCESS_KEY_ID = 'XXXXXXXXXXXXXXXXXXXX'
AWS_SECRET_ACCESS_KEY = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
KEYPAIR_NAME = 'XXXXXXXXX'
# Where do you want to start nodes? (e.g. us-east-1, eu-west-1, etc..)
AWS_REGION = 'us-east-1'
# Ensure that the SSH private key file of the following name is
# located in the same directory as this script
SSH_PRIVATE_KEY_FILE = KEYPAIR_NAME + ".pem"
# Repository to be used for fetching Mitsuba Ubuntu packages
PKG_REPOSITORY = 'deb https://www.mitsuba-renderer.org binary/'
# Optional: when syncing additional files to the cluster nodes,
# you can specify the relevant S3 bucket here
S3_PATH = 's3://XXXXXX'
# Global EC2 connection handle; presumably assigned after connecting with
# the credentials above — set by code outside this excerpt.
conn = None
# AMI ID: Stateless Ubuntu Maverick 64bit
# One image per supported region; addNodes() selects the one for AWS_REGION.
ami_ids = {
    'ap-northeast-1' : 'ami-420fa443',
    'ap-southeast-1' : 'ami-12423c40',
    'eu-west-1' : 'ami-1b9ca96f',
    'us-east-1' : 'ami-08f40561',
    'us-west-1' : 'ami-a17e2ee4'
}
def remoteCommand(host, cmd, quiet = True):
    """Run *cmd* on *host* via SSH as the 'ubuntu' user.

    Returns the subprocess.Popen handle without waiting for completion.
    When *quiet* is true, stdout is captured in a pipe; otherwise it is
    forwarded to this script's stdout.
    """
    ssh_invocation = [
        'ssh', '-i', SSH_PRIVATE_KEY_FILE,
        '-o', 'StrictHostKeyChecking=no',
        '-o', 'LogLevel=ERROR',
        '-o', 'UserKnownHostsFile=/dev/null',
        'ubuntu@%s' % host,
        'bash -c \'%s\'' % cmd,
    ]
    destination = subprocess.PIPE if quiet else sys.stdout
    return subprocess.Popen(ssh_invocation, stdout=destination)
def remoteAdminCommand(host, cmd, quiet = True):
    """Run *cmd* on *host* via SSH with root privileges (sudo).

    Returns the subprocess.Popen handle without waiting for completion.
    When *quiet* is true, stdout is captured in a pipe; otherwise it is
    forwarded to this script's stdout.
    """
    ssh_invocation = [
        'ssh', '-i', SSH_PRIVATE_KEY_FILE,
        '-o', 'StrictHostKeyChecking=no',
        '-o', 'LogLevel=ERROR',
        '-o', 'UserKnownHostsFile=/dev/null',
        'ubuntu@%s' % host,
        'sudo bash -c \'%s\'' % cmd,
    ]
    destination = subprocess.PIPE if quiet else sys.stdout
    return subprocess.Popen(ssh_invocation, stdout=destination)
def parse_timestamp(s):
    """Parse an ISO 8601 timestamp string.

    Returns (datetime, tz offset in minutes) on success, or
    (None, None) when *s* does not match the expected format.
    """
    m = re.match(r""" ^
        (?P<year>-?[0-9]{4}) - (?P<month>[0-9]{2}) - (?P<day>[0-9]{2})
        T (?P<hour>[0-9]{2}) : (?P<minute>[0-9]{2}) : (?P<second>[0-9]{2})
        (?P<microsecond>\.[0-9]{1,6})?
        (?P<tz>
          Z | (?P<tz_hr>[-+][0-9]{2}) : (?P<tz_min>[0-9]{2})
        )?
        $ """, s, re.X)
    if m is not None:
        values = m.groupdict()
        if values["tz"] in ("Z", None):
            tz = 0
        else:
            # Bug fix: the minute component must carry the same sign as the
            # hour component ("-05:30" is -330 minutes, not -270).
            tz_hr = int(values["tz_hr"])
            tz_min = int(values["tz_min"])
            tz = tz_hr * 60 + (tz_min if tz_hr >= 0 else -tz_min)
        if values["microsecond"] is None:
            values["microsecond"] = 0
        else:
            # Strip the leading '.' and right-pad to a microsecond count.
            values["microsecond"] = values["microsecond"][1:]
            values["microsecond"] += "0" * (6 - len(values["microsecond"]))
        # items() instead of iteritems() so the code also runs on Python 3.
        values = dict((k, int(v)) for k, v in values.items()
                      if not k.startswith("tz"))
        try:
            return datetime.datetime(**values), tz
        except ValueError:
            pass
    return None, None
def addNodes(instanceType, nodeCount, groupName):
    """Launch *nodeCount* on-demand EC2 instances of *instanceType*.

    The group name is stored in the instance user data so later commands
    can address all nodes of one cluster. Blocks until all instances
    report the 'running' state; the guest OS may still be booting then.
    """
    print('Booting %i nodes of type %s (group name = "%s") ..' % (nodeCount, instanceType, groupName))
    ami_id = ami_ids[AWS_REGION]
    image = conn.get_image(ami_id)
    reservation = image.run(min_count=nodeCount, max_count=nodeCount,
        instance_type = instanceType, key_name = KEYPAIR_NAME,
        user_data=groupName)
    # Poll EC2 until every instance in the reservation reaches 'running'.
    while True:
        time.sleep(2)
        runningCount = 0
        for i in reservation.instances:
            i.update()  # refresh the cached state from EC2
            if i.state == u'running':
                runningCount += 1
        print("%i/%i nodes are ready." % (runningCount, nodeCount))
        if runningCount == nodeCount:
            print("All nodes are running. Note that the actual OS might still take a few minutes")
            print("until it is fully initialized. Please wait sufficiently long or the following")
            print("steps ('install', 'start', ..) will fail.")
            break;
def addSpotNodes(instanceType, nodeCount, maxPrice, groupName):
    """Submit *nodeCount* one-time spot instance requests of *instanceType*
    at a maximum hourly price of *maxPrice*, tagged with *groupName*."""
    print('Requesting %i spot nodes of type %s (group name = "%s", max. price=%f)..' % (nodeCount, instanceType, groupName, maxPrice))
    conn.request_spot_instances(str(maxPrice), ami_ids[AWS_REGION],
                                count=nodeCount,
                                instance_type=instanceType,
                                key_name=KEYPAIR_NAME,
                                user_data=groupName,
                                type='one-time')
    print('Done.')
def getGroup(instance):
    """Return the cluster group of *instance* (stored base64-encoded in
    its EC2 user data)."""
    attribute = conn.get_instance_attribute(instance.id, 'userData')
    return base64.b64decode(attribute['userData'])
def status():
    """Print all spot requests, then every non-terminated instance grouped
    by its launch group with type, uptime and private IP."""
    print("Querying spot instance requests...")
    spotInstances = conn.get_all_spot_instance_requests()
    for i in spotInstances:
        print(" %s: status=%s, price must be <= %.3f$" % (i.id, i.state, i.price))
    print("")
    print("Querying instances ...")
    reservations = conn.get_all_instances()
    instances = [i for r in reservations for i in r.instances]
    nodesByGroup = {}
    print("")
    for i in instances:
        if i.state != 'terminated':
            group = getGroup(i)
            if not group in nodesByGroup:
                nodesByGroup[group] = []
            spot_str = ""
            if i.spot_instance_request_id != None:
                spot_str = ", spot request: %s" % i.spot_instance_request_id
            # launch_time is an ISO 8601 string; uptime = now (UTC) - launch.
            dt, tz = parse_timestamp(i.launch_time)
            curTime = datetime.datetime(*time.gmtime()[:6])
            delta = curTime-dt
            # Split the sub-day remainder into hours/minutes for display.
            hours, remainder = divmod(delta.seconds, 3600)
            minutes, seconds = divmod(remainder, 60)
            nodesByGroup[group] += [" %s is %s (type: %s, running for: %id %ih %im, internal IP: %s%s)" % (i.public_dns_name, i.state, i.instance_type,
                delta.days, hours, minutes, i.private_ip_address, spot_str)]
    for g in nodesByGroup:
        print('Nodes in group "%s"' % g)
        print('===================')
        for n in nodesByGroup[g]:
            print(n)
def terminate(name):
    """Terminate the single running node whose public DNS name is *name*.

    Prints a notice if no such node exists. Note that EC2 still bills
    for partial accrued hours.
    """
    for reservation in conn.get_all_instances():
        for inst in reservation.instances:
            if inst.state == u'running' and inst.public_dns_name == name:
                print("Stopping node %s .." % inst.public_dns_name)
                inst.terminate()
                return
    print('Node could not be found or is not running')
def terminateAll(groupName):
    """Terminate every running node belonging to *groupName*."""
    print('Terminating all nodes in group \"%s\" ..' % groupName)
    for reservation in conn.get_all_instances():
        for inst in reservation.instances:
            if inst.state == u'running' and getGroup(inst) == groupName:
                print("Stopping node %s .." % inst.public_dns_name)
                inst.terminate()
def cancelSpot(id):
    """Cancel the spot instance request with the given *id* and report
    whether the cancellation was acknowledged."""
    print('Terminating spot request \"%s\" ..' % id)
    cancelled = conn.cancel_spot_instance_requests([id])
    for request in cancelled:
        if request.spot_instance_request_id == id:
            print('Success')
            return
    print('Failed!')
def cancelAllSpot():
    """Cancel every spot request that is still 'active' or 'open'."""
    print('Terminating all spot requests ...')
    for request in conn.get_all_spot_instance_requests():
        if request.state in ('active', 'open'):
            cancelSpot(request.id)
def install(groupName):
    """Install/upgrade Mitsuba (plus s3cmd) on all running nodes of
    *groupName* via apt, then wait until every install finishes."""
    print('Installing Mitsuba on all nodes of group "%s" ..' % groupName)
    reservations = conn.get_all_instances()
    instances = [i for r in reservations for i in r.instances]
    processes = []
    for i in instances:
        if i.state == u'running' and getGroup(i) == groupName:
            print("Sending command to node %s" % i.public_dns_name)
            # Registers the Mitsuba apt repository, purges any old package,
            # installs mitsuba + s3cmd non-interactively and hands /mnt to
            # the 'ubuntu' user as scratch space.
            processes += [remoteAdminCommand(i.public_dns_name, 'echo \"%s\" > /etc/apt/sources.list.d/mitsuba.list; dpkg --purge mitsuba; apt-get clean; export DEBIAN_FRONTEND=noninteractive; apt-get update; apt-get -q -y --allow-unauthenticated install mitsuba s3cmd; chown ubuntu /mnt' % PKG_REPOSITORY)]
    # Poll the SSH child processes until all of them have exited.
    while True:
        doneCount = 0
        time.sleep(2)
        for p in processes:
            # poll() returns None while the SSH process is still running.
            if p.poll() != None:
                doneCount += 1
        print("%i/%i nodes are ready." % (doneCount, len(processes)))
        if doneCount == len(processes):
            print("All nodes are ready.")
            break;
def systemLoad(groupName):
    """Print the 1/5/15-minute load averages of every running node in
    *groupName*; each node reports its own public hostname alongside."""
    print('Querying system load on all nodes of group "%s" ..' % groupName)
    reservations = conn.get_all_instances()
    instances = [i for r in reservations for i in r.instances]
    processes = []
    for i in instances:
        if i.state == u'running' and getGroup(i) == groupName:
            # quiet=False: the load line goes straight to our stdout.
            processes += [remoteCommand(i.public_dns_name, 'echo `cat /proc/loadavg | cut --delimiter=" " --fields=1-3` -- `ec2metadata --public-hostname`', False)]
    # Wait until every SSH child process has exited.
    while True:
        doneCount = 0
        time.sleep(1)
        for p in processes:
            if p.poll() != None:
                doneCount += 1
        if doneCount == len(processes):
            print("Done.")
            break;
def runCommand(cmd, groupName):
    """Run the shell command *cmd* on every running node of *groupName*
    and wait for all of them to finish (output goes to our stdout)."""
    print('Executing command "%s" on all nodes of group "%s" ..' % (cmd, groupName))
    reservations = conn.get_all_instances()
    instances = [i for r in reservations for i in r.instances]
    processes = []
    for i in instances:
        if i.state == u'running' and getGroup(i) == groupName:
            print("Sending command to node %s" % i.public_dns_name)
            # quiet=False: command output is forwarded to our stdout.
            processes += [remoteCommand(i.public_dns_name, cmd, False)]
    # Wait until every SSH child process has exited.
    while True:
        doneCount = 0
        time.sleep(1)
        for p in processes:
            if p.poll() != None:
                doneCount += 1
        if doneCount == len(processes):
            print("Done.")
            break;
def syncData(prefix, groupName):
    """Download every S3 object matching *prefix* into /mnt on all running
    nodes of *groupName*.

    Requires a local .s3cfg file (s3cmd credentials) next to this script;
    it is copied to each node before syncing.
    """
    print('Fetching S3 prefix "%s/%s" on all nodes of the group "%s" ..' % (S3_PATH, prefix, groupName))
    reservations = conn.get_all_instances()
    instances = [i for r in reservations for i in r.instances]
    processes = []
    for i in instances:
        if i.state == u'running' and getGroup(i) == groupName:
            print("Sending command to node %s" % i.public_dns_name)
            # Push the s3cmd credentials to the node's home directory first.
            os.system('scp -i %s -o UserKnownHostsFile=/dev/null -o LogLevel=ERROR -o StrictHostKeyChecking=no -q .s3cfg ubuntu@%s:' % (SSH_PRIVATE_KEY_FILE, i.public_dns_name))
            processes += [remoteCommand(i.public_dns_name, 'cd /mnt; s3cmd sync %s/%s .' % (S3_PATH, prefix))]
    # Wait until every sync has finished.
    while True:
        doneCount = 0
        time.sleep(2)
        for p in processes:
            if p.poll() != None:
                doneCount += 1
        print("%i/%i nodes are ready." % (doneCount, len(processes)))
        if doneCount == len(processes):
            print("All nodes are ready.")
            break;
def start(groupName):
    """Start (or restart) mtssrv on all running nodes of *groupName*.

    The last node becomes the "head" node: it connects to all the other
    nodes over the internal EC2 network and is the single address to
    enter in Mitsuba's network rendering dialog.
    """
    print('Creating a Mitsuba cluster using the nodes of group "%s"' % groupName)
    reservations = conn.get_all_instances()
    instances = [i for r in reservations for i in r.instances]
    activeNodes = []
    activeNodeIPs = []
    processes = []
    for i in instances:
        if i.state == u'running' and getGroup(i) == groupName:
            activeNodes += [i.public_dns_name]
            activeNodeIPs += [i.private_ip_address]
    if len(activeNodes) == 0:
        print("There are no running nodes!")
        return
    # Split off the head node (the last entry); the rest are plain workers.
    headNode = activeNodes.pop()
    activeNodeIPs.pop()
    for node in activeNodes:
        print("Sending command to node %s" % node)
        # Bug fix: use '2>&1' (was '>&1', which redirects stdout twice) so
        # stderr is also discarded and the SSH session can terminate once
        # mtssrv has detached via nohup.
        processes += [remoteCommand(node, 'killall --quiet mtssrv; cd /mnt; nohup mtssrv -vq >/dev/null 2>&1 &')]
    connArgument = ';'.join(activeNodeIPs)
    if len(connArgument) > 0:
        connArgument = '-c \"' + connArgument + "\""
    # Wait for all worker launches to complete.
    while True:
        doneCount = 0
        time.sleep(2)
        for p in processes:
            if p.poll() is not None:
                doneCount += 1
        print("%i/%i nodes are ready." % (doneCount, len(processes)))
        if doneCount == len(processes):
            print("All nodes are ready.")
            break
    print("Creating head node ..")
    # The head node keeps two cores free (minimum one worker core) and links
    # to every worker via the -c argument. Same '2>&1' fix as above.
    p = remoteCommand(headNode, 'killall --quiet mtssrv; cd /mnt; let pcount=`grep processor /proc/cpuinfo | wc -l`; let pcount=`perl -e "use POSIX qw/ceil/; print $pcount-2 < 1 ? 1 : $pcount-2"`; nohup mtssrv -p$pcount -vq %s >/dev/null 2>&1 &' % connArgument)
    p.wait()
    print('Done -- you can specify the head node "%s" in the Mitsuba network rendering dialog' % headNode)
def spotPrices(instanceType):
    """Print today's spot price history for *instanceType* (Linux/UNIX)."""
    now = datetime.datetime.now()
    dayStart = now.replace(hour=0, minute=0, second=0, microsecond=0)
    dayEnd = now.replace(second=0, microsecond=0)
    # Append the local UTC offset in the 'Z+HH.HH' form boto expects here.
    tzSuffix = str.format('Z{0:+06.2f}', float(time.timezone) / 3600)
    history = conn.get_spot_price_history(
        start_time=dayStart.isoformat() + tzSuffix,
        end_time=dayEnd.isoformat() + tzSuffix,
        instance_type=instanceType,
        product_description='Linux/UNIX')
    for record in history:
        timestamp, tz = parse_timestamp(record.timestamp)
        print ("%s => %.5f dollars" % (timestamp, record.price))
def login(name):
    """Open an interactive SSH shell on the node named *name*."""
    shell_cmd = 'ssh -o UserKnownHostsFile=/dev/null -o LogLevel=ERROR -o StrictHostKeyChecking=no -i %s ubuntu@%s' % (SSH_PRIVATE_KEY_FILE, name)
    os.system(shell_cmd)
# No command given on the command line: print usage information and exit.
if len(sys.argv) == 1:
    print('')
    print(' Mitsuba Amazon EC2 instance launcher')
    print('')
    print(' Copyright (C) 2007-2014 by Wenzel Jakob and others. This is free software;')
    print(' see the source for copying conditions. There is NO warranty; not even for')
    print(' MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.')
    print('')
    print(' Syntax: ./cluster.py <command> [arguments] where command is one of')
    print('')
    print(' status -- Prints a list of all currently running cluster nodes')
    print('')
    print(' addNodes [instance-type] [number] <group> -- adds further machines to the')
    print(' pool of rendering nodes. The following instances types are')
    print(' currently available (N. Virginia pricing as of Jan 8 2011)')
    print('')
    print(' t1.micro ($0.02/hr, 1 core , 613 MB RAM, 1-2 ECU)')
    print(' m1.large ($0.34/hr, 2 cores, 7.5 GB RAM, 4 ECU)')
    print(' m1.xlarge ($0.68/hr, 4 cores, 15 GB RAM, 8 ECU)')
    print(' c1.medium ($0.17/hr, 2 cores, 1.7 GB RAM, 5 ECU)')
    print(' c1.xlarge ($0.68/hr, 8 cores, 7 GB RAM, 20 ECU)')
    print(' cc1.4xlarge ($1.60/hr, 8 cores, 23 GB RAM, 33.5 ECU)')
    print('')
    print(' addSpotNodes [instance-type] [number] [maxprice] <group> -- adds one or')
    print(' more spot node request to the request queue. Apart from the')
    print(' maximum price parameter, the arguments are as above.')
    print('')
    print(' spotPrices [instance-type] -- get a 1-day history of spot instance')
    print(' prices for the specified type of machine (e.g. c1.xlarge)')
    print('')
    print(' terminate [nodename] -- Terminates a certain node specified by name. ')
    print(' Note that Amazon will still bill you for any partial ')
    print(' accrued hours!')
    print('')
    print(' terminateAll <group> -- Terminates *all* currently running nodes of the')
    print(' specified group. Note that Amazon will still bill you for ')
    print(' any partial accrued hours!')
    print('')
    print(' cancelSpot <spot-id> -- Cancels an active spot node request. The')
    print(' required ID can be obtained using the "status" command.')
    print('')
    print(' cancelAllSpot -- Cancels all active spot node requests.')
    print('')
    print(' login [nodename] -- Opens a shell on any running node, whose name')
    print(' can be obtained using the status command. All mitsuba-')
    print(' related data (log files, etc) can be found in "/mnt".')
    print('')
    print(' install <group> -- Fetch and install the most recent version of Mitsuba')
    print(' on all running compute nodes of the specified group.')
    print('')
    print(' start <group> -- Start/restart the Mitsuba server on all currently running')
    print(' nodes of the specified group. The launcher creates a setup')
    print(' where all EC2 nodes are linked to each other on the fast ')
    print(' internal Amazon network, and any communication with the ')
    print(' outside world happens through a "head" node, whose address is')
    print(' printed at the end.')
    print('')
    print(' systemLoad <group> -- Prints the system load (1, 5 and 15-minute averages)')
    print(' for each machine in the specified group')
    print('')
    print(' runCommand "cmd args" <group> -- Runs the specified command on each machine')
    print(' in the specified group. Note that it has to be in quotes')
    print('')
    print(' syncData [prefix] <group> -- Downloads a file from the registered S3 bucket')
    print(' so that it is available to any rendering jobs. The download is')
    print(' simultaneously performed on all nodes of the specified group. ')
    print(' Note that only a prefix must be specified -- e.g. when there are')
    print(' 20 data files starting with myData_*, running \"syncData myData\"')
    print(' will fetch them all. Any previously downloaded data will')
    print(' not be downloaded again. Note that you must ensure that a .s3cfg')
    print(' file is located in the same directory as this script, which ')
    print(' contains the S3 access credentials required by the "s3cmd"')
    print(' utility.')
    print('')
    print(' The usual order of these is: addNodes, install, start,')
    print(' and optionally syncData. At the end, don\'t forget to run terminateAll.')
    print(' Many commands accept an optional <group> argument -- this can be used')
    print(' to set up multiple independent clusters on the same AWS account. If no')
    print(' group is specified, the default group ("default") is assumed.')
    print('')
    sys.exit(0)
# Connect to the configured region, then dispatch the requested subcommand.
conn = boto.ec2.connect_to_region(AWS_REGION,
    aws_access_key_id = AWS_ACCESS_KEY_ID,
    aws_secret_access_key = AWS_SECRET_ACCESS_KEY)
if sys.argv[1] == 'addNodes':
    if len(sys.argv) == 5:
        addNodes(sys.argv[2], int(sys.argv[3]), sys.argv[4])
    elif len(sys.argv) == 4:
        addNodes(sys.argv[2], int(sys.argv[3]), 'default')
    else:
        print('addNodes: Invalid number of arguments!')
elif sys.argv[1] == 'addSpotNodes':
    if len(sys.argv) == 6:
        addSpotNodes(sys.argv[2], int(sys.argv[3]), float(sys.argv[4]), sys.argv[5])
    elif len(sys.argv) == 5:
        addSpotNodes(sys.argv[2], int(sys.argv[3]), float(sys.argv[4]), 'default')
    else:
        # Bug fix: this message previously said 'addNodes'.
        print('addSpotNodes: Invalid number of arguments!')
elif sys.argv[1] == 'status':
    if len(sys.argv) == 2:
        status()
    else:
        print('status: Invalid number of arguments!')
elif sys.argv[1] == 'terminate':
    if len(sys.argv) == 3:
        terminate(sys.argv[2])
    else:
        print('terminate: Invalid number of arguments!')
elif sys.argv[1] == 'terminateAll':
    if len(sys.argv) == 2:
        terminateAll('default')
    elif len(sys.argv) == 3:
        terminateAll(sys.argv[2])
    else:
        print('terminateAll: Invalid number of arguments!')
elif sys.argv[1] == 'cancelSpot':
    if len(sys.argv) == 3:
        cancelSpot(sys.argv[2])
    else:
        print('cancelSpot: Invalid number of arguments!')
elif sys.argv[1] == 'cancelAllSpot':
    if len(sys.argv) == 2:
        cancelAllSpot()
    else:
        print('cancelAllSpot: Invalid number of arguments!')
elif sys.argv[1] == 'install':
    if len(sys.argv) == 2:
        install('default')
    elif len(sys.argv) == 3:
        install(sys.argv[2])
    else:
        print('install: Invalid number of arguments!')
elif sys.argv[1] == 'syncData':
    if len(sys.argv) == 3:
        syncData(sys.argv[2], 'default')
    elif len(sys.argv) == 4:
        syncData(sys.argv[2], sys.argv[3])
    else:
        print('syncData: Invalid number of arguments!')
elif sys.argv[1] == 'start':
    if len(sys.argv) == 2:
        start('default')
    elif len(sys.argv) == 3:
        start(sys.argv[2])
    else:
        print('start: Invalid number of arguments!')
elif sys.argv[1] == 'login':
    if len(sys.argv) == 3:
        login(sys.argv[2])
    else:
        print('login: Invalid number of arguments!')
elif sys.argv[1] == 'spotPrices':
    if len(sys.argv) == 3:
        spotPrices(sys.argv[2])
    else:
        print('spotPrices: Invalid number of arguments!')
elif sys.argv[1] == 'systemLoad':
    if len(sys.argv) == 2:
        systemLoad('default')
    elif len(sys.argv) == 3:
        systemLoad(sys.argv[2])
    else:
        # Bug fix: the error message was previously passed to systemLoad()
        # as a group name instead of being printed.
        print('systemLoad: Invalid number of arguments!')
elif sys.argv[1] == 'runCommand':
    if len(sys.argv) == 3:
        runCommand(sys.argv[2], 'default')
    elif len(sys.argv) == 4:
        runCommand(sys.argv[2], sys.argv[3])
    else:
        print('runCommand: Invalid number of arguments!')
elif sys.argv[1] == 'regions':
    for r in conn.get_all_regions():
        print(r.name)
else:
    print('Unsupported command (run without parameters for an overview)!')
| |
import numpy as np
from scipy.ndimage.filters import gaussian_filter
from ..util import img_as_float
from .util import _mask_border_keypoints, pairwise_hamming_distance
from ._brief_cy import _brief_loop
def brief(image, keypoints, descriptor_size=256, mode='normal', patch_size=49,
          sample_seed=1, variance=2):
    """**Experimental function**.
    Extract BRIEF Descriptor about given keypoints for a given image.
    Parameters
    ----------
    image : 2D ndarray
        Input image.
    keypoints : (P, 2) ndarray
        Array of keypoint locations in the format (row, col).
    descriptor_size : int
        Size of BRIEF descriptor about each keypoint. Sizes 128, 256 and 512
        preferred by the authors. Default is 256.
    mode : string
        Probability distribution for sampling location of decision pixel-pairs
        around keypoints. Default is 'normal' otherwise uniform.
    patch_size : int
        Length of the two dimensional square patch sampling region around
        the keypoints. Default is 49.
    sample_seed : int
        Seed for sampling the decision pixel-pairs. From a square window with
        length patch_size, pixel pairs are sampled using the `mode` parameter
        to build the descriptors using intensity comparison. The value of
        `sample_seed` should be the same for the images to be matched while
        building the descriptors. Default is 1.
    variance : float
        Variance of the Gaussian Low Pass filter applied on the image to
        alleviate noise sensitivity. Default is 2.
    Returns
    -------
    descriptors : (Q, `descriptor_size`) ndarray of dtype bool
        2D ndarray of binary descriptors of size `descriptor_size` about Q
        keypoints after filtering out border keypoints with value at an index
        (i, j) either being True or False representing the outcome
        of Intensity comparison about ith keypoint on jth decision pixel-pair.
    keypoints : (Q, 2) ndarray
        Location i.e. (row, col) of keypoints after removing out those that
        are near border.
    References
    ----------
    .. [1] Michael Calonder, Vincent Lepetit, Christoph Strecha and Pascal Fua
           "BRIEF : Binary robust independent elementary features",
           http://cvlabwww.epfl.ch/~lepetit/papers/calonder_eccv10.pdf
    Examples
    --------
    >> from skimage.feature import corner_peaks, corner_harris, \\
    ..     pairwise_hamming_distance, brief, match_keypoints_brief
    >> square1 = np.zeros([8, 8], dtype=np.int32)
    >> square1[2:6, 2:6] = 1
    >> square1
    array([[0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)
    >> keypoints1 = corner_peaks(corner_harris(square1), min_distance=1)
    >> keypoints1
    array([[2, 2],
           [2, 5],
           [5, 2],
           [5, 5]])
    >> descriptors1, keypoints1 = brief(square1, keypoints1, patch_size=5)
    >> keypoints1
    array([[2, 2],
           [2, 5],
           [5, 2],
           [5, 5]])
    >> square2 = np.zeros([9, 9], dtype=np.int32)
    >> square2[2:7, 2:7] = 1
    >> square2
    array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 1, 1, 1, 1, 1, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)
    >> keypoints2 = corner_peaks(corner_harris(square2), min_distance=1)
    >> keypoints2
    array([[2, 2],
           [2, 6],
           [6, 2],
           [6, 6]])
    >> descriptors2, keypoints2 = brief(square2, keypoints2, patch_size=5)
    >> keypoints2
    array([[2, 2],
           [2, 6],
           [6, 2],
           [6, 6]])
    >> pairwise_hamming_distance(descriptors1, descriptors2)
    array([[ 0.03125  ,  0.3203125,  0.3671875,  0.6171875],
           [ 0.3203125,  0.03125  ,  0.640625 ,  0.375    ],
           [ 0.375    ,  0.6328125,  0.0390625,  0.328125 ],
           [ 0.625    ,  0.3671875,  0.34375  ,  0.0234375]])
    >> match_keypoints_brief(keypoints1, descriptors1,
    ..                       keypoints2, descriptors2)
    array([[[ 2,  2],
            [ 2,  2]],
           [[ 2,  5],
            [ 2,  6]],
           [[ 5,  2],
            [ 6,  2]],
           [[ 5,  5],
            [ 6,  6]]])
    """
    # Fixed seed so both images of a matching pair sample the same
    # decision pixel-pairs.
    np.random.seed(sample_seed)
    image = np.squeeze(image)
    if image.ndim != 2:
        raise ValueError("Only 2-D gray-scale images supported.")
    image = img_as_float(image)
    # Gaussian Low pass filtering to alleviate noise
    # sensitivity
    image = gaussian_filter(image, variance)
    image = np.ascontiguousarray(image)
    # Round keypoint coordinates to the nearest integer pixel (+0.5 then
    # truncation; assumes non-negative coordinates).
    keypoints = np.array(keypoints + 0.5, dtype=np.intp, order='C')
    # Removing keypoints that are within (patch_size / 2) distance from the
    # image border
    keypoints = keypoints[_mask_border_keypoints(image, keypoints, patch_size // 2)]
    keypoints = np.ascontiguousarray(keypoints)
    descriptors = np.zeros((keypoints.shape[0], descriptor_size), dtype=bool,
                           order='C')
    # Sampling pairs of decision pixels in patch_size x patch_size window
    if mode == 'normal':
        # Gaussian-distributed offsets; 8x oversampling leaves headroom for
        # the out-of-patch offsets discarded below.
        # NOTE(review): if the filter discarded more than 4x descriptor_size
        # samples, the reshape below would fail -- presumably unlikely for
        # sane patch sizes; verify.
        samples = (patch_size / 5.0) * np.random.randn(descriptor_size * 8)
        samples = np.array(samples, dtype=np.int32)
        samples = samples[(samples < (patch_size // 2))
                          & (samples > - (patch_size - 2) // 2)]
        pos1 = samples[:descriptor_size * 2]
        pos1 = pos1.reshape(descriptor_size, 2)
        pos2 = samples[descriptor_size * 2:descriptor_size * 4]
        pos2 = pos2.reshape(descriptor_size, 2)
    else:
        # Any other mode value: uniformly distributed offsets in the patch.
        samples = np.random.randint(-(patch_size - 2) // 2,
                                    (patch_size // 2) + 1,
                                    (descriptor_size * 2, 2))
        pos1, pos2 = np.split(samples, 2)
    pos1 = np.ascontiguousarray(pos1)
    pos2 = np.ascontiguousarray(pos2)
    # Cython helper fills the boolean descriptor matrix with the outcome of
    # the per-pair intensity comparisons.
    _brief_loop(image, descriptors.view(np.uint8), keypoints, pos1, pos2)
    return descriptors, keypoints
def match_keypoints_brief(keypoints1, descriptors1, keypoints2,
                          descriptors2, threshold=0.15):
    """**Experimental function**.

    Match BRIEF-described keypoints of one image against those of a second
    image. A keypoint from image 1 is matched to its nearest descriptor in
    image 2 (Hamming distance), provided that distance is within
    *threshold*.

    Parameters
    ----------
    keypoints1 : (M, 2) ndarray
        M keypoints from the first image described using
        skimage.feature.brief
    descriptors1 : (M, P) ndarray
        BRIEF descriptors of size P for the M keypoints of the first image.
    keypoints2 : (N, 2) ndarray
        N keypoints from the second image described using
        skimage.feature.brief
    descriptors2 : (N, P) ndarray
        BRIEF descriptors of size P for the N keypoints of the second image.
    threshold : float in range [0, 1]
        Maximum allowable Hamming distance between two descriptors for
        their keypoints to be regarded as a match. Default is 0.15.

    Returns
    -------
    match_keypoints_brief : (Q, 2, 2) ndarray
        Location of Q matched keypoint pairs from two images.
    """
    if (keypoints1.shape[0] != descriptors1.shape[0]
            or keypoints2.shape[0] != descriptors2.shape[0]):
        raise ValueError("The number of keypoints and number of described "
                         "keypoints do not match. Make the optional parameter "
                         "return_keypoints True to get described keypoints.")
    if descriptors1.shape[1] != descriptors2.shape[1]:
        raise ValueError("Descriptor sizes for matching keypoints in both "
                         "the images should be equal.")
    # Hamming distance between every descriptor of image 1 and image 2.
    distance = pairwise_hamming_distance(descriptors1, descriptors2)
    # A row matches when at least one candidate lies within the threshold;
    # the nearest candidate is then its match.
    has_match = np.any(distance <= threshold, axis=1)
    nearest = np.argmin(distance, axis=1)
    pairs = np.zeros((np.sum(has_match), 2, 2), dtype=np.intp)
    pairs[:, 0, :] = keypoints1[has_match]
    pairs[:, 1, :] = keypoints2[nearest][has_match]
    return pairs
| |
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.test.utils import override_settings
import six
import cinderclient as cinder_client
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class CinderApiTests(test.APITestCase):
def test_volume_list(self):
    """volume_list() yields one entry per volume returned by cinder."""
    search_opts = {'all_tenants': 1}
    detailed = True
    volumes = self.cinder_volumes.list()
    volume_transfers = self.cinder_volume_transfers.list()
    # Record the expected cinder API calls (mox record phase).
    cinderclient = self.stub_cinderclient()
    cinderclient.volumes = self.mox.CreateMockAnything()
    cinderclient.volumes.list(search_opts=search_opts,).AndReturn(volumes)
    cinderclient.transfers = self.mox.CreateMockAnything()
    cinderclient.transfers.list(
        detailed=detailed,
        search_opts=search_opts,).AndReturn(volume_transfers)
    self.mox.ReplayAll()
    api_volumes = api.cinder.volume_list(self.request,
                                         search_opts=search_opts)
    self.assertEqual(len(volumes), len(api_volumes))
def test_volume_list_paged(self):
    """Without pagination arguments, volume_list_paged() returns all
    volumes and reports neither a next nor a previous page."""
    search_opts = {'all_tenants': 1}
    detailed = True
    volumes = self.cinder_volumes.list()
    volume_transfers = self.cinder_volume_transfers.list()
    # Record the expected cinder API calls (mox record phase).
    cinderclient = self.stub_cinderclient()
    cinderclient.volumes = self.mox.CreateMockAnything()
    cinderclient.volumes.list(search_opts=search_opts,).AndReturn(volumes)
    cinderclient.transfers = self.mox.CreateMockAnything()
    cinderclient.transfers.list(
        detailed=detailed,
        search_opts=search_opts,).AndReturn(volume_transfers)
    self.mox.ReplayAll()
    api_volumes, has_more, has_prev = api.cinder.volume_list_paged(
        self.request, search_opts=search_opts)
    self.assertEqual(len(volumes), len(api_volumes))
    self.assertFalse(has_more)
    self.assertFalse(has_prev)
@override_settings(API_RESULT_PAGE_SIZE=2)
@override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
def test_volume_list_paginate_first_page(self):
    """First page: page_size + 1 items are requested; the extra item
    signals that more data exists but is not returned to the caller."""
    # Force re-detection of the cinder API version from the settings above.
    api.cinder.VERSIONS._active = None
    page_size = settings.API_RESULT_PAGE_SIZE
    volumes = self.cinder_volumes.list()
    volume_transfers = self.cinder_volume_transfers.list()
    search_opts = {'all_tenants': 1}
    # The backend is asked for one more than a page; the caller only
    # sees the first page_size entries.
    mox_volumes = volumes[:page_size + 1]
    expected_volumes = mox_volumes[:-1]
    cinderclient = self.stub_cinderclient()
    cinderclient.volumes = self.mox.CreateMockAnything()
    cinderclient.volumes.list(search_opts=search_opts, limit=page_size + 1,
                              sort='created_at:desc', marker=None).\
        AndReturn(mox_volumes)
    cinderclient.transfers = self.mox.CreateMockAnything()
    cinderclient.transfers.list(
        detailed=True,
        search_opts=search_opts,).AndReturn(volume_transfers)
    self.mox.ReplayAll()
    api_volumes, more_data, prev_data = api.cinder.volume_list_paged(
        self.request, search_opts=search_opts, paginate=True)
    self.assertEqual(len(expected_volumes), len(api_volumes))
    self.assertTrue(more_data)
    self.assertFalse(prev_data)
@override_settings(API_RESULT_PAGE_SIZE=2)
@override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
def test_volume_list_paginate_second_page(self):
    """Middle page: both a next and a previous page are reported."""
    # Force re-detection of the cinder API version from the settings above.
    api.cinder.VERSIONS._active = None
    page_size = settings.API_RESULT_PAGE_SIZE
    volumes = self.cinder_volumes.list()
    volume_transfers = self.cinder_volume_transfers.list()
    search_opts = {'all_tenants': 1}
    # Second page plus one extra item that signals "more data".
    mox_volumes = volumes[page_size:page_size * 2 + 1]
    expected_volumes = mox_volumes[:-1]
    marker = expected_volumes[0].id
    cinderclient = self.stub_cinderclient()
    cinderclient.volumes = self.mox.CreateMockAnything()
    cinderclient.volumes.list(search_opts=search_opts, limit=page_size + 1,
                              sort='created_at:desc', marker=marker).\
        AndReturn(mox_volumes)
    cinderclient.transfers = self.mox.CreateMockAnything()
    cinderclient.transfers.list(
        detailed=True,
        search_opts=search_opts,).AndReturn(volume_transfers)
    self.mox.ReplayAll()
    api_volumes, more_data, prev_data = api.cinder.volume_list_paged(
        self.request, search_opts=search_opts, marker=marker,
        paginate=True)
    self.assertEqual(len(expected_volumes), len(api_volumes))
    self.assertTrue(more_data)
    self.assertTrue(prev_data)
@override_settings(API_RESULT_PAGE_SIZE=2)
@override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
def test_volume_list_paginate_last_page(self):
    """Last page: fewer than page_size + 1 items come back, so only a
    previous page is reported."""
    # Force re-detection of the cinder API version from the settings above.
    api.cinder.VERSIONS._active = None
    page_size = settings.API_RESULT_PAGE_SIZE
    volumes = self.cinder_volumes.list()
    volume_transfers = self.cinder_volume_transfers.list()
    search_opts = {'all_tenants': 1}
    # Exactly page_size items remain; no extra item means no next page.
    mox_volumes = volumes[-1 * page_size:]
    expected_volumes = mox_volumes
    marker = expected_volumes[0].id
    cinderclient = self.stub_cinderclient()
    cinderclient.volumes = self.mox.CreateMockAnything()
    cinderclient.volumes.list(search_opts=search_opts, limit=page_size + 1,
                              sort='created_at:desc', marker=marker).\
        AndReturn(mox_volumes)
    cinderclient.transfers = self.mox.CreateMockAnything()
    cinderclient.transfers.list(
        detailed=True,
        search_opts=search_opts,).AndReturn(volume_transfers)
    self.mox.ReplayAll()
    api_volumes, more_data, prev_data = api.cinder.volume_list_paged(
        self.request, search_opts=search_opts, marker=marker,
        paginate=True)
    self.assertEqual(len(expected_volumes), len(api_volumes))
    self.assertFalse(more_data)
    self.assertTrue(prev_data)
@override_settings(API_RESULT_PAGE_SIZE=2)
@override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
def test_volume_list_paginate_back_from_some_page(self):
    """Backward navigation (sort_dir='asc') from a middle page reports
    both a next and a previous page."""
    # Force re-detection of the cinder API version from the settings above.
    api.cinder.VERSIONS._active = None
    page_size = settings.API_RESULT_PAGE_SIZE
    volumes = self.cinder_volumes.list()
    volume_transfers = self.cinder_volume_transfers.list()
    search_opts = {'all_tenants': 1}
    mox_volumes = volumes[page_size:page_size * 2 + 1]
    expected_volumes = mox_volumes[:-1]
    marker = expected_volumes[0].id
    cinderclient = self.stub_cinderclient()
    cinderclient.volumes = self.mox.CreateMockAnything()
    # Backward paging flips the sort order to ascending.
    cinderclient.volumes.list(search_opts=search_opts, limit=page_size + 1,
                              sort='created_at:asc', marker=marker).\
        AndReturn(mox_volumes)
    cinderclient.transfers = self.mox.CreateMockAnything()
    cinderclient.transfers.list(
        detailed=True,
        search_opts=search_opts,).AndReturn(volume_transfers)
    self.mox.ReplayAll()
    api_volumes, more_data, prev_data = api.cinder.volume_list_paged(
        self.request, search_opts=search_opts, sort_dir="asc",
        marker=marker, paginate=True)
    self.assertEqual(len(expected_volumes), len(api_volumes))
    self.assertTrue(more_data)
    self.assertTrue(prev_data)
@override_settings(API_RESULT_PAGE_SIZE=2)
@override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
def test_volume_list_paginate_back_to_first_page(self):
    """Backward navigation arriving at the first page: a next page is
    reported, but no previous page."""
    # Force re-detection of the cinder API version from the settings above.
    api.cinder.VERSIONS._active = None
    page_size = settings.API_RESULT_PAGE_SIZE
    volumes = self.cinder_volumes.list()
    volume_transfers = self.cinder_volume_transfers.list()
    search_opts = {'all_tenants': 1}
    mox_volumes = volumes[:page_size]
    expected_volumes = mox_volumes
    marker = expected_volumes[0].id
    cinderclient = self.stub_cinderclient()
    cinderclient.volumes = self.mox.CreateMockAnything()
    # Backward paging flips the sort order to ascending.
    cinderclient.volumes.list(search_opts=search_opts, limit=page_size + 1,
                              sort='created_at:asc', marker=marker).\
        AndReturn(mox_volumes)
    cinderclient.transfers = self.mox.CreateMockAnything()
    cinderclient.transfers.list(
        detailed=True,
        search_opts=search_opts,).AndReturn(volume_transfers)
    self.mox.ReplayAll()
    api_volumes, more_data, prev_data = api.cinder.volume_list_paged(
        self.request, search_opts=search_opts, sort_dir="asc",
        marker=marker, paginate=True)
    self.assertEqual(len(expected_volumes), len(api_volumes))
    self.assertTrue(more_data)
    self.assertFalse(prev_data)
def test_volume_snapshot_list(self):
search_opts = {'all_tenants': 1}
volume_snapshots = self.cinder_volume_snapshots.list()
cinderclient = self.stub_cinderclient()
cinderclient.volume_snapshots = self.mox.CreateMockAnything()
cinderclient.volume_snapshots.list(search_opts=search_opts).\
AndReturn(volume_snapshots)
self.mox.ReplayAll()
api.cinder.volume_snapshot_list(self.request, search_opts=search_opts)
def test_volume_snapshot_list_no_volume_configured(self):
# remove volume from service catalog
catalog = self.service_catalog
for service in catalog:
if service["type"] == "volume":
self.service_catalog.remove(service)
search_opts = {'all_tenants': 1}
volume_snapshots = self.cinder_volume_snapshots.list()
cinderclient = self.stub_cinderclient()
cinderclient.volume_snapshots = self.mox.CreateMockAnything()
cinderclient.volume_snapshots.list(search_opts=search_opts).\
AndReturn(volume_snapshots)
self.mox.ReplayAll()
api.cinder.volume_snapshot_list(self.request, search_opts=search_opts)
def test_volume_type_list_with_qos_associations(self):
volume_types = self.cinder_volume_types.list()
# Due to test data limitations, we can only run this test using
# one qos spec, which is associated with one volume type.
# If we use multiple qos specs, the test data will always
# return the same associated volume type, which is invalid
# and prevented by the UI.
qos_specs_full = self.cinder_qos_specs.list()
qos_specs_only_one = [qos_specs_full[0]]
associations = self.cinder_qos_spec_associations.list()
cinderclient = self.stub_cinderclient()
cinderclient.volume_types = self.mox.CreateMockAnything()
cinderclient.volume_types.list().AndReturn(volume_types)
cinderclient.qos_specs = self.mox.CreateMockAnything()
cinderclient.qos_specs.list().AndReturn(qos_specs_only_one)
cinderclient.qos_specs.get_associations = self.mox.CreateMockAnything()
cinderclient.qos_specs.get_associations(qos_specs_only_one[0].id).\
AndReturn(associations)
self.mox.ReplayAll()
assoc_vol_types = \
api.cinder.volume_type_list_with_qos_associations(self.request)
associate_spec = assoc_vol_types[0].associated_qos_spec
self.assertTrue(associate_spec, qos_specs_only_one[0].name)
def test_volume_type_get_with_qos_association(self):
volume_type = self.cinder_volume_types.first()
qos_specs_full = self.cinder_qos_specs.list()
qos_specs_only_one = [qos_specs_full[0]]
associations = self.cinder_qos_spec_associations.list()
cinderclient = self.stub_cinderclient()
cinderclient.volume_types = self.mox.CreateMockAnything()
cinderclient.volume_types.get(volume_type.id).AndReturn(volume_type)
cinderclient.qos_specs = self.mox.CreateMockAnything()
cinderclient.qos_specs.list().AndReturn(qos_specs_only_one)
cinderclient.qos_specs.get_associations = self.mox.CreateMockAnything()
cinderclient.qos_specs.get_associations(qos_specs_only_one[0].id).\
AndReturn(associations)
self.mox.ReplayAll()
assoc_vol_type = \
api.cinder.volume_type_get_with_qos_association(self.request,
volume_type.id)
associate_spec = assoc_vol_type.associated_qos_spec
self.assertTrue(associate_spec, qos_specs_only_one[0].name)
def test_absolute_limits_with_negative_values(self):
values = {"maxTotalVolumes": -1, "totalVolumesUsed": -1}
expected_results = {"maxTotalVolumes": float("inf"),
"totalVolumesUsed": 0}
limits = self.mox.CreateMockAnything()
limits.absolute = []
for key, val in six.iteritems(values):
limit = self.mox.CreateMockAnything()
limit.name = key
limit.value = val
limits.absolute.append(limit)
cinderclient = self.stub_cinderclient()
cinderclient.limits = self.mox.CreateMockAnything()
cinderclient.limits.get().AndReturn(limits)
self.mox.ReplayAll()
ret_val = api.cinder.tenant_absolute_limits(self.request)
for key in expected_results.keys():
self.assertEqual(expected_results[key], ret_val[key])
def test_pool_list(self):
pools = self.cinder_pools.list()
cinderclient = self.stub_cinderclient()
cinderclient.pools = self.mox.CreateMockAnything()
cinderclient.pools.list(detailed=True).AndReturn(pools)
self.mox.ReplayAll()
# No assertions are necessary. Verification is handled by mox.
api.cinder.pool_list(self.request, detailed=True)
def test_volume_type_default(self):
volume_type = self.cinder_volume_types.first()
cinderclient = self.stub_cinderclient()
cinderclient.volume_types = self.mox.CreateMockAnything()
cinderclient.volume_types.default().AndReturn(volume_type)
self.mox.ReplayAll()
default_volume_type = api.cinder.volume_type_default(self.request)
self.assertEqual(default_volume_type, volume_type)
def test_cgroup_list(self):
cgroups = self.cinder_consistencygroups.list()
cinderclient = self.stub_cinderclient()
cinderclient.consistencygroups = self.mox.CreateMockAnything()
cinderclient.consistencygroups.list(search_opts=None).\
AndReturn(cgroups)
self.mox.ReplayAll()
api_cgroups = api.cinder.volume_cgroup_list(self.request)
self.assertEqual(len(cgroups), len(api_cgroups))
def test_cgroup_get(self):
cgroup = self.cinder_consistencygroups.first()
cinderclient = self.stub_cinderclient()
cinderclient.consistencygroups = self.mox.CreateMockAnything()
cinderclient.consistencygroups.get(cgroup.id).AndReturn(cgroup)
self.mox.ReplayAll()
api_cgroup = api.cinder.volume_cgroup_get(self.request, cgroup.id)
self.assertEqual(api_cgroup.name, cgroup.name)
self.assertEqual(api_cgroup.description, cgroup.description)
self.assertEqual(api_cgroup.volume_types, cgroup.volume_types)
def test_cgroup_list_with_vol_type_names(self):
cgroups = self.cinder_consistencygroups.list()
volume_types_list = self.cinder_volume_types.list()
cinderclient = self.stub_cinderclient()
cinderclient.consistencygroups = self.mox.CreateMockAnything()
cinderclient.consistencygroups.list(search_opts=None).\
AndReturn(cgroups)
cinderclient.volume_types = self.mox.CreateMockAnything()
cinderclient.volume_types.list().AndReturn(volume_types_list)
self.mox.ReplayAll()
api_cgroups = api.cinder.volume_cgroup_list_with_vol_type_names(
self.request)
self.assertEqual(len(cgroups), len(api_cgroups))
for i in range(len(api_cgroups[0].volume_type_names)):
self.assertEqual(volume_types_list[i].name,
api_cgroups[0].volume_type_names[i])
def test_cgsnapshot_list(self):
cgsnapshots = self.cinder_cg_snapshots.list()
cinderclient = self.stub_cinderclient()
cinderclient.cgsnapshots = self.mox.CreateMockAnything()
cinderclient.cgsnapshots.list(search_opts=None).\
AndReturn(cgsnapshots)
self.mox.ReplayAll()
api_cgsnapshots = api.cinder.volume_cg_snapshot_list(self.request)
self.assertEqual(len(cgsnapshots), len(api_cgsnapshots))
def test_cgsnapshot_get(self):
cgsnapshot = self.cinder_cg_snapshots.first()
cinderclient = self.stub_cinderclient()
cinderclient.cgsnapshots = self.mox.CreateMockAnything()
cinderclient.cgsnapshots.get(cgsnapshot.id).AndReturn(cgsnapshot)
self.mox.ReplayAll()
api_cgsnapshot = api.cinder.volume_cg_snapshot_get(self.request,
cgsnapshot.id)
self.assertEqual(api_cgsnapshot.name, cgsnapshot.name)
self.assertEqual(api_cgsnapshot.description, cgsnapshot.description)
self.assertEqual(api_cgsnapshot.consistencygroup_id,
cgsnapshot.consistencygroup_id)
class CinderApiVersionTests(test.TestCase):
    """Tests for version-dependent attribute handling in the cinder API.

    The active cinder API version is detected once at module load, so
    setUp resets it to let each test run under its own settings.
    """

    def setUp(self):
        super(CinderApiVersionTests, self).setUp()
        # The version is set when the module is loaded. Reset the
        # active version each time so that we can test with different
        # versions.
        api.cinder.VERSIONS._active = None

    def test_default_client_is_v2(self):
        client = api.cinder.cinderclient(self.request)
        self.assertIsInstance(client, cinder_client.v2.client.Client)

    @override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
    def test_v2_setting_returns_v2_client(self):
        client = api.cinder.cinderclient(self.request)
        self.assertIsInstance(client, cinder_client.v2.client.Client)

    def test_get_v2_volume_attributes(self):
        # Get a v2 volume
        volume = self.cinder_volumes.get(name="v2_volume")
        self.assertTrue(hasattr(volume._apiresource, 'name'))
        self.assertFalse(hasattr(volume._apiresource, 'display_name'))

        name = "A v2 test volume name"
        description = "A v2 volume description"
        # Plain attribute assignment; setattr() is only needed when the
        # attribute name is dynamic.
        volume._apiresource.name = name
        volume._apiresource.description = description
        self.assertEqual(name, volume.name)
        self.assertEqual(description, volume.description)

    def test_get_v2_snapshot_attributes(self):
        # Get a v2 snapshot
        snapshot = self.cinder_volume_snapshots.get(
            description="v2 volume snapshot description")
        self.assertFalse(hasattr(snapshot._apiresource, 'display_name'))

        name = "A v2 test snapshot name"
        description = "A v2 snapshot description"
        snapshot._apiresource.name = name
        snapshot._apiresource.description = description
        self.assertEqual(name, snapshot.name)
        self.assertEqual(description, snapshot.description)

    def test_get_v2_snapshot_metadata(self):
        # Get a v2 snapshot with metadata
        snapshot = self.cinder_volume_snapshots.get(
            description="v2 volume snapshot with metadata description")
        self.assertTrue(hasattr(snapshot._apiresource, 'metadata'))
        self.assertFalse(hasattr(snapshot._apiresource, 'display_name'))

        key_name = "snapshot_meta_key"
        key_value = "snapshot_meta_value"
        snapshot._apiresource.metadata = {key_name: key_value}
        # assertIn gives a clearer failure message than
        # assertTrue(x in y), and dicts support membership directly.
        self.assertIn(key_name, snapshot.metadata)
        self.assertEqual(key_value, snapshot.metadata[key_name])

    def test_get_id_for_nameless_volume(self):
        volume = self.cinder_volumes.first()
        volume._apiresource.display_name = ""
        self.assertEqual(volume.id, volume.name)

    def test_adapt_dictionary_to_v2(self):
        volume = self.cinder_volumes.first()
        data = {'name': volume.name,
                'description': volume.description,
                'size': volume.size}

        ret_data = api.cinder._replace_v2_parameters(data)
        # Membership tests work directly on the dict; .keys() is
        # unnecessary.
        self.assertIn('name', ret_data)
        self.assertIn('description', ret_data)
        self.assertNotIn('display_name', ret_data)
        self.assertNotIn('display_description', ret_data)
| |
#----------------------------------------------------------------------
# Copyright (c) 2011 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
"""
The GPO Reference Aggregate Manager, showing how to implement
the GENI AM API. This AggregateManager has only fake resources.
Invoked from gcf-am.py
The GENI AM API is defined in the AggregateManager class.
"""
import base64
import datetime
import dateutil.parser
import logging
import os
import xml.dom.minidom as minidom
import xmlrpclib
import zlib
import geni
from SecureXMLRPCServer import SecureXMLRPCServer
# See sfa/trust/rights.py
# These are names of operations
# from the rights.py privilege_table
# Credentials may list privileges that
# map to these operations, giving the caller permission
# to perform the functions
RENEWSLIVERPRIV = 'renewsliver'
CREATESLIVERPRIV = 'createsliver'
# NOTE(review): the operation name is 'deleteslice' even though the
# constant is about deleting a sliver -- confirm against the
# privilege_table in sfa/trust/rights.py before changing.
DELETESLIVERPRIV = 'deleteslice'
SLIVERSTATUSPRIV = 'getsliceresources'
SHUTDOWNSLIVERPRIV = 'shutdown'
# Publicid format resource namespace. EG Resource URNs
# will be <namespace>:resource:<resourcetype>_<resourceid>
# This is something like the name of your AM
# See gen-certs.CERT_AUTHORITY
RESOURCE_NAMESPACE = 'geni//gpo//gcf'
# Maximum sliver lease granted by this AM, in days.
REFAM_MAXLEASE_DAYS = 365
class AggregateManager(object):
    """XMLRPC-facing GENI Aggregate Manager API.

    Every public method simply forwards to the delegate supplied at
    construction time, which implements the actual AM behavior.
    """

    def __init__(self, delegate):
        self._delegate = delegate

    def GetVersion(self):
        """Return a dict describing this AM: API version, supported
        RSpec formats and versions, etc."""
        return self._delegate.GetVersion()

    def ListResources(self, credentials, options):
        """Return an RSpec of the resources managed at this AM.

        The options dict may restrict the listing to a slice
        (geni_slice_urn), to available resources (geni_available), or
        request a compressed result (geni_compressed); all handling is
        up to the delegate.
        """
        return self._delegate.ListResources(credentials, options)

    def CreateSliver(self, slice_urn, credentials, rspec, users):
        """Allocate the resources described by rspec to a new sliver
        for slice_urn and return the manifest RSpec. The users argument
        carries extra runtime-access configuration."""
        return self._delegate.CreateSliver(slice_urn, credentials, rspec,
                                           users)

    def DeleteSliver(self, slice_urn, credentials):
        """Delete the named sliver; return True on success."""
        return self._delegate.DeleteSliver(slice_urn, credentials)

    def SliverStatus(self, slice_urn, credentials):
        """Report what is known about the status of the sliver's
        resources (the AM may not know everything)."""
        return self._delegate.SliverStatus(slice_urn, credentials)

    def RenewSliver(self, slice_urn, credentials, expiration_time):
        """Extend the sliver's lifetime to expiration_time; return
        False on error."""
        return self._delegate.RenewSliver(slice_urn, credentials,
                                          expiration_time)

    def Shutdown(self, slice_urn, credentials):
        """Operator action: shut down a misbehaving sliver without
        deleting it, preserving state for forensics."""
        return self._delegate.Shutdown(slice_urn, credentials)
class PrintingAggregateManager(object):
    """A stub AM delegate that just logs each call to stdout.

    Used as the default delegate by AggregateManagerServer; handy for
    smoke-testing the XMLRPC plumbing without real resources.
    """

    def GetVersion(self):
        print('GetVersion()')
        return 1

    def ListResources(self, credentials, options):
        """Return an empty ad RSpec, zlib-compressed if requested."""
        compressed = False
        if options and 'geni_compressed' in options:
            compressed = options['geni_compressed']
        print('ListResources(compressed=%r)' % compressed)
        # return an empty rspec
        result = '<rspec type="GCF"/>'
        if compressed:
            result = xmlrpclib.Binary(zlib.compress(result))
        return result

    def CreateSliver(self, slice_urn, credentials, rspec, users):
        print('CreateSliver(%r)' % slice_urn)
        return '<rspec type="GCF"/>'

    def DeleteSliver(self, slice_urn, credentials):
        print('DeleteSliver(%r)' % slice_urn)
        return False

    def SliverStatus(self, slice_urn, credentials):
        print('SliverStatus(%r)' % slice_urn)
        raise Exception('No such slice.')

    def RenewSliver(self, slice_urn, credentials, expiration_time):
        # Bug fix: this previously printed 'SliverStatus(...)' -- a
        # copy/paste error that mislabeled the call in the log.
        print('RenewSliver(%r, %r)' % (slice_urn, expiration_time))
        return False

    def Shutdown(self, slice_urn, credentials):
        print('Shutdown(%r)' % slice_urn)
        return False
class AggregateManagerServer(object):
    """An XMLRPC Aggregate Manager server.

    Wraps a SecureXMLRPCServer around an AggregateManager that forwards
    calls to the given delegate (a PrintingAggregateManager when none
    is supplied).
    """

    def __init__(self, addr, delegate=None, keyfile=None, certfile=None,
                 ca_certs=None, base_name=None):
        # ca_certs arg here must be a file of concatenated certs
        if ca_certs is None:
            raise Exception('Missing CA Certs')
        if not os.path.isfile(os.path.expanduser(ca_certs)):
            raise Exception('CA Certs must be an existing file of accepted root certs: %s' % ca_certs)

        # FIXME: set logRequests=true if --debug
        self._server = SecureXMLRPCServer(addr, keyfile=keyfile,
                                          certfile=certfile,
                                          ca_certs=ca_certs)
        if delegate is None:
            delegate = PrintingAggregateManager()
        self._server.register_instance(AggregateManager(delegate))
        # Give the delegate a handle on the server so it can read the
        # client certificate of each request.
        delegate._server = self._server

        if base_name is not None:
            global RESOURCE_NAMESPACE
            RESOURCE_NAMESPACE = base_name

    def serve_forever(self):
        """Run the XMLRPC server loop until interrupted."""
        self._server.serve_forever()

    def register_instance(self, instance):
        """Expose the given instance's methods over XMLRPC."""
        self._server.register_instance(instance)
class Resource(object):
    """A fake allocatable resource.

    A Resource has a numeric id, a type string, an availability flag
    and a status string (one of the STATUS_* constants). Equality is
    based on the id only.
    """

    STATUS_CONFIGURING = 'configuring'
    STATUS_READY = 'ready'
    STATUS_FAILED = 'failed'
    STATUS_UNKNOWN = 'unknown'
    STATUS_SHUTDOWN = 'shutdown'

    def __init__(self, id, type):
        self._id = id
        self._type = type
        self.available = True
        self.status = Resource.STATUS_UNKNOWN

    def __str__(self):
        return ("ID: %d, Type: %s, Available: %s, Status: %s" %
                (self._id, self._type, self.available, self.status))

    def urn(self):
        """Return the GENI URN for this resource. Used in SliverStatus."""
        publicid = 'IDN %s//resource//%s_%s' % (RESOURCE_NAMESPACE, self._type, str(self._id))
        return geni.publicid_to_urn(publicid)

    def toxml(self):
        """Serialize this resource as an XML fragment for RSpecs."""
        template = ('<resource><urn>%s</urn><type>%s</type><id>%s</id>'
                    + '<available>%r</available></resource>')
        return template % (self.urn(), self._type, self._id, self.available)

    def __eq__(self, other):
        return self._id == other._id

    # Bug fix: this hook was previously named __neq__, which Python
    # never calls; the inequality protocol method is __ne__. Without
    # it, '!=' fell back to default (identity-based) comparison.
    def __ne__(self, other):
        return self._id != other._id

    @classmethod
    def fromdom(cls, element):
        """Create a Resource instance from a DOM representation."""
        type = element.getElementsByTagName('type')[0].firstChild.data
        id = int(element.getElementsByTagName('id')[0].firstChild.data)
        # Use cls so subclasses get instances of their own type.
        return cls(id, type)
class Sliver(object):
    """A sliver: a URN, a list of Resources, and a UTC expiration time."""

    def __init__(self, urn, expiration=None):
        """Create a sliver for the given slice URN.

        Bug fix: the default expiration used to be
        'expiration=datetime.datetime.utcnow()' in the signature, which
        is evaluated once at class-definition time, so every
        default-constructed Sliver shared the module-load timestamp.
        It is now computed per call; passing an explicit expiration
        behaves exactly as before.
        """
        if expiration is None:
            expiration = datetime.datetime.utcnow()
        # Sliver URNs are derived from the owning slice's URN.
        self.urn = urn.replace("+slice+", "+sliver+")
        self.resources = list()
        self.expiration = expiration

    def status(self):
        """Determine the status of the sliver by examining the status
        of each resource in the sliver.

        Precedence: shutdown > failed > configuring; 'ready' only when
        every resource reports ready (an empty sliver is 'ready', as
        before); anything else is 'unknown'.
        """
        rstat = [res.status for res in self.resources]
        if Resource.STATUS_SHUTDOWN in rstat:
            return Resource.STATUS_SHUTDOWN
        elif Resource.STATUS_FAILED in rstat:
            return Resource.STATUS_FAILED
        elif Resource.STATUS_CONFIGURING in rstat:
            return Resource.STATUS_CONFIGURING
        elif all(s == Resource.STATUS_READY for s in rstat):
            # All resources report status of ready
            return Resource.STATUS_READY
        else:
            return Resource.STATUS_UNKNOWN
class ReferenceAggregateManager(object):
    '''A reference Aggregate Manager that manages fake resources.

    NOTE(review): self._server is not set here; AggregateManagerServer
    attaches it after construction so methods can read
    self._server.pem_cert to identify the XMLRPC caller -- confirm the
    server is always wired up before any API call.
    '''
    # root_cert is a single cert or dir of multiple certs
    # that are trusted to sign credentials
    def __init__(self, root_cert):
        # Mapping: slice URN -> Sliver, for all current allocations.
        self._slivers = dict()
        # Pool of unallocated fake resources. CreateSliver moves
        # entries out of this list; DeleteSliver puts them back.
        self._resources = [Resource(x, 'Nothing') for x in range(10)]
        self._cred_verifier = geni.CredentialVerifier(root_cert)
        # Upper bound on sliver lifetime measured from "now".
        self.max_lease = datetime.timedelta(days=REFAM_MAXLEASE_DAYS)
        self.logger = logging.getLogger('gcf-am.reference')
    def GetVersion(self):
        '''Specify version information about this AM. That could
        include API version information, RSpec format and version
        information, etc. Return a dict.'''
        self.logger.info("Called GetVersion")
        # FIXME: Fill in correct values for others
        # url
        # urn
        # hostname
        # code_tag
        # hrn
        default_ad = dict(type="GCF", version="0.1")
        # FIXME: Those schema/namespace values are bogus. But the spec also says they are optional.
        gcf_req = dict(type="GCF",
                       version="0.1",
                       schema="http://www.geni.net/resources/rspec/0.1/gcf-request.xsd",
                       namespace="http://www.geni.net/resources/rspec/0.1",
                       extensions=[])
        gcf_ad = dict(type="GCF",
                      version="0.1",
                      schema="http://www.geni.net/resources/rspec/0.1/gcf-ad.xsd",
                      namespace="http://www.geni.net/resources/rspec/0.1",
                      extensions=[])
        pgv2_req = dict(type="ProtoGENI",
                        version="2",
                        schema="http://www.protogeni.net/resources/rspec/2/request.xsd",
                        namespace="http://www.protogeni.net/resources/rspec/2",
                        extensions=[])
        pgv2_ad = dict(type="ProtoGENI",
                       version="2",
                       schema="http://www.protogeni.net/resources/rspec/2/ad.xsd",
                       namespace="http://www.protogeni.net/resources/rspec/2",
                       extensions=[])
        request_versions = [gcf_req, pgv2_req]
        ad_versions = [gcf_ad, pgv2_ad]
        versions = dict(default_ad_rspec=default_ad,
                        geni_api=1,
                        request_rspec_versions=request_versions,
                        ad_rspec_versions=ad_versions)
        return versions
    # The list of credentials are options - some single cred
    # must give the caller required permissions.
    # The semantics of the API are unclear on this point, so
    # this is just the current implementation
    def ListResources(self, credentials, options):
        '''Return an RSpec of resources managed at this AM.
        If a geni_slice_urn
        is given in the options, then only return resources assigned
        to that slice. If geni_available is specified in the options,
        then only report available resources. And if geni_compressed
        option is specified, then compress the result.'''
        self.logger.info('ListResources(%r)' % (options))
        # Note this list of privileges is really the name of an operation
        # from the privilege_table in sfa/trust/rights.py
        # Credentials will specify a list of privileges, each of which
        # confers the right to perform a list of operations.
        # EG the 'info' privilege in a credential allows the operations
        # listslices, listnodes, policy
        # could require list or listnodes?
        privileges = ()
        # Note that verify throws an exception on failure.
        # Use the client PEM format cert as retrieved
        # from the https connection by the SecureXMLRPCServer
        # to identify the caller.
        self._cred_verifier.verify_from_strings(self._server.pem_cert,
                                                credentials,
                                                None,
                                                privileges)
        # If we get here, the credentials give the caller
        # all needed privileges to act on the given target.
        if not options:
            options = dict()
        # Look to see what RSpec version the client requested
        if 'rspec_version' in options:
            # we only have one, so nothing to do here
            # But an AM with multiple formats supported
            # would use this to decide how to format the return.
            # Can also error-check that the input value is supported.
            rspec_type = options['rspec_version']['type']
            if isinstance(rspec_type, str):
                rspec_type = rspec_type.lower().strip()
            rspec_version = options['rspec_version']['version']
            if rspec_type != 'GCF':
                self.logger.warn("Returning GCF rspec even though request said %s", rspec_type)
            self.logger.info("ListResources requested rspec %s (%s)", rspec_type, rspec_version)
        if 'geni_slice_urn' in options:
            slice_urn = options['geni_slice_urn']
            if slice_urn in self._slivers:
                sliver = self._slivers[slice_urn]
                result = ('<rspec type="GCF">'
                          + ''.join([x.toxml() for x in sliver.resources])
                          + '</rspec>')
            else:
                # return an empty rspec
                result = '<rspec type="GCF"/>'
        elif 'geni_available' in options and options['geni_available']:
            # only include available items
            # (self._resources holds only unallocated resources --
            # CreateSliver removes allocated ones -- so no extra
            # availability filtering is needed here)
            result = ('<rspec type="GCF">' + ''.join([x.toxml() for x in self._resources])
                      + '</rspec>')
            # To make this AM return a fixed RSpec do:
            # rspecfile = open('/tmp/sample-of-ad-rspec.xml')
            # result = ''
            # for line in rspecfile:
            #     result += line
            # rspecfile.close()
        else:
            # No slice filter and availability not requested: report
            # both the free pool and every allocated resource.
            all_resources = list()
            all_resources.extend(self._resources)
            for sliver in self._slivers:
                all_resources.extend(self._slivers[sliver].resources)
            result = ('<rspec type="GCF">' + ''.join([x.toxml() for x in all_resources])
                      + '</rspec>')
        # self.logger.debug('Returning resource list %s', result)
        # To make this AM return a fixed RSpec do:
        # rspecfile = open('/tmp/sample-of-ad-rspec.xml')
        # result = ''
        # for line in rspecfile:
        #     result += line
        # rspecfile.close()
        # Optionally compress the result
        if 'geni_compressed' in options and options['geni_compressed']:
            try:
                result = base64.b64encode(zlib.compress(result))
            except Exception, exc:
                import traceback
                self.logger.error("Error compressing and encoding resource list: %s", traceback.format_exc())
                raise Exception("Server error compressing resource list", exc)
        return result
    # The list of credentials are options - some single cred
    # must give the caller required permissions.
    # The semantics of the API are unclear on this point, so
    # this is just the current implementation
    def CreateSliver(self, slice_urn, credentials, rspec, users):
        """Create a sliver with the given URN from the resources in
        the given RSpec.
        Return an RSpec of the actually allocated resources.
        users argument provides extra information on configuring the resources
        for runtime access.
        """
        self.logger.info('CreateSliver(%r)' % (slice_urn))
        # Note this list of privileges is really the name of an operation
        # from the privilege_table in sfa/trust/rights.py
        # Credentials will specify a list of privileges, each of which
        # confers the right to perform a list of operations.
        # EG the 'info' privilege in a credential allows the operations
        # listslices, listnodes, policy
        privileges = (CREATESLIVERPRIV,)
        # Note that verify throws an exception on failure.
        # Use the client PEM format cert as retrieved
        # from the https connection by the SecureXMLRPCServer
        # to identify the caller.
        creds = self._cred_verifier.verify_from_strings(self._server.pem_cert,
                                                        credentials,
                                                        slice_urn,
                                                        privileges)
        # If we get here, the credentials give the caller
        # all needed privileges to act on the given target.
        if slice_urn in self._slivers:
            self.logger.error('Sliver %s already exists.' % slice_urn)
            raise Exception('Sliver %s already exists.' % slice_urn)
        rspec_dom = None
        try:
            rspec_dom = minidom.parseString(rspec)
        except Exception, exc:
            self.logger.error("Cant create sliver %s. Exception parsing rspec: %s" % (slice_urn, exc))
            raise Exception("Cant create sliver %s. Exception parsing rspec: %s" % (slice_urn, exc))
        # Look at the version of the input request RSpec
        # Make sure it is supported
        # Then make sure that you return an RSpec in the same format
        # EG if both V1 and V2 are supported, and the user gives V2 request,
        # then you must return a V2 request and not V1
        resources = list()
        for elem in rspec_dom.documentElement.getElementsByTagName('resource'):
            resource = None
            try:
                resource = Resource.fromdom(elem)
            except Exception, exc:
                import traceback
                self.logger.warning("Failed to parse resource from RSpec dom: %s", traceback.format_exc())
                raise Exception("Cant create sliver %s. Exception parsing rspec: %s" % (slice_urn, exc))
            # Membership uses Resource equality, i.e. matching ids
            # against the free pool.
            if resource not in self._resources:
                self.logger.info("Requested resource %d not available" % resource._id)
                raise Exception('Resource %d not available' % resource._id)
            resources.append(resource)
        # determine max expiration time from credentials
        # do not create a sliver that will outlive the slice!
        expiration = datetime.datetime.utcnow() + self.max_lease
        for cred in creds:
            credexp = self._naiveUTC(cred.expiration)
            if credexp < expiration:
                expiration = credexp
        sliver = Sliver(slice_urn, expiration)
        # remove resources from available list
        for resource in resources:
            sliver.resources.append(resource)
            self._resources.remove(resource)
            resource.available = False
            resource.status = Resource.STATUS_READY
        self._slivers[slice_urn] = sliver
        self.logger.info("Created new sliver for slice %s" % slice_urn)
        # Return the manifest RSpec for the allocated resources.
        return ('<rspec type="GCF">' + ''.join([x.toxml() for x in sliver.resources])
                + '</rspec>')
    # The list of credentials are options - some single cred
    # must give the caller required permissions.
    # The semantics of the API are unclear on this point, so
    # this is just the current implementation
    def DeleteSliver(self, slice_urn, credentials):
        '''Stop and completely delete the named sliver, and return True.'''
        self.logger.info('DeleteSliver(%r)' % (slice_urn))
        # Note this list of privileges is really the name of an operation
        # from the privilege_table in sfa/trust/rights.py
        # Credentials will specify a list of privileges, each of which
        # confers the right to perform a list of operations.
        # EG the 'info' privilege in a credential allows the operations
        # listslices, listnodes, policy
        privileges = (DELETESLIVERPRIV,)
        # Note that verify throws an exception on failure.
        # Use the client PEM format cert as retrieved
        # from the https connection by the SecureXMLRPCServer
        # to identify the caller.
        self._cred_verifier.verify_from_strings(self._server.pem_cert,
                                                credentials,
                                                slice_urn,
                                                privileges)
        # If we get here, the credentials give the caller
        # all needed privileges to act on the given target.
        if slice_urn in self._slivers:
            sliver = self._slivers[slice_urn]
            # Shut-down slivers are preserved for forensics; refuse to
            # delete them.
            if sliver.status() == Resource.STATUS_SHUTDOWN:
                self.logger.info("Sliver %s not deleted because it is shutdown",
                                 slice_urn)
                return False
            # return the resources to the pool
            self._resources.extend(sliver.resources)
            for resource in sliver.resources:
                resource.available = True
                resource.status = Resource.STATUS_UNKNOWN
            del self._slivers[slice_urn]
            self.logger.info("Sliver %r deleted" % slice_urn)
            return True
        else:
            self._no_such_slice(slice_urn)
    def SliverStatus(self, slice_urn, credentials):
        '''Report as much as is known about the status of the resources
        in the sliver. The AM may not know.
        Return a dict of sliver urn, status, and a list of dicts resource
        statuses.'''
        # Loop over the resources in a sliver gathering status.
        self.logger.info('SliverStatus(%r)' % (slice_urn))
        # Note this list of privileges is really the name of an operation
        # from the privilege_table in sfa/trust/rights.py
        # Credentials will specify a list of privileges, each of which
        # confers the right to perform a list of operations.
        # EG the 'info' privilege in a credential allows the operations
        # listslices, listnodes, policy
        privileges = (SLIVERSTATUSPRIV,)
        self._cred_verifier.verify_from_strings(self._server.pem_cert,
                                                credentials,
                                                slice_urn,
                                                privileges)
        if slice_urn in self._slivers:
            sliver = self._slivers[slice_urn]
            # Now calculate the status of the sliver
            res_status = list()
            for res in sliver.resources:
                # Gather the status of all the resources
                # in the sliver. This could be actually
                # communicating with the resources, or simply
                # reporting the state of initialized, started, stopped, ...
                res_status.append(dict(geni_urn=res.urn(),
                                       geni_status=res.status,
                                       geni_error=''))
            self.logger.info("Calculated and returning sliver %r status" % slice_urn)
            return dict(geni_urn=sliver.urn,
                        geni_status=sliver.status(),
                        geni_resources=res_status)
        else:
            self._no_such_slice(slice_urn)
    def RenewSliver(self, slice_urn, credentials, expiration_time):
        '''Renew the local sliver that is part of the named Slice
        until the given expiration time (in UTC with a TZ per RFC3339).
        Requires at least one credential that is valid until then.
        Return False on any error, True on success.'''
        self.logger.info('RenewSliver(%r, %r)' % (slice_urn, expiration_time))
        privileges = (RENEWSLIVERPRIV,)
        creds = self._cred_verifier.verify_from_strings(self._server.pem_cert,
                                                        credentials,
                                                        slice_urn,
                                                        privileges)
        # All the credentials we just got are valid
        if slice_urn in self._slivers:
            # If any credential will still be valid at the newly
            # requested time, then we can do this.
            sliver = self._slivers.get(slice_urn)
            if sliver.status() == Resource.STATUS_SHUTDOWN:
                self.logger.info("Sliver %s not renewed because it is shutdown",
                                 slice_urn)
                return False
            requested = dateutil.parser.parse(str(expiration_time))
            # Per the AM API, the input time should be TZ-aware
            # But since the slice cred may not (per ISO8601), convert
            # it to naiveUTC for comparison
            requested = self._naiveUTC(requested)
            # Track the latest credential expiration seen, for the
            # failure log message below.
            lastexp = 0
            for cred in creds:
                credexp = self._naiveUTC(cred.expiration)
                lastexp = credexp
                if credexp >= requested:
                    sliver.expiration = requested
                    self.logger.info("Sliver %r now expires on %r", slice_urn, expiration_time)
                    return True
                else:
                    self.logger.debug("Valid cred %r expires at %r before %r", cred, credexp, requested)
            # Fell through then no credential expires at or after
            # newly requested expiration time
            self.logger.info("Can't renew sliver %r until %r because none of %d credential(s) valid until then (last expires at %r)", slice_urn, expiration_time, len(creds), str(lastexp))
            # FIXME: raise an exception so the client knows what
            # really went wrong?
            return False
        else:
            self._no_such_slice(slice_urn)
    def Shutdown(self, slice_urn, credentials):
        '''For Management Authority / operator use: shut down a badly
        behaving sliver, without deleting it to allow for forensics.'''
        self.logger.info('Shutdown(%r)' % (slice_urn))
        privileges = (SHUTDOWNSLIVERPRIV,)
        self._cred_verifier.verify_from_strings(self._server.pem_cert,
                                                credentials,
                                                slice_urn,
                                                privileges)
        if slice_urn in self._slivers:
            sliver = self._slivers[slice_urn]
            # Mark every resource shut down; the sliver's aggregate
            # status then reports shutdown too.
            for resource in sliver.resources:
                resource.status = Resource.STATUS_SHUTDOWN
            self.logger.info("Sliver %r shut down" % slice_urn)
            return True
        else:
            self.logger.info("Shutdown: No such slice: %s.", slice_urn)
            self._no_such_slice(slice_urn)
    def _no_such_slice(self, slice_urn):
        """Raise a no such slice exception."""
        fault_code = 'No such slice.'
        fault_string = 'The slice named by %s does not exist' % (slice_urn)
        self.logger.warning(fault_string)
        # NOTE(review): xmlrpclib.Fault's faultCode is conventionally an
        # int; the string code used here works but may surprise strict
        # clients -- confirm before changing.
        raise xmlrpclib.Fault(fault_code, fault_string)
    def _naiveUTC(self, dt):
        """Converts dt to a naive datetime in UTC.
        if 'dt' has a timezone then
        convert to UTC
        strip off timezone (make it "naive" in Python parlance)
        """
        if dt.tzinfo:
            # NOTE(review): dateutil.tz is reached through the
            # module-level 'import dateutil.parser'; it is not imported
            # explicitly -- confirm dateutil.tz is available here.
            tz_utc = dateutil.tz.tzutc()
            dt = dt.astimezone(tz_utc)
            dt = dt.replace(tzinfo=None)
        return dt
| |
# Copyright 2012 NetApp
# Copyright 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
LVM Driver for shares.
"""
import math
import os
import re
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
import six
from manila import exception
from manila.i18n import _, _LE, _LI, _LW
from manila.share import driver
from manila.share.drivers import generic
LOG = log.getLogger(__name__)
# Configuration options for the LVM share driver; registered on the global
# CONF object below, alongside options reused from the generic driver.
share_opts = [
    cfg.StrOpt('lvm_share_export_root',
               default='$state_path/mnt',
               help='Base folder where exported shares are located.'),
    cfg.StrOpt('lvm_share_export_ip',
               help='IP to be added to export string.'),
    cfg.IntOpt('lvm_share_mirrors',
               default=0,
               help='If set, create LVMs with multiple mirrors. Note that '
                    'this requires lvm_mirrors + 2 PVs with available space.'),
    cfg.StrOpt('lvm_share_volume_group',
               default='lvm-shares',
               help='Name for the VG that will contain exported shares.'),
    cfg.ListOpt('lvm_share_helpers',
                default=[
                    'CIFS=manila.share.drivers.helpers.CIFSHelperUserAccess',
                    'NFS=manila.share.drivers.helpers.NFSHelper',
                ],
                help='Specify list of share export helpers.'),
]

CONF = cfg.CONF
CONF.register_opts(share_opts)
# The LVM driver reuses helper-related options declared by the generic driver.
CONF.register_opts(generic.share_opts)
class LVMMixin(driver.ExecuteMixin):
    """Mixin providing LVM volume create/extend/delete/snapshot commands."""

    def check_for_setup_error(self):
        """Returns an error if prerequisites aren't met."""
        out, err = self._execute('vgs', '--noheadings', '-o', 'name',
                                 run_as_root=True)
        volume_groups = out.split()
        if self.configuration.lvm_share_volume_group not in volume_groups:
            msg = (_("share volume group %s doesn't exist")
                   % self.configuration.lvm_share_volume_group)
            raise exception.InvalidParameterValue(err=msg)
        if not self.configuration.lvm_share_export_ip:
            msg = (_("lvm_share_export_ip isn't specified"))
            raise exception.InvalidParameterValue(err=msg)

    def _allocate_container(self, share):
        """Create an LV sized for the share and put a filesystem on it."""
        sizestr = '%sG' % share['size']
        cmd = ['lvcreate', '-L', sizestr, '-n', share['name'],
               self.configuration.lvm_share_volume_group]
        if self.configuration.lvm_share_mirrors:
            cmd += ['-m', self.configuration.lvm_share_mirrors, '--nosync']
            terras = int(sizestr[:-1]) / 1024.0
            if terras >= 1.5:
                rsize = int(2 ** math.ceil(math.log(terras) / math.log(2)))
                # NOTE(vish): Next power of two for region size. See:
                # http://red.ht/U2BPOD
                cmd += ['-R', six.text_type(rsize)]

        self._try_execute(*cmd, run_as_root=True)
        device_name = self._get_local_path(share)
        self._execute('mkfs.%s' % self.configuration.share_volume_fstype,
                      device_name, run_as_root=True)

    def _extend_container(self, share, device_name, size):
        """Grow the LV backing the share to the new size (in GiB)."""
        # lvextend takes the logical volume (device) path as a positional
        # argument. The previous '-n' flag is an lvcreate naming option and
        # is not accepted by lvextend, so the command could never succeed.
        cmd = ['lvextend', '-L', '%sG' % size, device_name]
        self._try_execute(*cmd, run_as_root=True)

    def _deallocate_container(self, share_name):
        """Deletes a logical volume for share."""
        try:
            self._try_execute('lvremove', '-f', "%s/%s" %
                              (self.configuration.lvm_share_volume_group,
                               share_name), run_as_root=True)
        except exception.ProcessExecutionError as exc:
            if "not found" not in exc.stderr:
                LOG.exception(_LE("Error deleting volume"))
                raise
            # LV already gone: treat as success; log lazily (no eager %).
            LOG.warning(_LW("Volume not found: %s"), exc.stderr)

    def create_snapshot(self, context, snapshot, share_server=None):
        """Creates a snapshot."""
        orig_lv_name = "%s/%s" % (self.configuration.lvm_share_volume_group,
                                  snapshot['share_name'])
        self._try_execute(
            'lvcreate', '-L', '%sG' % snapshot['share']['size'],
            '--name', snapshot['name'],
            '--snapshot', orig_lv_name, run_as_root=True)

    def delete_snapshot(self, context, snapshot, share_server=None):
        """Deletes a snapshot."""
        self._deallocate_container(snapshot['name'])
class LVMShareDriver(LVMMixin, driver.ShareDriver):
    """Executes commands relating to Shares."""

    def __init__(self, *args, **kwargs):
        """Do initialization."""
        super(LVMShareDriver, self).__init__([False], *args, **kwargs)
        self.configuration.append_config_values(share_opts)
        self.configuration.append_config_values(generic.share_opts)
        self.configuration.share_mount_path = (
            self.configuration.lvm_share_export_root)
        self._helpers = None
        self.backend_name = self.configuration.safe_get(
            'share_backend_name') or 'LVM'
        # Set of parameters used for compatibility with
        # Generic driver's helpers.
        self.share_server = {
            'public_address': self.configuration.lvm_share_export_ip,
            'instance_id': self.backend_name,
            'lock_name': 'manila_lvm',
        }

    def _ssh_exec_as_root(self, server, command, check_exit_code=True):
        # Local stand-in for the generic driver's SSH exec: strip a leading
        # 'sudo' and run the command directly with root privileges instead.
        kwargs = {}
        if 'sudo' in command:
            kwargs['run_as_root'] = True
            command.remove('sudo')
        kwargs['check_exit_code'] = check_exit_code
        return self._execute(*command, **kwargs)

    def do_setup(self, context):
        """Any initialization the volume driver does while starting."""
        super(LVMShareDriver, self).do_setup(context)
        self._setup_helpers()

    def _setup_helpers(self):
        """Initializes protocol-specific NAS drivers."""
        self._helpers = {}
        for helper_str in self.configuration.lvm_share_helpers:
            share_proto, _, import_str = helper_str.partition('=')
            helper = importutils.import_class(import_str)
            # TODO(rushiagr): better way to handle configuration
            # instead of just passing to the helper
            self._helpers[share_proto.upper()] = helper(
                self._execute, self._ssh_exec_as_root, self.configuration)

    def _get_local_path(self, share):
        """Return the device-mapper path for the share's LV."""
        # The escape characters are expected by the device mapper.
        escaped_group = (
            self.configuration.lvm_share_volume_group.replace('-', '--'))
        escaped_name = share['name'].replace('-', '--')
        return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)

    def _update_share_stats(self):
        """Retrieve stats info from share volume group."""
        data = {
            'share_backend_name': self.backend_name,
            'storage_protocol': 'NFS_CIFS',
            'reserved_percentage':
                self.configuration.reserved_share_percentage,
            'consistency_group_support': None,
            'snapshot_support': True,
            'driver_name': 'LVMShareDriver',
            'pools': self.get_share_server_pools()
        }
        super(LVMShareDriver, self)._update_share_stats(data)

    def get_share_server_pools(self, share_server=None):
        """Report one pool with the VG's total/free capacity in GiB."""
        out, err = self._execute('vgs',
                                 self.configuration.lvm_share_volume_group,
                                 '--rows', '--units', 'g',
                                 run_as_root=True)
        # Raw strings so '\s' is a regex class, not a (deprecated) escape.
        total_size = re.findall(r"VSize\s[0-9.]+g", out)[0][6:-1]
        free_size = re.findall(r"VFree\s[0-9.]+g", out)[0][6:-1]
        return [{
            'pool_name': 'lvm-single-pool',
            'total_capacity_gb': float(total_size),
            'free_capacity_gb': float(free_size),
            'reserved_percentage': 0,
        }, ]

    def create_share(self, context, share, share_server=None):
        """Allocate an LV, export it, mount it, and return the location."""
        self._allocate_container(share)
        # create file system
        device_name = self._get_local_path(share)
        location = self._get_helper(share).create_export(self.share_server,
                                                         share['name'])
        self._mount_device(share, device_name)
        return location

    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None):
        """Is called to create share from snapshot."""
        self._allocate_container(share)
        snapshot_device = self._get_local_path(snapshot)
        share_device = self._get_local_path(share)
        self._copy_volume(snapshot_device, share_device, share['size'])
        location = self._get_helper(share).create_export(self.share_server,
                                                         share['name'])
        # Mount the new share's own device — NOT the snapshot source.
        # Previously the snapshot device was mounted here, which exported
        # the read-only snapshot and left the newly copied LV unused.
        self._mount_device(share, share_device)
        return location

    def delete_share(self, context, share, share_server=None):
        """Unexport, unmount, and remove the LV backing the share."""
        self._remove_export(context, share)
        self._delete_share(context, share)
        self._deallocate_container(share['name'])

    def _remove_export(self, ctx, share):
        """Removes an access rules for a share."""
        mount_path = self._get_mount_path(share)
        if os.path.exists(mount_path):
            # umount, may be busy
            try:
                self._execute('umount', '-f', mount_path, run_as_root=True)
            except exception.ProcessExecutionError as exc:
                if 'device is busy' in six.text_type(exc):
                    raise exception.ShareBusyException(reason=share['name'])
                else:
                    # Best-effort: the device may already be unmounted.
                    LOG.info(_LI('Unable to umount: %s'), exc)
            # remove dir
            try:
                os.rmdir(mount_path)
            except OSError:
                LOG.warning(_LI('Unable to delete %s'), mount_path)

    def ensure_share(self, ctx, share, share_server=None):
        """Ensure that storage are mounted and exported."""
        device_name = self._get_local_path(share)
        self._mount_device(share, device_name)
        self._get_helper(share).create_export(self.share_server, share['name'],
                                              recreate=True)

    def _delete_share(self, ctx, share):
        """Delete a share."""
        try:
            self._get_helper(share).remove_export(self.share_server,
                                                  share['name'])
        except exception.ProcessExecutionError:
            LOG.warning(_LI("Can't remove share %r"), share['id'])
        except exception.InvalidShare as exc:
            LOG.warning(exc.message)

    def update_access(self, context, share, access_rules, add_rules,
                      delete_rules, share_server=None):
        """Update access rules for given share.

        This driver has two different behaviors according to parameters:
        1. Recovery after error - 'access_rules' contains all access_rules,
        'add_rules' and 'delete_rules' shall be empty. Previously existing
        access rules are cleared and then added back according
        to 'access_rules'.

        2. Adding/Deleting of several access rules - 'access_rules' contains
        all access_rules, 'add_rules' and 'delete_rules' contain rules which
        should be added/deleted. Rules in 'access_rules' are ignored and
        only rules from 'add_rules' and 'delete_rules' are applied.

        :param context: Current context
        :param share: Share model with share data.
        :param access_rules: All access rules for given share
        :param add_rules: Empty List or List of access rules which should be
               added. access_rules already contains these rules.
        :param delete_rules: Empty List or List of access rules which should be
               removed. access_rules doesn't contain these rules.
        :param share_server: None or Share server model
        """
        self._get_helper(share).update_access(self.share_server,
                                              share['name'], access_rules,
                                              add_rules=add_rules,
                                              delete_rules=delete_rules)

    def _get_helper(self, share):
        """Pick the NFS or CIFS helper based on the share protocol."""
        if share['share_proto'].lower().startswith('nfs'):
            return self._helpers['NFS']
        elif share['share_proto'].lower().startswith('cifs'):
            return self._helpers['CIFS']
        else:
            raise exception.InvalidShare(reason='Wrong share protocol')

    def _mount_device(self, share, device_name):
        """Mount LVM share and ignore if already mounted."""
        mount_path = self._get_mount_path(share)
        self._execute('mkdir', '-p', mount_path)
        try:
            self._execute('mount', device_name, mount_path,
                          run_as_root=True, check_exit_code=True)
            self._execute('chmod', '777', mount_path,
                          run_as_root=True, check_exit_code=True)
        except exception.ProcessExecutionError:
            # Mount failed; if the device already appears in the mount
            # table, treat it as already mounted rather than an error.
            out, err = self._execute('mount', '-l', run_as_root=True)
            if device_name in out:
                LOG.warning(_LW("%s is already mounted"), device_name)
            else:
                raise
        return mount_path

    def _unmount_device(self, share):
        """Unmount the share's device and remove its mount directory."""
        mount_path = self._get_mount_path(share)
        self._execute('umount', mount_path, run_as_root=True)
        self._execute('rmdir', mount_path, run_as_root=True)

    def _get_mount_path(self, share):
        """Returns path where share is mounted."""
        return os.path.join(self.configuration.share_mount_path,
                            share['name'])

    def _copy_volume(self, srcstr, deststr, size_in_g):
        """Block-copy size_in_g gigabytes from srcstr to deststr with dd."""
        # Use O_DIRECT to avoid thrashing the system buffer cache
        extra_flags = ['iflag=direct', 'oflag=direct']

        # Check whether O_DIRECT is supported
        try:
            self._execute('dd', 'count=0', 'if=%s' % srcstr, 'of=%s' % deststr,
                          *extra_flags, run_as_root=True)
        except exception.ProcessExecutionError:
            extra_flags = []

        # Perform the copy
        self._execute('dd', 'if=%s' % srcstr, 'of=%s' % deststr,
                      'count=%d' % (size_in_g * 1024), 'bs=1M',
                      *extra_flags, run_as_root=True)

    def extend_share(self, share, new_size, share_server=None):
        """Grow the LV and the filesystem on it to new_size (GiB)."""
        device_name = self._get_local_path(share)
        self._extend_container(share, device_name, new_size)
        self._execute('resize2fs', device_name, run_as_root=True)
| |
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
import mock
import testtools
from neutron.agent.linux import daemon
from neutron.common import exceptions
from neutron.tests import base
FAKE_FD = 8
class FakeEntry(object):
    """Test double mimicking a pwd/grp database entry with one attribute."""

    def __init__(self, name, value):
        self.__dict__[name] = value
class TestPrivileges(base.BaseTestCase):
    """Tests for daemon.setuid/setgid/drop_privileges helpers."""

    def test_setuid_with_name(self):
        # A user *name* is resolved via pwd and the numeric uid passed on.
        with mock.patch('pwd.getpwnam', return_value=FakeEntry('pw_uid', 123)):
            with mock.patch('os.setuid') as setuid_mock:
                daemon.setuid('user')
                setuid_mock.assert_called_once_with(123)

    def test_setuid_with_id(self):
        # A numeric string is used directly as the uid, no pwd lookup.
        with mock.patch('os.setuid') as setuid_mock:
            daemon.setuid('321')
            setuid_mock.assert_called_once_with(321)

    def test_setuid_fails(self):
        # OSError from os.setuid is logged critically and converted to
        # FailToDropPrivilegesExit.
        with mock.patch('os.setuid', side_effect=OSError()):
            with mock.patch.object(daemon.LOG, 'critical') as log_critical:
                self.assertRaises(exceptions.FailToDropPrivilegesExit,
                                  daemon.setuid, '321')
                log_critical.assert_called_once_with(mock.ANY)

    def test_setgid_with_name(self):
        # Same as setuid, but group names resolve through grp.
        with mock.patch('grp.getgrnam', return_value=FakeEntry('gr_gid', 123)):
            with mock.patch('os.setgid') as setgid_mock:
                daemon.setgid('group')
                setgid_mock.assert_called_once_with(123)

    def test_setgid_with_id(self):
        with mock.patch('os.setgid') as setgid_mock:
            daemon.setgid('321')
            setgid_mock.assert_called_once_with(321)

    def test_setgid_fails(self):
        with mock.patch('os.setgid', side_effect=OSError()):
            with mock.patch.object(daemon.LOG, 'critical') as log_critical:
                self.assertRaises(exceptions.FailToDropPrivilegesExit,
                                  daemon.setgid, '321')
                log_critical.assert_called_once_with(mock.ANY)

    @mock.patch.object(os, 'setgroups')
    @mock.patch.object(daemon, 'setgid')
    @mock.patch.object(daemon, 'setuid')
    def test_drop_no_privileges(self, mock_setuid, mock_setgid,
                                mock_setgroups):
        # With neither user nor group requested, drop_privileges is a no-op.
        daemon.drop_privileges()
        for cursor in (mock_setuid, mock_setgid, mock_setgroups):
            self.assertFalse(cursor.called)

    @mock.patch.object(os, 'geteuid', return_value=0)
    @mock.patch.object(os, 'setgroups')
    @mock.patch.object(daemon, 'setgid')
    @mock.patch.object(daemon, 'setuid')
    def _test_drop_privileges(self, setuid, setgid, setgroups,
                              geteuid, user=None, group=None):
        # Shared scenario driver (euid mocked to 0 = root): only the
        # requested identity changes may happen.
        daemon.drop_privileges(user=user, group=group)
        if user:
            setuid.assert_called_once_with(user)
        else:
            self.assertFalse(setuid.called)
        if group:
            # Supplementary groups are cleared before switching gid.
            setgroups.assert_called_once_with([])
            setgid.assert_called_once_with(group)
        else:
            self.assertFalse(setgroups.called)
            self.assertFalse(setgid.called)

    def test_drop_user_privileges(self):
        self._test_drop_privileges(user='user')

    def test_drop_uid_privileges(self):
        self._test_drop_privileges(user='321')

    def test_drop_group_privileges(self):
        self._test_drop_privileges(group='group')

    def test_drop_gid_privileges(self):
        self._test_drop_privileges(group='654')

    def test_drop_privileges_without_root_permissions(self):
        # A non-root euid must abort with FailToDropPrivilegesExit.
        with mock.patch('os.geteuid', return_value=1):
            with mock.patch.object(daemon.LOG, 'critical') as log_critical:
                self.assertRaises(exceptions.FailToDropPrivilegesExit,
                                  daemon.drop_privileges, 'user')
                log_critical.assert_called_once_with(mock.ANY)
class TestPidfile(base.BaseTestCase):
    """Tests for daemon.Pidfile lock-file handling."""

    def setUp(self):
        super(TestPidfile, self).setUp()
        # Patch os and fcntl inside the daemon module so no real files or
        # locks are touched; the fake fd stands in for the opened pidfile.
        self.os_p = mock.patch.object(daemon, 'os')
        self.os = self.os_p.start()
        self.os.open.return_value = FAKE_FD

        self.fcntl_p = mock.patch.object(daemon, 'fcntl')
        self.fcntl = self.fcntl_p.start()
        self.fcntl.flock.return_value = 0

    def test_init(self):
        self.os.O_CREAT = os.O_CREAT
        self.os.O_RDWR = os.O_RDWR

        daemon.Pidfile('thefile', 'python')
        self.os.open.assert_called_once_with('thefile', os.O_CREAT | os.O_RDWR)
        self.fcntl.flock.assert_called_once_with(FAKE_FD, self.fcntl.LOCK_EX |
                                                 self.fcntl.LOCK_NB)

    def test_init_open_fail(self):
        self.os.open.side_effect = IOError

        with mock.patch.object(daemon.sys, 'stderr') as stderr:
            with testtools.ExpectedException(SystemExit):
                daemon.Pidfile('thefile', 'python')
            # The failure path reports the problem on stderr before the
            # SystemExit (asserted above by ExpectedException). The old code
            # called assert_has_calls on the real `sys` module — which has
            # no such attribute — so the assertion could never pass; assert
            # on the patched stderr mock instead.
            stderr.write.assert_called_once_with(mock.ANY)

    def test_unlock(self):
        p = daemon.Pidfile('thefile', 'python')
        p.unlock()
        self.fcntl.flock.assert_has_calls([
            mock.call(FAKE_FD, self.fcntl.LOCK_EX | self.fcntl.LOCK_NB),
            mock.call(FAKE_FD, self.fcntl.LOCK_UN)]
        )

    def test_write(self):
        p = daemon.Pidfile('thefile', 'python')
        p.write(34)
        # write() truncates, writes the pid as text, and syncs to disk.
        self.os.assert_has_calls([
            mock.call.ftruncate(FAKE_FD, 0),
            mock.call.write(FAKE_FD, '34'),
            mock.call.fsync(FAKE_FD)]
        )

    def test_read(self):
        self.os.read.return_value = '34'
        p = daemon.Pidfile('thefile', 'python')
        self.assertEqual(34, p.read())

    def test_is_running(self):
        with mock.patch('six.moves.builtins.open') as mock_open:
            p = daemon.Pidfile('thefile', 'python')

            mock_open.return_value.__enter__ = lambda s: s
            mock_open.return_value.__exit__ = mock.Mock()
            mock_open.return_value.readline.return_value = 'python'

            with mock.patch.object(p, 'read') as read:
                read.return_value = 34
                self.assertTrue(p.is_running())

            mock_open.assert_called_once_with('/proc/34/cmdline', 'r')

    def test_is_running_uuid_true(self):
        with mock.patch('six.moves.builtins.open') as mock_open:
            p = daemon.Pidfile('thefile', 'python', uuid='1234')

            mock_open.return_value.__enter__ = lambda s: s
            mock_open.return_value.__exit__ = mock.Mock()
            mock_open.return_value.readline.return_value = 'python 1234'

            with mock.patch.object(p, 'read') as read:
                read.return_value = 34
                self.assertTrue(p.is_running())

            mock_open.assert_called_once_with('/proc/34/cmdline', 'r')

    def test_is_running_uuid_false(self):
        with mock.patch('six.moves.builtins.open') as mock_open:
            p = daemon.Pidfile('thefile', 'python', uuid='6789')

            mock_open.return_value.__enter__ = lambda s: s
            mock_open.return_value.__exit__ = mock.Mock()
            mock_open.return_value.readline.return_value = 'python 1234'

            with mock.patch.object(p, 'read') as read:
                read.return_value = 34
                # A different uuid on the cmdline means it is not our process.
                self.assertFalse(p.is_running())

            mock_open.assert_called_once_with('/proc/34/cmdline', 'r')
class TestDaemon(base.BaseTestCase):
    """Tests for the daemon.Daemon double-fork daemonization helper."""

    def setUp(self):
        super(TestDaemon, self).setUp()
        # Patch os and Pidfile inside the daemon module so no real
        # processes are forked and no pidfile is created.
        self.os_p = mock.patch.object(daemon, 'os')
        self.os = self.os_p.start()

        self.pidfile_p = mock.patch.object(daemon, 'Pidfile')
        self.pidfile = self.pidfile_p.start()

    def test_init(self):
        d = daemon.Daemon('pidfile')
        self.assertEqual(d.procname, 'python')

    def test_init_nopidfile(self):
        # Without a pidfile path, no Pidfile object may be constructed.
        d = daemon.Daemon(pidfile=None)
        self.assertEqual(d.procname, 'python')
        self.assertFalse(self.pidfile.called)

    def test_fork_parent(self):
        # fork() returning a nonzero pid marks the parent, which must _exit.
        self.os.fork.return_value = 1
        d = daemon.Daemon('pidfile')
        d._fork()
        self.os._exit.assert_called_once_with(mock.ANY)

    def test_fork_child(self):
        # fork() returning 0 marks the child, which continues running.
        self.os.fork.return_value = 0
        d = daemon.Daemon('pidfile')
        self.assertIsNone(d._fork())

    def test_fork_error(self):
        # A failing fork writes to stderr and exits the process.
        self.os.fork.side_effect = OSError(1)
        with mock.patch.object(daemon.sys, 'stderr'):
            with testtools.ExpectedException(SystemExit):
                d = daemon.Daemon('pidfile', 'stdin')
                d._fork()

    def test_daemonize(self):
        self.os.devnull = '/dev/null'

        d = daemon.Daemon('pidfile')
        with mock.patch.object(d, '_fork') as fork:
            with mock.patch.object(daemon, 'atexit') as atexit:
                with mock.patch.object(daemon, 'signal') as signal:
                    signal.SIGTERM = 15
                    with mock.patch.object(daemon, 'sys') as sys:
                        sys.stdin.fileno.return_value = 0
                        sys.stdout.fileno.return_value = 1
                        sys.stderr.fileno.return_value = 2
                        d.daemonize()
                        signal.signal.assert_called_once_with(15, d.handle_sigterm)
                    atexit.register.assert_called_once_with(d.delete_pid)

            # Daemonization must double-fork, detach from the controlling
            # terminal, and redirect the three standard streams.
            fork.assert_has_calls([mock.call(), mock.call()])
            self.os.assert_has_calls([
                mock.call.chdir('/'),
                mock.call.setsid(),
                mock.call.umask(0),
                mock.call.dup2(mock.ANY, 0),
                mock.call.dup2(mock.ANY, 1),
                mock.call.dup2(mock.ANY, 2),
                mock.call.getpid()]
            )

    def test_delete_pid(self):
        self.pidfile.return_value.__str__.return_value = 'pidfile'
        d = daemon.Daemon('pidfile')
        d.delete_pid()
        self.os.remove.assert_called_once_with('pidfile')

    def test_handle_sigterm(self):
        # SIGTERM handler exits cleanly with status 0.
        d = daemon.Daemon('pidfile')
        with mock.patch.object(daemon, 'sys') as sys:
            d.handle_sigterm(15, 1234)
            sys.exit.assert_called_once_with(0)

    def test_start(self):
        self.pidfile.return_value.is_running.return_value = False
        d = daemon.Daemon('pidfile')

        with mock.patch.object(d, 'daemonize') as daemonize:
            with mock.patch.object(d, 'run') as run:
                d.start()
                run.assert_called_once_with()
                daemonize.assert_called_once_with()

    def test_start_running(self):
        # If the pidfile says we are already running, start() must abort
        # without daemonizing.
        self.pidfile.return_value.is_running.return_value = True
        d = daemon.Daemon('pidfile')

        with mock.patch.object(daemon.sys, 'stderr'):
            with mock.patch.object(d, 'daemonize') as daemonize:
                with testtools.ExpectedException(SystemExit):
                    d.start()
                self.assertFalse(daemonize.called)
| |
#!/usr/bin/python3
import subprocess, shutil, os, sqlite3, re
import utils
from email_validator import validate_email as validate_email_, EmailNotValidError
import idna
def validate_email(email, mode=None):
	# Checks that an email address is syntactically valid. Returns True/False.
	# Until Postfix supports SMTPUTF8, an email address may contain ASCII
	# characters only; IDNs must be IDNA-encoded.
	#
	# When mode=="user", we're checking that this can be a user account name.
	# Dovecot has tighter restrictions - letters, numbers, underscore, and
	# dash only!
	#
	# When mode=="alias", we're allowing anything that can be in a Postfix
	# alias table, i.e. omitting the local part ("@domain.tld") is OK.

	# Basic syntax check via the email_validator package; any validation
	# error simply means the address is unacceptable.
	try:
		validate_email_(email,
			allow_smtputf8=False,
			check_deliverability=False,
			allow_empty_local=(mode=="alias"))
	except EmailNotValidError:
		return False

	if mode != 'user':
		return True

	# User accounts double as mailbox path names, so keep them short and
	# restricted to characters Dovecot's sqlite auth driver copes with,
	# lowercase only (our database is case sensitive, and Postfix queries
	# in lowercase).
	if len(email) > 255:
		return False
	return re.search(r'[^\@\.a-z0-9_\-]+', email) is None
def sanitize_idn_email_address(email):
	# The user may enter Unicode in an email address. Convert the domain part
	# to IDNA before going into our database. Leave the local part alone ---
	# although validate_email will reject non-ASCII characters.
	#
	# The domain name system only exists in ASCII, so it doesn't make sense
	# to store domain names in Unicode. We want to store what is meaningful
	# to the underlying protocols.
	try:
		localpart, domainpart = email.split("@")
		ascii_domain = idna.encode(domainpart).decode('ascii')
	except (ValueError, idna.IDNAError):
		# ValueError: not exactly one @-sign, so not a valid address.
		# IDNAError: the domain part is not IDNA-valid. Validation is not
		# this function's job, so return the value unchanged; non-ASCII
		# characters will be rejected later by validate_email.
		return email
	return localpart + "@" + ascii_domain
def prettify_idn_email_address(email):
	# This is the opposite of sanitize_idn_email_address. We store domain
	# names in IDNA in the database, but we want to show Unicode to the user.
	try:
		localpart, domainpart = email.split("@")
		return "%s@%s" % (localpart, idna.decode(domainpart.encode("ascii")))
	except (ValueError, UnicodeError, idna.IDNAError):
		# Failed to decode IDNA, or the email address does not have a
		# single @-sign. Should never happen.
		return email
def is_dcv_address(email):
	# True if the address uses a local part commonly probed for
	# domain-control validation (either "name@..." or "name+tag...").
	email = email.lower()
	dcv_localparts = ("admin", "administrator", "postmaster", "hostmaster",
	                  "webmaster", "abuse")
	return any(email.startswith(lp + "@") or email.startswith(lp + "+")
	           for lp in dcv_localparts)
def open_database(env, with_connection=False):
	# Open the mail users database under STORAGE_ROOT. Returns just a
	# cursor by default, or the (connection, cursor) pair when the caller
	# needs to commit.
	conn = sqlite3.connect(env["STORAGE_ROOT"] + "/mail/users.sqlite")
	cursor = conn.cursor()
	return (conn, cursor) if with_connection else cursor
def get_mail_users(env):
	# Returns a flat, sorted list of all user accounts.
	c = open_database(env)
	c.execute('SELECT email FROM users')
	emails = [email for (email,) in c.fetchall()]
	return utils.sort_email_addresses(emails, env)
def get_mail_users_ex(env, with_archived=False, with_slow_info=False):
	# Returns a complex data structure of all user accounts, optionally
	# including archived (status="inactive") accounts.
	#
	# [
	#   {
	#     domain: "domain.tld",
	#     users: [
	#       {
	#         email: "name@domain.tld",
	#         privileges: [ "priv1", "priv2", ... ],
	#         status: "active" | "inactive",
	#       },
	#       ...
	#     ]
	#   },
	#   ...
	# ]

	# Get users and their privileges.
	users = []
	active_accounts = set()
	c = open_database(env)
	c.execute('SELECT email, privileges FROM users')
	for email, privileges in c.fetchall():
		active_accounts.add(email)

		user = {
			"email": email,
			"privileges": parse_privs(privileges),
			"status": "active",
		}
		users.append(user)

		if with_slow_info:
			# Mailbox paths are STORAGE_ROOT/mail/mailboxes/<domain>/<local>,
			# hence the reversed split of the address.
			user["mailbox_size"] = utils.du(os.path.join(env['STORAGE_ROOT'], 'mail/mailboxes', *reversed(email.split("@"))))

	# Add in archived accounts.
	if with_archived:
		# Mailbox directories without a matching row in the users table are
		# accounts that were deleted; report them as inactive.
		root = os.path.join(env['STORAGE_ROOT'], 'mail/mailboxes')
		for domain in os.listdir(root):
			if os.path.isdir(os.path.join(root, domain)):
				for user in os.listdir(os.path.join(root, domain)):
					email = user + "@" + domain
					mbox = os.path.join(root, domain, user)
					if email in active_accounts: continue
					# NOTE: rebinding the loop variable `user` from the
					# directory name to the result dict here.
					user = {
						"email": email,
						"privileges": "",
						"status": "inactive",
						"mailbox": mbox,
					}
					users.append(user)

					if with_slow_info:
						user["mailbox_size"] = utils.du(mbox)

	# Group by domain.
	domains = { }
	for user in users:
		domain = get_domain(user["email"])
		if domain not in domains:
			domains[domain] = {
				"domain": domain,
				"users": []
				}
		domains[domain]["users"].append(user)

	# Sort domains.
	domains = [domains[domain] for domain in utils.sort_domains(domains.keys(), env)]

	# Sort users within each domain first by status then lexicographically by email address.
	for domain in domains:
		domain["users"].sort(key = lambda user : (user["status"] != "active", user["email"]))

	return domains
def get_admins(env):
	# Returns a set of users with admin privileges.
	return set(
		user["email"]
		for domain in get_mail_users_ex(env)
		for user in domain["users"]
		if "admin" in user["privileges"])
def get_mail_aliases(env):
	# Returns a sorted list of tuples of (address, forward-tos, permitted-senders).
	c = open_database(env)
	c.execute('SELECT source, destination, permitted_senders FROM aliases')
	by_address = {row[0]: row for row in c.fetchall()}

	# Canonical order: sort by domain, then lexicographically by address.
	return [by_address[addr]
	        for addr in utils.sort_email_addresses(by_address.keys(), env)]
def get_mail_aliases_ex(env):
	# Returns a complex data structure of all mail aliases, similar
	# to get_mail_users_ex.
	#
	# [
	#   {
	#     domain: "domain.tld",
	#     alias: [
	#       {
	#         address: "name@domain.tld", # IDNA-encoded
	#         address_display: "name@domain.tld", # full Unicode
	#         forwards_to: ["user1@domain.com", "receiver-only1@domain.com", ...],
	#         permitted_senders: ["user1@domain.com", "sender-only1@domain.com", ...] OR null,
	#         required: True|False
	#       },
	#       ...
	#     ]
	#   },
	#   ...
	# ]

	required_aliases = get_required_aliases(env)
	domains = {}
	for address, forwards_to, permitted_senders in get_mail_aliases(env):
		# get alias info
		domain = get_domain(address)
		required = (address in required_aliases)

		# add to list
		if not domain in domains:
			domains[domain] = {
				"domain": domain,
				"aliases": [],
			}
		# forwards_to / permitted_senders are stored as comma-separated
		# strings; permitted_senders may be NULL (-> None) meaning
		# "no restriction".
		domains[domain]["aliases"].append({
			"address": address,
			"address_display": prettify_idn_email_address(address),
			"forwards_to": [prettify_idn_email_address(r.strip()) for r in forwards_to.split(",")],
			"permitted_senders": [prettify_idn_email_address(s.strip()) for s in permitted_senders.split(",")] if permitted_senders is not None else None,
			"required": required,
		})

	# Sort domains.
	domains = [domains[domain] for domain in utils.sort_domains(domains.keys(), env)]

	# Sort aliases within each domain first by required-ness then lexicographically by address.
	for domain in domains:
		domain["aliases"].sort(key = lambda alias : (alias["required"], alias["address"]))
	return domains
def get_domain(emailaddr, as_unicode=True):
	# Gets the domain part of an email address. Turns IDNA
	# back to Unicode for display.
	domain = emailaddr.split('@', 1)[1]
	if not as_unicode:
		return domain
	try:
		return idna.decode(domain.encode('ascii'))
	except (ValueError, UnicodeError, idna.IDNAError):
		# Looks like we have an invalid email address in
		# the database. Now is not the time to complain.
		return domain
def get_mail_domains(env, filter_aliases=lambda alias : True):
	# Returns the domain names (IDNA-encoded) of all of the email addresses
	# configured on the system.
	domains = set()
	for login in get_mail_users(env):
		domains.add(get_domain(login, as_unicode=False))
	for address, *_ in get_mail_aliases(env):
		if filter_aliases(address):
			domains.add(get_domain(address, as_unicode=False))
	return domains
def add_mail_user(email, pw, privs, env):
	# Create a new user account with the given password and newline-separated
	# privilege list. Returns the result of kick() on success or an
	# (error message, HTTP status) tuple on failure.

	# validate email
	if email.strip() == "":
		return ("No email address provided.", 400)
	elif not validate_email(email):
		return ("Invalid email address.", 400)
	elif not validate_email(email, mode='user'):
		return ("User account email addresses may only use the lowercase ASCII letters a-z, the digits 0-9, underscore (_), hyphen (-), and period (.).", 400)
	elif is_dcv_address(email) and len(get_mail_users(env)) > 0:
		# Make domain control validation hijacking a little harder to mess up by preventing the usual
		# addresses used for DCV from being user accounts. Except let it be the first account because
		# during box setup the user won't know the rules.
		return ("You may not make a user account for that address because it is frequently used for domain control validation. Use an alias instead if necessary.", 400)

	# validate password (raises on failure)
	validate_password(pw)

	# validate privileges
	if privs is None or privs.strip() == "":
		privs = []
	else:
		privs = privs.split("\n")
		for p in privs:
			validation = validate_privilege(p)
			if validation: return validation

	# get the database
	conn, c = open_database(env, with_connection=True)

	# hash the password
	pw = hash_password(pw)

	# add the user to the database
	try:
		c.execute("INSERT INTO users (email, password, privileges) VALUES (?, ?, ?)",
			(email, pw, "\n".join(privs)))
	except sqlite3.IntegrityError:
		# email is the primary key, so a duplicate insert fails here.
		return ("User already exists.", 400)

	# write database before next step
	conn.commit()

	# Update things in case any new domains are added.
	return kick(env, "mail user added")
def set_mail_password(email, pw, env):
	# Validate, hash, and store a new password for an existing user.
	validate_password(pw)
	hashed = hash_password(pw)

	conn, c = open_database(env, with_connection=True)
	c.execute("UPDATE users SET password=? WHERE email=?", (hashed, email))
	if c.rowcount != 1:
		# No row matched: the address is not a user account.
		return ("That's not a user (%s)." % email, 400)
	conn.commit()
	return "OK"
def hash_password(pw):
    """Hash a plaintext password into Dovecot's "{SCHEME}hashedpassworddata" format.

    See http://wiki2.dovecot.org/Authentication/PasswordSchemes
    """
    doveadm_cmd = ["/usr/bin/doveadm", "pw", "-s", "SHA512-CRYPT", "-p", pw]
    return utils.shell('check_output', doveadm_cmd).strip()
def get_mail_password(email, env):
    """Return the stored password hash for a user.

    Passwords are stored in Dovecot's format, with a prefixed scheme
    (http://wiki2.dovecot.org/Authentication/PasswordSchemes).
    Raises ValueError if the address is not a user account.
    """
    c = open_database(env)
    c.execute('SELECT password FROM users WHERE email=?', (email,))
    matches = c.fetchall()
    if len(matches) != 1:
        raise ValueError("That's not a user (%s)." % email)
    return matches[0][0]
def remove_mail_user(email, env):
    """Delete a user account.

    Returns the output of kick() on success, or an (error message, 400)
    tuple if the address is not a user.
    """
    conn, c = open_database(env, with_connection=True)
    c.execute("DELETE FROM users WHERE email=?", (email,))
    if c.rowcount != 1:
        return ("That's not a user (%s)." % email, 400)
    conn.commit()

    # Update things in case any domains are removed.
    return kick(env, "mail user removed")
def parse_privs(value):
    """Split a newline-separated privileges string into a list.

    Blank and whitespace-only entries are dropped; surviving entries are
    returned as-is (not stripped).
    """
    privileges = []
    for entry in value.split("\n"):
        if entry.strip():
            privileges.append(entry)
    return privileges
def get_mail_user_privileges(email, env, empty_on_error=False):
    """Return the list of privileges held by a user.

    If the address is not a user, returns [] when empty_on_error is True,
    otherwise an (error message, 400) tuple.
    """
    c = open_database(env)
    c.execute('SELECT privileges FROM users WHERE email=?', (email,))
    matches = c.fetchall()
    if len(matches) != 1:
        # Unknown user: soft-fail or report, per caller's preference.
        if empty_on_error:
            return []
        return ("That's not a user (%s)." % email, 400)
    return parse_privs(matches[0][0])
def validate_privilege(priv):
    """Return an (error message, 400) tuple if priv is not a valid
    privilege name, or None if it is acceptable.

    A privilege may not be empty/whitespace-only and may not contain a
    newline (privileges are stored newline-separated).
    """
    invalid = priv.strip() == "" or "\n" in priv
    if invalid:
        return ("That's not a valid privilege (%s)." % priv, 400)
    return None
def add_remove_mail_user_privilege(email, priv, action, env):
    """Grant or revoke a single privilege for a user.

    action must be "add" or "remove". Returns "OK" on success or an
    (error message, 400) tuple on failure.
    """
    # Validate the privilege name first.
    validation = validate_privilege(priv)
    if validation:
        return validation

    # Fetch the current privilege set; this itself may return an error tuple.
    privs = get_mail_user_privileges(email, env)
    if isinstance(privs, tuple):
        return privs  # error

    # Compute the new privilege set.
    if action == "add":
        if priv not in privs:
            privs.append(priv)
    elif action == "remove":
        privs = [existing for existing in privs if existing != priv]
    else:
        return ("Invalid action.", 400)

    # Persist the newline-joined set back to the database.
    conn, c = open_database(env, with_connection=True)
    c.execute("UPDATE users SET privileges=? WHERE email=?", ("\n".join(privs), email))
    if c.rowcount != 1:
        return ("Something went wrong.", 400)
    conn.commit()
    return "OK"
def add_mail_alias(address, forwards_to, permitted_senders, env, update_if_exists=False, do_kick=True):
    """Create (or, with update_if_exists, update) a mail alias.

    address is the alias source. forwards_to is a newline/comma-separated
    list of destination addresses, or a single @domain.tld rewrite rule.
    permitted_senders is a newline/comma-separated list of user accounts
    allowed to send mail as this alias. Returns an (error message, 400)
    tuple on validation failure; on success returns the output of kick()
    when do_kick is True, otherwise None.
    """
    # convert Unicode domain to IDNA
    address = sanitize_idn_email_address(address)

    # Our database is case sensitive (oops), which affects mail delivery
    # (Postfix always queries in lowercase?), so force lowercase.
    address = address.lower()

    # validate address
    address = address.strip()
    if address == "":
        return ("No email address provided.", 400)
    if not validate_email(address, mode='alias'):
        return ("Invalid email address (%s)." % address, 400)

    # validate forwards_to
    validated_forwards_to = []
    forwards_to = forwards_to.strip()

    # extra checks for email addresses used in domain control validation
    is_dcv_source = is_dcv_address(address)

    # Postfix allows a single @domain.tld as the destination, which means
    # the local part on the address is preserved in the rewrite. We must
    # try to convert Unicode to IDNA first before validating that it's a
    # legitimate alias address. Don't allow this sort of rewriting for
    # DCV source addresses.
    r1 = sanitize_idn_email_address(forwards_to)
    if validate_email(r1, mode='alias') and not is_dcv_source:
        validated_forwards_to.append(r1)
    else:
        # Parse comma and \n-separated destination emails & validate. In this
        # case, the forwards_to must be complete email addresses.
        for line in forwards_to.split("\n"):
            for email in line.split(","):
                email = email.strip()
                if email == "": continue
                email = sanitize_idn_email_address(email) # Unicode => IDNA
                if not validate_email(email):
                    return ("Invalid receiver email address (%s)." % email, 400)
                if is_dcv_source and not is_dcv_address(email) and "admin" not in get_mail_user_privileges(email, env, empty_on_error=True):
                    # Make domain control validation hijacking a little harder to mess up by
                    # requiring aliases for email addresses typically used in DCV to forward
                    # only to accounts that are administrators on this system.
                    return ("This alias can only have administrators of this system as destinations because the address is frequently used for domain control validation.", 400)
                validated_forwards_to.append(email)

    # validate permitted_senders
    valid_logins = get_mail_users(env)
    validated_permitted_senders = []
    permitted_senders = permitted_senders.strip()

    # Parse comma and \n-separated sender logins & validate. The permitted_senders must be
    # valid usernames.
    for line in permitted_senders.split("\n"):
        for login in line.split(","):
            login = login.strip()
            if login == "": continue
            if login not in valid_logins:
                return ("Invalid permitted sender: %s is not a user on this system." % login, 400)
            validated_permitted_senders.append(login)

    # Make sure the alias has either a forwards_to or a permitted_sender.
    if len(validated_forwards_to) + len(validated_permitted_senders) == 0:
        return ("The alias must either forward to an address or have a permitted sender.", 400)

    # save to db (comma-joined lists; permitted_senders stored as NULL when empty)
    forwards_to = ",".join(validated_forwards_to)
    if len(validated_permitted_senders) == 0:
        permitted_senders = None
    else:
        permitted_senders = ",".join(validated_permitted_senders)

    conn, c = open_database(env, with_connection=True)
    try:
        c.execute("INSERT INTO aliases (source, destination, permitted_senders) VALUES (?, ?, ?)", (address, forwards_to, permitted_senders))
        return_status = "alias added"
    except sqlite3.IntegrityError:
        # The source already exists; either report or update in place.
        if not update_if_exists:
            return ("Alias already exists (%s)." % address, 400)
        else:
            c.execute("UPDATE aliases SET destination = ?, permitted_senders = ? WHERE source = ?", (forwards_to, permitted_senders, address))
            return_status = "alias updated"

    conn.commit()

    if do_kick:
        # Update things in case any new domains are added.
        return kick(env, return_status)
def remove_mail_alias(address, env, do_kick=True):
    """Delete a mail alias.

    Returns an (error message, 400) tuple if the address is not an alias;
    on success returns the output of kick() when do_kick is True,
    otherwise None.
    """
    # Convert Unicode domain to IDNA before looking up the row.
    address = sanitize_idn_email_address(address)

    conn, c = open_database(env, with_connection=True)
    c.execute("DELETE FROM aliases WHERE source=?", (address,))
    if c.rowcount != 1:
        return ("That's not an alias (%s)." % address, 400)
    conn.commit()

    if do_kick:
        # Update things in case any domains are removed.
        return kick(env, "alias removed")
def get_system_administrator(env):
    """Return the administrator alias address on the box's primary hostname."""
    hostname = env['PRIMARY_HOSTNAME']
    return "administrator@" + hostname
def get_required_aliases(env):
    """Return the set of alias addresses that must exist on this box."""
    required = set()

    # The system administrator alias is required.
    required.add(get_system_administrator(env))

    # The hostmaster alias is exposed in the DNS SOA for each zone.
    required.add("hostmaster@" + env['PRIMARY_HOSTNAME'])

    # Get a list of domains we serve mail for, except ones for which the only
    # email on that domain are the required aliases or a catch-all/domain-forwarder.
    def counts_as_real_mail(alias):
        for prefix in ("postmaster@", "admin@", "abuse@", "@"):
            if alias.startswith(prefix):
                return False
        return True

    real_mail_domains = get_mail_domains(env, filter_aliases=counts_as_real_mail)

    # Create postmaster@, admin@ and abuse@ for all domains we serve
    # mail on. postmaster@ is assumed to exist by our Postfix configuration.
    # admin@ isn't anything, but it might save the user some trouble e.g. when
    # buying an SSL certificate.
    # abuse@ is part of RFC2142: https://www.ietf.org/rfc/rfc2142.txt
    for domain in real_mail_domains:
        for local_part in ("postmaster", "admin", "abuse"):
            required.add(local_part + "@" + domain)

    return required
def kick(env, mail_result=None):
    """Re-sync derived state after a mail account/alias change.

    Ensures every required alias exists (forwarding to the administrator),
    removes auto-generated postmaster/admin/abuse aliases on domains no
    longer used for email, then refreshes DNS and web (nginx) config.
    Returns the accumulated status messages joined into one string.
    """
    results = []

    # Include the current operation's result in output.
    if mail_result is not None:
        results.append(mail_result + "\n")

    # Ensure every required alias exists.
    existing_users = get_mail_users(env)
    existing_alias_records = get_mail_aliases(env)
    existing_aliases = set(a for a, *_ in existing_alias_records) # just first entry in tuple
    required_aliases = get_required_aliases(env)

    def ensure_admin_alias_exists(address):
        # If a user account exists with that address, we're good.
        if address in existing_users:
            return

        # If the alias already exists, we're good.
        if address in existing_aliases:
            return

        # Doesn't exist.
        administrator = get_system_administrator(env)
        if address == administrator: return # don't make an alias from the administrator to itself --- this alias must be created manually
        add_mail_alias(address, administrator, "", env, do_kick=False)
        if administrator not in existing_aliases: return # don't report the alias in output if the administrator alias isn't in yet -- this is a hack to suppress confusing output on initial setup
        results.append("added alias %s (=> %s)\n" % (address, administrator))

    for address in required_aliases:
        ensure_admin_alias_exists(address)

    # Remove auto-generated postmaster/admin on domains we no
    # longer have any other email addresses for.
    for address, forwards_to, *_ in existing_alias_records:
        user, domain = address.split("@")
        if user in ("postmaster", "admin", "abuse") \
            and address not in required_aliases \
            and forwards_to == get_system_administrator(env):
            # Only prune aliases that still forward to the administrator,
            # i.e. ones we auto-generated and the user never customized.
            remove_mail_alias(address, env, do_kick=False)
            results.append("removed alias %s (was to %s; domain no longer used for email)\n" % (address, forwards_to))

    # Update DNS and nginx in case any domains are added/removed.
    from dns_update import do_dns_update
    results.append( do_dns_update(env) )
    from web_update import do_web_update
    results.append( do_web_update(env) )

    return "".join(s for s in results if s != "")
def validate_password(pw):
    """Raise ValueError when pw is unacceptable as an account password."""
    # An empty (or all-whitespace) password is treated as "none given".
    if not pw.strip():
        raise ValueError("No password provided.")

    # Whitespace anywhere in the password is disallowed.
    if re.search(r"[\s]", pw):
        raise ValueError("Passwords cannot contain spaces.")

    # Enforce a minimum length.
    if len(pw) < 8:
        raise ValueError("Passwords must be at least eight characters.")
if __name__ == "__main__":
    # Command-line interface:
    #   mailconfig.py validate-email ADDRESS  -> exit 0 if ADDRESS is a
    #                                            valid user-account address
    #   mailconfig.py update                  -> re-sync aliases/DNS/web
    import sys
    if len(sys.argv) > 2 and sys.argv[1] == "validate-email":
        # Validate that we can create a Dovecot account for a given string.
        if validate_email(sys.argv[2], mode='user'):
            sys.exit(0)
        else:
            sys.exit(1)
    if len(sys.argv) > 1 and sys.argv[1] == "update":
        from utils import load_environment
        # Print the accumulated status output from the sync.
        print(kick(load_environment()))
| |
from types import SimpleNamespace
from typing import Dict
from unittest.mock import MagicMock, patch
from django.http import HttpRequest
from zerver.decorator import webhook_view
from zerver.lib.actions import do_rename_stream
from zerver.lib.exceptions import InvalidJSONError, JsonableError
from zerver.lib.send_email import FromAddress
from zerver.lib.test_classes import WebhookTestCase, ZulipTestCase
from zerver.lib.test_helpers import HostRequestMock
from zerver.lib.users import get_api_key
from zerver.lib.webhooks.common import (
INVALID_JSON_MESSAGE,
MISSING_EVENT_HEADER_MESSAGE,
MissingHTTPEventHeader,
get_fixture_http_headers,
standardize_headers,
validate_extract_webhook_http_header,
)
from zerver.models import UserProfile, get_realm, get_user
class WebhooksCommonTestCase(ZulipTestCase):
    """Tests for shared webhook helpers in zerver.lib.webhooks.common."""

    def test_webhook_http_header_header_exists(self) -> None:
        # A header present in request.META is extracted and returned unchanged.
        webhook_bot = get_user("webhook-bot@zulip.com", get_realm("zulip"))
        request = HostRequestMock()
        request.META["HTTP_X_CUSTOM_HEADER"] = "custom_value"
        request.user = webhook_bot

        header_value = validate_extract_webhook_http_header(
            request, "X_CUSTOM_HEADER", "test_webhook"
        )

        self.assertEqual(header_value, "custom_value")

    def test_webhook_http_header_header_does_not_exist(self) -> None:
        # A missing header raises MissingHTTPEventHeader, and the bot owner is
        # notified via a message from the notification bot.
        webhook_bot = get_user("webhook-bot@zulip.com", get_realm("zulip"))
        webhook_bot.last_reminder = None
        notification_bot = self.notification_bot()
        request = HostRequestMock()
        request.user = webhook_bot
        request.path = "some/random/path"

        exception_msg = "Missing the HTTP event header 'X_CUSTOM_HEADER'"
        with self.assertRaisesRegex(MissingHTTPEventHeader, exception_msg):
            validate_extract_webhook_http_header(request, "X_CUSTOM_HEADER", "test_webhook")

        msg = self.get_last_message()
        expected_message = MISSING_EVENT_HEADER_MESSAGE.format(
            bot_name=webhook_bot.full_name,
            request_path=request.path,
            header_name="X_CUSTOM_HEADER",
            integration_name="test_webhook",
            support_email=FromAddress.SUPPORT,
        ).rstrip()
        self.assertEqual(msg.sender.email, notification_bot.email)
        self.assertEqual(msg.content, expected_message)

    def test_notify_bot_owner_on_invalid_json(self) -> None:
        # notify_bot_owner_on_invalid_json controls whether a malformed-JSON
        # payload triggers a notification message to the bot's owner.
        @webhook_view("ClientName", notify_bot_owner_on_invalid_json=False)
        def my_webhook_no_notify(request: HttpRequest, user_profile: UserProfile) -> None:
            raise InvalidJSONError("Malformed JSON")

        @webhook_view("ClientName", notify_bot_owner_on_invalid_json=True)
        def my_webhook_notify(request: HttpRequest, user_profile: UserProfile) -> None:
            raise InvalidJSONError("Malformed JSON")

        webhook_bot_email = "webhook-bot@zulip.com"
        webhook_bot_realm = get_realm("zulip")
        webhook_bot = get_user(webhook_bot_email, webhook_bot_realm)
        webhook_bot_api_key = get_api_key(webhook_bot)

        request = HostRequestMock()
        request.POST["api_key"] = webhook_bot_api_key
        request.host = "zulip.testserver"
        expected_msg = INVALID_JSON_MESSAGE.format(webhook_name="ClientName")

        last_message_id = self.get_last_message().id
        with self.assertRaisesRegex(JsonableError, "Malformed JSON"):
            my_webhook_no_notify(request)

        # First verify that without the setting, it doesn't send a PM to bot owner.
        msg = self.get_last_message()
        self.assertEqual(msg.id, last_message_id)
        self.assertNotEqual(msg.content, expected_msg.strip())

        # Then verify that with the setting, it does send such a message.
        with self.assertRaisesRegex(JsonableError, "Malformed JSON"):
            my_webhook_notify(request)
        msg = self.get_last_message()
        self.assertNotEqual(msg.id, last_message_id)
        self.assertEqual(msg.sender.email, self.notification_bot().email)
        self.assertEqual(msg.content, expected_msg.strip())

    @patch("zerver.lib.webhooks.common.importlib.import_module")
    def test_get_fixture_http_headers_for_success(self, import_module_mock: MagicMock) -> None:
        # When the integration module defines fixture_to_headers, its return
        # value is used as the fixture's HTTP headers.
        def fixture_to_headers(fixture_name: str) -> Dict[str, str]:
            # A sample function which would normally perform some
            # extra operations before returning a dictionary
            # corresponding to the fixture name passed. For this test,
            # we just return a fixed dictionary.
            return {"key": "value"}

        fake_module = SimpleNamespace(fixture_to_headers=fixture_to_headers)
        import_module_mock.return_value = fake_module

        headers = get_fixture_http_headers("some_integration", "complex_fixture")
        self.assertEqual(headers, {"key": "value"})

    def test_get_fixture_http_headers_for_non_existant_integration(self) -> None:
        # An unknown integration yields no extra headers.
        headers = get_fixture_http_headers("some_random_nonexistant_integration", "fixture_name")
        self.assertEqual(headers, {})

    @patch("zerver.lib.webhooks.common.importlib.import_module")
    def test_get_fixture_http_headers_with_no_fixtures_to_headers_function(
        self,
        import_module_mock: MagicMock,
    ) -> None:
        # A module that lacks fixture_to_headers yields no extra headers.
        fake_module = SimpleNamespace()
        import_module_mock.return_value = fake_module

        self.assertEqual(
            get_fixture_http_headers("some_integration", "simple_fixture"),
            {},
        )

    def test_standardize_headers(self) -> None:
        # Header names are upper-cased with dashes converted to underscores;
        # non-Content-Type headers gain Django's HTTP_ prefix.
        self.assertEqual(standardize_headers({}), {})

        raw_headers = {"Content-Type": "text/plain", "X-Event-Type": "ping"}
        djangoified_headers = standardize_headers(raw_headers)
        expected_djangoified_headers = {"CONTENT_TYPE": "text/plain", "HTTP_X_EVENT_TYPE": "ping"}
        self.assertEqual(djangoified_headers, expected_djangoified_headers)
class WebhookURLConfigurationTestCase(WebhookTestCase):
    """Verify a webhook URL may target a stream by numeric id: messages land
    in the stream even after it is renamed."""

    STREAM_NAME = "helloworld"
    WEBHOOK_DIR_NAME = "helloworld"
    URL_TEMPLATE = "/api/v1/external/helloworld?stream={stream}&api_key={api_key}"

    def setUp(self) -> None:
        super().setUp()
        stream = self.subscribe(self.test_user, self.STREAM_NAME)

        # In actual webhook tests, we will not need to use stream id.
        # We assign stream id to STREAM_NAME for testing URL configuration only.
        self.STREAM_NAME = str(stream.id)
        # Rename after building the id-based target so the test can confirm
        # delivery follows the id, not the original name.
        do_rename_stream(stream, "helloworld_renamed", self.test_user)

        self.url = self.build_webhook_url()

    def test_trigger_stream_message_by_id(self) -> None:
        # check_webhook cannot be used here as it
        # subscribes the test user to self.STREAM_NAME
        payload = self.get_body("hello")

        self.send_webhook_payload(
            self.test_user, self.url, payload, content_type="application/json"
        )

        expected_topic = "Hello World"
        expected_message = "Hello! I am happy to be here! :smile:\nThe Wikipedia featured article for today is **[Marilyn Monroe](https://en.wikipedia.org/wiki/Marilyn_Monroe)**"

        msg = self.get_last_message()
        self.assert_stream_message(
            message=msg,
            stream_name="helloworld_renamed",
            topic_name=expected_topic,
            content=expected_message,
        )
class MissingEventHeaderTestCase(WebhookTestCase):
    """End-to-end check that a webhook request missing its event header
    returns a JSON error and notifies the bot owner."""

    STREAM_NAME = "groove"
    URL_TEMPLATE = "/api/v1/external/groove?stream={stream}&api_key={api_key}"

    # This tests the validate_extract_webhook_http_header function with
    # an actual webhook, instead of just making a mock
    def test_missing_event_header(self) -> None:
        self.subscribe(self.test_user, self.STREAM_NAME)
        result = self.client_post(
            self.url,
            self.get_body("ticket_state_changed"),
            content_type="application/x-www-form-urlencoded",
        )
        self.assert_json_error(result, "Missing the HTTP event header 'X_GROOVE_EVENT'")

        webhook_bot = get_user("webhook-bot@zulip.com", get_realm("zulip"))
        webhook_bot.last_reminder = None
        notification_bot = self.notification_bot()
        msg = self.get_last_message()
        expected_message = MISSING_EVENT_HEADER_MESSAGE.format(
            bot_name=webhook_bot.full_name,
            request_path="/api/v1/external/groove",
            header_name="X_GROOVE_EVENT",
            integration_name="Groove",
            support_email=FromAddress.SUPPORT,
        ).rstrip()
        if msg.sender.email != notification_bot.email:  # nocoverage
            # This block seems to fire occasionally; debug output:
            print(msg)
            print(msg.content)
        self.assertEqual(msg.sender.email, notification_bot.email)
        self.assertEqual(msg.content, expected_message)

    def get_body(self, fixture_name: str) -> str:
        # Groove fixtures are JSON payloads posted as form-encoded bodies.
        return self.webhook_fixture_data("groove", fixture_name, file_type="json")
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteCrossConnectionsOperations:
"""ExpressRouteCrossConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.ExpressRouteCrossConnectionListResult"]:
        """Retrieves all the ExpressRouteCrossConnections in a subscription.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRouteCrossConnectionListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_10_01.models.ExpressRouteCrossConnectionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCrossConnectionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request; the first page uses the operation URL,
            # subsequent pages use the service-provided next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; return (continuation token, page items).
            deserialized = self._deserialize('ExpressRouteCrossConnectionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch a single page; only HTTP 200 counts as success.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteCrossConnections'}  # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.ExpressRouteCrossConnectionListResult"]:
        """Retrieves all the ExpressRouteCrossConnections in a resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRouteCrossConnectionListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_10_01.models.ExpressRouteCrossConnectionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCrossConnectionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # Build the GET request; the first page uses the operation URL,
            # subsequent pages use the service-provided next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # Deserialize one page; return (continuation token, page items).
            deserialized = self._deserialize('ExpressRouteCrossConnectionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch a single page; only HTTP 200 counts as success.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        **kwargs: Any
    ) -> "_models.ExpressRouteCrossConnection":
        """Gets details about the specified ExpressRouteCrossConnection.

        :param resource_group_name: The name of the resource group (peering location of the circuit).
        :type resource_group_name: str
        :param cross_connection_name: The name of the ExpressRouteCrossConnection (service key of the
         circuit).
        :type cross_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRouteCrossConnection, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2018_10_01.models.ExpressRouteCrossConnection
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCrossConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        accept = "application/json"

        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only HTTP 200 is a successful response for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        parameters: "_models.ExpressRouteCrossConnection",
        **kwargs: Any
    ) -> "_models.ExpressRouteCrossConnection":
        """Issue the initial PUT of the create_or_update long-running
        operation and return the first deserialized response."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCrossConnection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # Serialize the request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ExpressRouteCrossConnection')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only HTTP 200 is accepted for the initial LRO response.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        parameters: "_models.ExpressRouteCrossConnection",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ExpressRouteCrossConnection"]:
        """Update the specified ExpressRouteCrossConnection.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the ExpressRouteCrossConnection.
        :type cross_connection_name: str
        :param parameters: Parameters supplied to the update express route crossConnection operation.
        :type parameters: ~azure.mgmt.network.v2018_10_01.models.ExpressRouteCrossConnection
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ExpressRouteCrossConnection or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.ExpressRouteCrossConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteCrossConnection"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved state.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                cross_connection_name=cross_connection_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response into the model (or cls result).
            deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # Select the polling strategy: ARM polling by default, no polling on
        # False, or a caller-supplied polling method object.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'}  # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
cross_connection_name: str,
cross_connection_parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.ExpressRouteCrossConnection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(cross_connection_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'} # type: ignore
    async def begin_update_tags(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        cross_connection_parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ExpressRouteCrossConnection"]:
        """Updates an express route cross connection tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the cross connection.
        :type cross_connection_name: str
        :param cross_connection_parameters: Parameters supplied to update express route cross
         connection tags.
        :type cross_connection_parameters: ~azure.mgmt.network.v2018_10_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ExpressRouteCrossConnection or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.ExpressRouteCrossConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop LRO control options first; whatever remains in kwargs is
        # forwarded to the initial request and then to the poller.
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnection"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial PATCH when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._update_tags_initial(
                resource_group_name=resource_group_name,
                cross_connection_name=cross_connection_name,
                cross_connection_parameters=cross_connection_parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final resource once polling completes.
            deserialized = self._deserialize('ExpressRouteCrossConnection', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # URL arguments the ARM poller needs to re-resolve polling links.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling: True -> default ARM poller, False -> no polling, else a custom poller.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}'} # type: ignore
async def _list_arp_table_initial(
self,
resource_group_name: str,
cross_connection_name: str,
peering_name: str,
device_path: str,
**kwargs: Any
) -> Optional["_models.ExpressRouteCircuitsArpTableListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCircuitsArpTableListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-10-01"
accept = "application/json"
# Construct URL
url = self._list_arp_table_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
'devicePath': self._serialize.url("device_path", device_path, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_list_arp_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/arpTables/{devicePath}'} # type: ignore
    async def begin_list_arp_table(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        peering_name: str,
        device_path: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ExpressRouteCircuitsArpTableListResult"]:
        """Gets the currently advertised ARP table associated with the express route cross connection in a
        resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the ExpressRouteCrossConnection.
        :type cross_connection_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param device_path: The path of the device.
        :type device_path: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitsArpTableListResult or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.ExpressRouteCircuitsArpTableListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop LRO control options first; remaining kwargs flow to the initial
        # request and then to the poller.
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitsArpTableListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial POST when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._list_arp_table_initial(
                resource_group_name=resource_group_name,
                cross_connection_name=cross_connection_name,
                peering_name=peering_name,
                device_path=device_path,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final ARP table once polling completes.
            deserialized = self._deserialize('ExpressRouteCircuitsArpTableListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # URL arguments the ARM poller needs to re-resolve polling links.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling: True -> default ARM poller, False -> no polling, else a custom poller.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_list_arp_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/arpTables/{devicePath}'} # type: ignore
    async def _list_routes_table_summary_initial(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        peering_name: str,
        device_path: str,
        **kwargs: Any
    ) -> Optional["_models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult"]:
        # Initial POST that starts the route-table-summary LRO. Returns the
        # deserialized result on 200, or None on 202 (operation in progress;
        # the poller in begin_list_routes_table_summary follows up).
        cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult"]]
        # Typed exceptions for well-known ARM error codes; extendable by caller.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        accept = "application/json"
        # Construct URL
        url = self._list_routes_table_summary_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers (no request body for this POST)
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 202 carries no body; leave the result as None and let the poller retry.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCrossConnectionsRoutesTableSummaryListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _list_routes_table_summary_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'} # type: ignore
    async def begin_list_routes_table_summary(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        peering_name: str,
        device_path: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult"]:
        """Gets the route table summary associated with the express route cross connection in a resource
        group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the ExpressRouteCrossConnection.
        :type cross_connection_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param device_path: The path of the device.
        :type device_path: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ExpressRouteCrossConnectionsRoutesTableSummaryListResult or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop LRO control options first; remaining kwargs flow to the initial
        # request and then to the poller.
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCrossConnectionsRoutesTableSummaryListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial POST when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._list_routes_table_summary_initial(
                resource_group_name=resource_group_name,
                cross_connection_name=cross_connection_name,
                peering_name=peering_name,
                device_path=device_path,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final summary once polling completes.
            deserialized = self._deserialize('ExpressRouteCrossConnectionsRoutesTableSummaryListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # URL arguments the ARM poller needs to re-resolve polling links.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling: True -> default ARM poller, False -> no polling, else a custom poller.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_list_routes_table_summary.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTablesSummary/{devicePath}'} # type: ignore
    async def _list_routes_table_initial(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        peering_name: str,
        device_path: str,
        **kwargs: Any
    ) -> Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]:
        # Initial POST that starts the routes-table listing LRO. Returns the
        # deserialized result on 200, or None on 202 (operation in progress;
        # the poller in begin_list_routes_table follows up).
        cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ExpressRouteCircuitsRoutesTableListResult"]]
        # Typed exceptions for well-known ARM error codes; extendable by caller.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-10-01"
        accept = "application/json"
        # Construct URL
        url = self._list_routes_table_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers (no request body for this POST)
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 202 carries no body; leave the result as None and let the poller retry.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _list_routes_table_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTables/{devicePath}'} # type: ignore
    async def begin_list_routes_table(
        self,
        resource_group_name: str,
        cross_connection_name: str,
        peering_name: str,
        device_path: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ExpressRouteCircuitsRoutesTableListResult"]:
        """Gets the currently advertised routes table associated with the express route cross connection
        in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param cross_connection_name: The name of the ExpressRouteCrossConnection.
        :type cross_connection_name: str
        :param peering_name: The name of the peering.
        :type peering_name: str
        :param device_path: The path of the device.
        :type device_path: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ExpressRouteCircuitsRoutesTableListResult or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_10_01.models.ExpressRouteCircuitsRoutesTableListResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop LRO control options first; remaining kwargs flow to the initial
        # request and then to the poller.
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ExpressRouteCircuitsRoutesTableListResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial POST when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._list_routes_table_initial(
                resource_group_name=resource_group_name,
                cross_connection_name=cross_connection_name,
                peering_name=peering_name,
                device_path=device_path,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final routes table once polling completes.
            deserialized = self._deserialize('ExpressRouteCircuitsRoutesTableListResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # URL arguments the ARM poller needs to re-resolve polling links.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'crossConnectionName': self._serialize.url("cross_connection_name", cross_connection_name, 'str'),
            'peeringName': self._serialize.url("peering_name", peering_name, 'str'),
            'devicePath': self._serialize.url("device_path", device_path, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # polling: True -> default ARM poller, False -> no polling, else a custom poller.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_list_routes_table.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteCrossConnections/{crossConnectionName}/peerings/{peeringName}/routeTables/{devicePath}'} # type: ignore
| |
#!/usr/bin/env python
"""
Using navboxplus to perfectly control a motor sensed with only a cheap encoder.
Model-augmented state is: [position, velocity, drag/inertia, b/inertia, disturbance].
"""
from __future__ import division
import numpy as np; npl = np.linalg
import matplotlib.pyplot as plt
from navboxplus import NavBoxPlus
# Motor dynamics
def motor(x, u, wf, dt):
    """Euler-step the motor state forward by dt under effort u and noise wf.

    State layout: [position, velocity, drag/inertia, b/inertia, disturbance].
    The three model-parameter states have zero dynamics (assumed constant).
    """
    vel = x[1]
    accel = x[4] + x[3]*u - x[2]*x[1]
    rates = np.array([vel, accel, 0, 0, 0])
    stepped = x + rates*dt + wf
    # Clamp the physical parameters so noise can't drive them nonphysical.
    stepped[2] = max(stepped[2], 0.5)
    stepped[3] = max(stepped[3], 0.5)
    return stepped
# Encoder model (only noise in the form of discretization)
res = 512/360  # ticks/deg
z_per_t = 20  # samples/s
def encoder(x, u, wh):
    """Quantized absolute-encoder reading (whole ticks) of position x[0]."""
    ticks = res * x[0]
    return np.floor(ticks)
# True noise characteristics
# (only the drag and gain parameters actually random-walk in the simulation)
wf0_true = np.array([0, 0, 0, 0, 0])
Cf_true = np.diag([0, 0, 1E-3, 1E-6, 0])
# Our guesses at the dynamics and sensor noise characteristics
# We cannot express any perfect confidence
wf0 = np.zeros(5)
Cf = np.diag([1E-7, 1E-4, 1E-3, 1E-6, 1E-2]) # disturbance is not really constant
wh0 = 0
Ch = 1 # because the encoder discretization acts like noise
# Simulation time domain (also chooses predict frequency)
T = 40 # s
dt = 0.05 # s
t = np.arange(0, T, dt) # s
i_per_z = int(1/(z_per_t*dt)) # iters/sample
assert 1/z_per_t >= dt # time between samples >= sim timestep ?
# Desired trajectory: [position, velocity] rows, one per timestep
# r = [180, 0] * np.ones((len(t), 2)) # setpoint, not much excitation information
rv = 0.5
r = 15*np.vstack((np.sin(rv*t), rv*np.cos(rv*t))).T # sinusoid, good excitation
# Unknown external disturbance (tracked as a state)
dist = 8*np.ones_like(t); dist[:len(t)//2] = 0 # sudden push
# dist = 3*np.cos(2*rv*(t+2)) + 3 # sinusoid
# Controller with feedback and feedforward based on estimated model
ulims = (-50, 50)
gains = 5*np.array([1, 1])
feedback = 0; feedforward = 0  # for externally recording these quantities
def controller(r, rnext, x, Cx, dt):
    """PD feedback plus model-inversion feedforward, saturated to ulims.

    Side effect: stores the two components in module-level `feedback` /
    `feedforward` so the simulation loop can record them.
    """
    global feedback, feedforward
    tracking_error = r - x[:2]
    feedback = gains.dot(tracking_error)
    desired_accel = (rnext[1] - r[1])/dt
    feedforward = (1/x[3]) * (desired_accel + x[2]*r[1] - x[4])
    return np.clip(feedback + feedforward, *ulims)
# State, estimate, covariance, measurement, and effort timeseries
# x: true state, xh: estimate, Cx: estimate covariance,
# z: encoder reading, u: total effort, uff: feedforward component
x = np.zeros((len(t), 5))
xh = np.zeros((len(t), 5))
Cx = np.zeros((len(t), 5, 5))
z = np.zeros((len(t), 1))
u = np.zeros((len(t), 1))
uff = np.zeros((len(t), 1))
# Initial conditions (the estimate is deliberately far from the truth)
x[0] = [15, 0, 5, 2, dist[0]]
xh[0] = [-15, 10, 1, 1, 0]
Cx[0] = 10*np.eye(5)
u[0] = 0
uff[0] = 0
# Configure navboxplus
# (note that we will give a "smoothed" encoder model to capture its true behavior)
nav = NavBoxPlus(x0=np.copy(xh[0]),
                 Cx0=np.copy(Cx[0]),
                 g=controller,
                 f=motor,
                 hDict={'encoder': lambda x, u, wh: res*x[0] + wh},
                 n_r=2,
                 n_wf=5,
                 n_whDict={'encoder': 1})
# Simulation
for i, ti in enumerate(t[1:]):
    # Choose control and predict next state
    try:
        u[i+1] = nav.predict(r[i], r[i+1], wf0, Cf, dt)
        uff[i+1] = feedforward
    # FIX: npl is np.linalg, so the exception class is npl.LinAlgError.
    # The original `npl.linalg.LinAlgError` reached through numpy's private
    # linalg.linalg submodule, which was removed in NumPy 2.0.
    except npl.LinAlgError:
        print("Cholesky failed in predict!")
        break
    # Advance true state using control
    wf = np.random.multivariate_normal(wf0_true, Cf_true)
    x[i+1] = motor(x[i], u[i+1], wf, dt)
    x[i+1, 4] = dist[i+1]  # update disturbance
    # When new measurement comes in...
    if i % i_per_z == 0:
        # Get new measurement from real world
        z[i+1] = encoder(x[i+1], 0, 0)
        # Update state estimate
        try:
            nav.correct('encoder', z[i+1], wh0, Ch)
        except npl.LinAlgError:  # same fix as in the predict step above
            print("Cholesky failed in correct!")
            break
    # ...otherwise hold last measurement (for plotting only)
    else:
        z[i+1] = np.copy(z[i])
    # Record new estimate
    xh[i+1], Cx[i+1] = nav.get_state_and_cov()
# Just checkin...
if not nav.is_pdef(nav.Cx):
    print("WOAH your state estimate covariance is not posdef, how'd that happen?\n")
print("Final state estimate covariance:")
print(np.round(nav.Cx, 3))
#### Plots
fig1 = plt.figure()
fig1.suptitle("Estimation and Tracking via Online UKF-Learned Model", fontsize=22)
ax1 = fig1.add_subplot(6, 1, 1)
ax1.plot(t[:i], x[:i, 0], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 0], label="estimate", color='k', ls=':', lw=3)
ax1.plot(t[:i], r[:i, 0], label="desired", color='r', ls='--')
ax1.set_xlim([0, ti])
ax1.set_ylabel("position\ndeg", fontsize=12)
ax1.legend(loc='upper right')
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 2)
ax1.plot(t[:i], x[:i, 1], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 1], label="estimate", color='k', ls=':', lw=3)
ax1.plot(t[:i], r[:i, 1], label="desired", color='r', ls='--')
ax1.set_xlim([0, ti])
ax1.set_ylabel("velocity\ndeg/s", fontsize=12)
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 3)
ax1.plot(t[:i], x[:i, 2], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 2], label="estimate", color='k', ls=':', lw=3)
ax1.set_xlim([0, ti])
ax1.set_ylabel("drag/inertia\n(deg/s^2)/(deg/s)", fontsize=12)
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 4)
ax1.plot(t[:i], x[:i, 3], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 3], label="estimate", color='k', ls=':', lw=3)
ax1.set_xlim([0, ti])
ax1.set_ylabel("b/inertia\n(deg/s^2)/V", fontsize=12)
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 5)
ax1.plot(t[:i], x[:i, 4], label="true", color='g', lw=3)
ax1.plot(t[:i], xh[:i, 4], label="estimate", color='k', ls=':', lw=3)
ax1.set_xlim([0, ti])
ax1.set_ylabel("disturbance\ndeg/s^2", fontsize=12)
ax1.grid(True)
ax1 = fig1.add_subplot(6, 1, 6)
ax1.plot(t[:i], u[:i], label="total", color='r', lw=3)
ax1.plot(t[:i], uff[:i], label="feedforward", color='b', ls='--', lw=2)
ax1.set_xlim([0, ti])
ax1.set_ylabel("effort\nV", fontsize=12)
ax1.set_xlabel("time\ns", fontsize=12)
ax1.legend(loc='upper right')
ax1.grid(True)
fig2 = plt.figure()
fig2.suptitle("Covariance Diagonals", fontsize=22)
ax2 = fig2.add_subplot(1, 1, 1)
# FIX: np.array(map(...)) builds a useless 0-d object array on Python 3
# because map is lazy there; materialize with a list comprehension instead.
dvs = np.array([np.diag(C) for C in Cx[:i]])
# FIX: xrange does not exist on Python 3; range works on both.
for xi in range(len(x[0])):
    ax2.plot(t[:i], dvs[:, xi], label="State {}".format(xi))
ax2.set_xlim([0, ti])
ax2.set_ylabel("value", fontsize=16)
ax2.set_xlabel("time\ns", fontsize=16)
ax2.legend(loc='upper right')
ax2.grid(True)
fig3 = plt.figure()
fig3.suptitle("Absolute Encoder Measurements", fontsize=22)
ax3 = fig3.add_subplot(1, 1, 1)
ax3.plot(t[:i], z[:i], color='b', lw=2)
ax3.set_xlim([0, ti])
ax3.set_ylabel("ticks", fontsize=16)
ax3.set_xlabel("time\ns", fontsize=16)
ax3.grid(True)
plt.show()
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module provides the tools used to internally run the astropy test suite
from the installed astropy. It makes use of the `pytest` testing framework.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import functools
import os
import sys
import types
import warnings
import pytest
from ..extern import six
from ..extern.six.moves import cPickle as pickle
try:
# Import pkg_resources to prevent it from issuing warnings upon being
# imported from within py.test. See
# https://github.com/astropy/astropy/pull/537 for a detailed explanation.
import pkg_resources # pylint: disable=W0611
except ImportError:
pass
from ..utils.exceptions import (AstropyDeprecationWarning,
AstropyPendingDeprecationWarning)
# For backward-compatibility with affiliated packages
from .runner import TestRunner # pylint: disable=W0611
__all__ = ['raises', 'enable_deprecations_as_exceptions', 'remote_data',
'treat_deprecations_as_exceptions', 'catch_warnings',
'assert_follows_unicode_guidelines', 'quantity_allclose',
'assert_quantity_allclose', 'check_pickling_recovery',
'pickle_protocol', 'generic_recursive_equality_test']
# pytest marker to mark tests which get data from the web
remote_data = pytest.mark.remote_data
# This is for Python 2.x and 3.x compatibility. distutils expects
# options to all be byte strings on Python 2 and Unicode strings on
# Python 3.
def _fix_user_options(options):
def to_str_or_none(x):
if x is None:
return None
return str(x)
return [tuple(to_str_or_none(x) for x in y) for y in options]
def _save_coverage(cov, result, rootdir, testing_path):
    """
    Finalize a coverage-mode test run: remap measured file paths from the
    temporary testing directory back to the real source tree, then save the
    ``.coverage`` data file and an HTML report under ``rootdir``.

    Does nothing when ``result`` (the test exit status) is non-zero.
    """
    from ..utils.console import color_print

    if result != 0:
        return

    # The collected data refers to files inside the temporary directory;
    # rewrite every key so the report points at the true source path.
    # Note that this will not work properly for packages that still
    # rely on 2to3.
    try:
        # Coverage 4.0: _harvest_data has been renamed to get_data, the
        # lines dict is private
        cov.get_data()
    except AttributeError:
        # Coverage < 4.0
        cov._harvest_data()
        lines = cov.data.lines
    else:
        lines = cov.data._lines

    for old_path in list(lines):
        relative = os.path.relpath(os.path.realpath(old_path),
                                   os.path.realpath(testing_path))
        remapped = os.path.abspath(os.path.join(rootdir, relative))
        lines[remapped] = lines.pop(old_path)

    color_print('Saving coverage data in .coverage...', 'green')
    cov.save()

    color_print('Saving HTML coverage report in htmlcov...', 'green')
    cov.html_report(directory=os.path.join(rootdir, 'htmlcov'))
class raises(object):
    """
    Mark a test as expected to raise a given exception.

    Usable as a decorator::

        @raises(ZeroDivisionError)
        def test_foo():
            x = 1 / 0

    It can also be used as a context manager, in which case it simply
    delegates to the ``pytest.raises`` context manager (the shared name
    helps avoid confusion between the two forms).
    """
    # pep-8 naming exception -- this is a decorator class

    def __init__(self, exc):
        self._exc = exc
        self._ctx = None

    def __call__(self, func):
        @functools.wraps(func)
        def run_raises_test(*args, **kwargs):
            # Delegate to pytest.raises in its callable form.
            pytest.raises(self._exc, func, *args, **kwargs)
        return run_raises_test

    def __enter__(self):
        self._ctx = pytest.raises(self._exc)
        return self._ctx.__enter__()

    def __exit__(self, *exc_info):
        return self._ctx.__exit__(*exc_info)
# Module-level switches set by enable_deprecations_as_exceptions() and read
# by treat_deprecations_as_exceptions().
_deprecations_as_exceptions = False
_include_astropy_deprecations = True
# Modules known to emit DeprecationWarnings at import time; they are imported
# eagerly (and their warnings suppressed) before filters are set to "error".
_modules_to_ignore_on_import = set([
    'compiler',  # A deprecated stdlib module used by py.test
    'scipy',
    'pygments',
    'ipykernel',
    'setuptools'])
# Per-Python-version deprecation warning messages (regex patterns) that are
# ignored rather than raised; applies to the listed version and newer.
_warnings_to_ignore_by_pyver = {
    (3, 4): set([
        # py.test reads files with the 'U' flag, which is now
        # deprecated in Python 3.4.
        r"'U' mode is deprecated",
        # BeautifulSoup4 triggers warning in stdlib's html module.
        r"The strict argument and mode are deprecated\.",
        r"The value of convert_charrefs will become True in 3\.5\. "
        r"You are encouraged to set the value explicitly\."]),
    (3, 5): set([
        # py.test raises this warning on Python 3.5.
        # This can be removed when fixed in py.test.
        # See https://github.com/pytest-dev/pytest/pull/1009
        r"inspect\.getargspec\(\) is deprecated, use "
        r"inspect\.signature\(\) instead"])}
def enable_deprecations_as_exceptions(include_astropy_deprecations=True,
                                      modules_to_ignore_on_import=(),
                                      warnings_to_ignore_by_pyver=None):
    """
    Turn on the feature that turns deprecations into exceptions.

    Parameters
    ----------
    include_astropy_deprecations : bool
        If set to `True`, ``AstropyDeprecationWarning`` and
        ``AstropyPendingDeprecationWarning`` are also turned into exceptions.
    modules_to_ignore_on_import : list of str
        List of additional modules that generate deprecation warnings
        on import, which are to be ignored. By default, these are already
        included: ``compiler``, ``scipy``, ``pygments``, ``ipykernel``, and
        ``setuptools``.
    warnings_to_ignore_by_pyver : dict
        Dictionary mapping tuple of ``(major, minor)`` Python version to
        a list of deprecation warning messages to ignore. This is in
        addition of those already ignored by default
        (see ``_warnings_to_ignore_by_pyver`` values).
    """
    # NOTE: defaults are immutable (tuple / None) instead of the former
    # mutable [] / {} literals, avoiding the shared-mutable-default pitfall;
    # callers passing a list/dict are unaffected.
    global _deprecations_as_exceptions
    _deprecations_as_exceptions = True

    global _include_astropy_deprecations
    _include_astropy_deprecations = include_astropy_deprecations

    global _modules_to_ignore_on_import
    _modules_to_ignore_on_import.update(modules_to_ignore_on_import)

    global _warnings_to_ignore_by_pyver
    if warnings_to_ignore_by_pyver is not None:
        for key, val in six.iteritems(warnings_to_ignore_by_pyver):
            # Merge into any existing per-version set rather than replacing.
            if key in _warnings_to_ignore_by_pyver:
                _warnings_to_ignore_by_pyver[key].update(val)
            else:
                _warnings_to_ignore_by_pyver[key] = set(val)
def treat_deprecations_as_exceptions():
    """
    Turn all DeprecationWarnings (which indicate deprecated uses of
    Python itself or Numpy, but not within Astropy, where we use our
    own deprecation warning class) into exceptions so that we find
    out about them early.
    This completely resets the warning filters and any "already seen"
    warning state.
    """
    # First, totally reset the warning state. The modules may change during
    # this iteration thus we copy the original state to a list to iterate
    # on. See https://github.com/astropy/astropy/pull/5513.
    for module in list(six.itervalues(sys.modules)):
        # We don't want to deal with six.MovedModules, only "real"
        # modules.
        if (isinstance(module, types.ModuleType) and
                hasattr(module, '__warningregistry__')):
            del module.__warningregistry__
    # When the feature is off, only the reset above is performed.
    if not _deprecations_as_exceptions:
        return
    warnings.resetwarnings()
    # Hide the next couple of DeprecationWarnings
    warnings.simplefilter('ignore', DeprecationWarning)
    # Here's the wrinkle: a couple of our third-party dependencies
    # (py.test and scipy) are still using deprecated features
    # themselves, and we'd like to ignore those.  Fortunately, those
    # show up only at import time, so if we import those things *now*,
    # before we turn the warnings into exceptions, we're golden.
    for m in _modules_to_ignore_on_import:
        try:
            __import__(m)
        except ImportError:
            pass
    # Now, start over again with the warning filters
    warnings.resetwarnings()
    # Now, turn DeprecationWarnings into exceptions
    warnings.filterwarnings("error", ".*", DeprecationWarning)
    # Only turn astropy deprecation warnings into exceptions if requested
    if _include_astropy_deprecations:
        warnings.filterwarnings("error", ".*", AstropyDeprecationWarning)
        warnings.filterwarnings("error", ".*", AstropyPendingDeprecationWarning)
    # Per-version ignores are added last; later filterwarnings() calls are
    # inserted at the front of the filter list and therefore take precedence
    # over the blanket "error" filters above.
    for v in _warnings_to_ignore_by_pyver:
        if sys.version_info[:2] >= v:
            for s in _warnings_to_ignore_by_pyver[v]:
                warnings.filterwarnings("ignore", s, DeprecationWarning)
class catch_warnings(warnings.catch_warnings):
    """
    A high-powered version of ``warnings.catch_warnings`` for testing,
    removing any dependence on the order in which tests are run.

    On entry it blitzes any memory of previously-seen warnings so that
    every warning is caught and recorded.

    ``*classes`` selects which warning classes to collect; with no
    arguments, all warnings are collected.  Use as follows::

        with catch_warnings(MyCustomWarning) as w:
            do.something.bad()
        assert len(w) > 0
    """

    def __init__(self, *classes):
        super(catch_warnings, self).__init__(record=True)
        self.classes = classes

    def __enter__(self):
        captured = super(catch_warnings, self).__enter__()
        treat_deprecations_as_exceptions()
        if not self.classes:
            # No filter requested: record everything.
            warnings.simplefilter('always')
        else:
            # Record only the requested classes; silence the rest.
            warnings.simplefilter('ignore')
            for warning_class in self.classes:
                warnings.simplefilter('always', warning_class)
        return captured

    def __exit__(self, exc_type, exc_value, tb):
        treat_deprecations_as_exceptions()
class ignore_warnings(catch_warnings):
    """
    Ignore all warnings raised within a function or block of code.

    Works either as a context manager or as a function decorator.  An
    optional ``category`` (a single warning class or a list of them)
    restricts which warnings are ignored.
    """

    def __init__(self, category=None):
        super(ignore_warnings, self).__init__()
        if isinstance(category, type) and issubclass(category, Warning):
            # Normalize a single warning class to a one-element list.
            self.category = [category]
        else:
            self.category = category

    def __call__(self, func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # A fresh context-manager instance per call: reusing ``self``
            # would break if the decorated function is called repeatedly.
            with self.__class__(category=self.category):
                return func(*args, **kwargs)
        return wrapper

    def __enter__(self):
        retval = super(ignore_warnings, self).__enter__()
        if self.category is None:
            warnings.simplefilter('ignore')
        else:
            for category in self.category:
                warnings.simplefilter('ignore', category)
        return retval
def assert_follows_unicode_guidelines(
        x, roundtrip=None):
    """
    Test that an object follows our Unicode policy. See
    "Unicode guidelines" in the coding guidelines.
    Parameters
    ----------
    x : object
        The instance to test
    roundtrip : module, optional
        When provided, this namespace will be used to evaluate
        ``repr(x)`` and ensure that it roundtrips. It will also
        ensure that ``__bytes__(x)`` and ``__unicode__(x)`` roundtrip.
        If not provided, no roundtrip testing will be performed.
    """
    from .. import conf
    from ..extern import six
    # With unicode_output disabled, every representation must be pure ASCII.
    with conf.set_temp('unicode_output', False):
        bytes_x = bytes(x)
        unicode_x = six.text_type(x)
        repr_x = repr(x)
        assert isinstance(bytes_x, bytes)
        bytes_x.decode('ascii')  # raises UnicodeDecodeError on non-ASCII
        assert isinstance(unicode_x, six.text_type)
        unicode_x.encode('ascii')
        assert isinstance(repr_x, six.string_types)
        if isinstance(repr_x, bytes):
            repr_x.decode('ascii')
        else:
            repr_x.encode('ascii')
        if roundtrip is not None:
            assert x.__class__(bytes_x) == x
            assert x.__class__(unicode_x) == x
            assert eval(repr_x, roundtrip) == x
    # With unicode_output enabled, the unicode form may contain non-ASCII
    # characters, so (unlike above) unicode_x is not encoded to ASCII here.
    with conf.set_temp('unicode_output', True):
        bytes_x = bytes(x)
        unicode_x = six.text_type(x)
        repr_x = repr(x)
        assert isinstance(bytes_x, bytes)
        bytes_x.decode('ascii')
        assert isinstance(unicode_x, six.text_type)
        assert isinstance(repr_x, six.string_types)
        if isinstance(repr_x, bytes):
            repr_x.decode('ascii')
        else:
            repr_x.encode('ascii')
        if roundtrip is not None:
            assert x.__class__(bytes_x) == x
            assert x.__class__(unicode_x) == x
            assert eval(repr_x, roundtrip) == x
@pytest.fixture(params=[0, 1, -1])
def pickle_protocol(request):
    """
    Fixture to run all the tests for protocols 0 and 1, and -1 (most advanced).
    (Originally from astropy.table.tests.test_pickle)
    """
    # Each test that uses this fixture runs once per pickle protocol value.
    return request.param
def generic_recursive_equality_test(a, b, class_history):
    """
    Assert that ``a`` and ``b`` carry equal instance attributes, then
    recurse into attribute values that themselves have a ``__dict__``.

    ``class_history`` lists the classes already visited on the current
    recursion path and is used to avoid infinite recursion.
    """
    attrs_a = a.__dict__
    attrs_b = b.__dict__
    for name, value in attrs_a.items():
        assert name in attrs_b, "Did not pickle {0}".format(name)
        if hasattr(value, '__eq__'):
            matches = (value == attrs_b[name])
            # Element-wise comparisons (e.g. arrays) yield an iterable;
            # require every element to compare equal.
            if '__iter__' in dir(matches):
                matches = (False not in matches)
            assert matches, "Value of {0} changed by pickling".format(name)
        if hasattr(value, '__dict__'):
            if value.__class__ in class_history:
                # Already visited this class on this path: stop recursing.
                pass
            else:
                generic_recursive_equality_test(
                    value, attrs_b[name],
                    [value.__class__] + class_history)
def check_pickling_recovery(original, protocol):
    """
    Round-trip ``original`` through pickle at the given protocol and
    assert that its attributes survive unpickling unchanged.
    """
    serialized = pickle.dumps(original, protocol=protocol)
    restored = pickle.loads(serialized)
    generic_recursive_equality_test(original, restored,
                                    [original.__class__])
def assert_quantity_allclose(actual, desired, rtol=1.e-7, atol=None,
                             **kwargs):
    """
    Raise an assertion if two objects are not equal up to desired tolerance.
    This is a :class:`~astropy.units.Quantity`-aware version of
    :func:`numpy.testing.assert_allclose`.
    """
    import numpy as np
    # Strip units (converting everything to the units of ``actual``) and
    # delegate the numeric comparison to numpy.
    unitless_args = _unquantify_allclose_arguments(actual, desired,
                                                   rtol, atol)
    np.testing.assert_allclose(*unitless_args, **kwargs)
def quantity_allclose(a, b, rtol=1.e-5, atol=None, **kwargs):
    """
    Returns True if two arrays are element-wise equal within a tolerance.
    This is a :class:`~astropy.units.Quantity`-aware version of
    :func:`numpy.allclose`.
    """
    import numpy as np
    # Strip units before handing the comparison off to numpy.
    unitless_args = _unquantify_allclose_arguments(a, b, rtol, atol)
    return np.allclose(*unitless_args, **kwargs)
def _unquantify_allclose_arguments(actual, desired, rtol, atol):
    # Convert the (possibly Quantity) arguments of an allclose-style call
    # into plain numeric values, all expressed in the units of ``actual``.
    # Raises u.UnitsError when units are not convertible or rtol carries
    # dimensions.  Returns (actual, desired, rtol, atol) as bare values.
    from .. import units as u
    actual = u.Quantity(actual, subok=True, copy=False)
    desired = u.Quantity(desired, subok=True, copy=False)
    try:
        # Express ``desired`` in the units of ``actual``.
        desired = desired.to(actual.unit)
    except u.UnitsError:
        raise u.UnitsError("Units for 'desired' ({0}) and 'actual' ({1}) "
                           "are not convertible"
                           .format(desired.unit, actual.unit))
    if atol is None:
        # by default, we assume an absolute tolerance of 0
        atol = u.Quantity(0)
    else:
        atol = u.Quantity(atol, subok=True, copy=False)
    try:
        # ``atol`` must also be expressible in the units of ``actual``.
        atol = atol.to(actual.unit)
    except u.UnitsError:
        raise u.UnitsError("Units for 'atol' ({0}) and 'actual' ({1}) "
                           "are not convertible"
                           .format(atol.unit, actual.unit))
    rtol = u.Quantity(rtol, subok=True, copy=False)
    try:
        # The relative tolerance must be a pure number.
        rtol = rtol.to(u.dimensionless_unscaled)
    except Exception:
        raise u.UnitsError("`rtol` should be dimensionless")
    return actual.value, desired.value, rtol.value, atol.value
| |
# Copyright (c) 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
import os
import re
import sys
import tempfile
import textwrap
import fixtures
import mock
import pkg_resources
import six
import testscenarios
from testtools import matchers
from pbr import git
from pbr import packaging
from pbr.tests import base
class TestRepo(fixtures.Fixture):
    """A git repo for testing with.

    Use of TempHomeDir with this fixture is strongly recommended as due to
    the lack of config --local in older gits, it will write to the users
    global configuration without TempHomeDir.
    """

    def __init__(self, basedir):
        super(TestRepo, self).__init__()
        self._basedir = basedir

    def setUp(self):
        super(TestRepo, self).setUp()
        base._run_cmd(['git', 'init', '.'], self._basedir)
        base._config_git()
        base._run_cmd(['git', 'add', '.'], self._basedir)

    def commit(self, message_content='test commit'):
        # Name the new file after the current entry count so that each
        # commit touches a unique path.
        entry_count = len(os.listdir(self._basedir))
        new_path = '%s/%d' % (self._basedir, entry_count)
        with open(new_path, 'wt'):
            pass
        base._run_cmd(['git', 'add', new_path], self._basedir)
        base._run_cmd(['git', 'commit', '-m', message_content],
                      self._basedir)

    def uncommit(self):
        # Drop the most recent commit (and its working-tree changes).
        base._run_cmd(['git', 'reset', '--hard', 'HEAD^'], self._basedir)

    def tag(self, version):
        # Signed, annotated tag -- requires a usable GPG key.
        base._run_cmd(
            ['git', 'tag', '-sm', 'test tag', version], self._basedir)
class GPGKeyFixture(fixtures.Fixture):
    """Creates a GPG key for testing.

    It's recommended that this be used in concert with a unique home
    directory.
    """

    def setUp(self):
        super(GPGKeyFixture, self).setUp()
        tempdir = self.useFixture(fixtures.TempDir())
        # Parse "gpg ... X.Y.Z" out of `gpg --version`.  Use (\d+) groups:
        # the previous '([\d+])' was a one-character class (a digit or '+')
        # and truncated multi-digit components, e.g. 2.1.11 -> (2, 1, 1).
        gnupg_version_re = re.compile(r'^gpg\s.*\s(\d+)\.(\d+)\.(\d+)')
        gnupg_version = base._run_cmd(['gpg', '--version'], tempdir.path)
        for line in gnupg_version[0].split('\n'):
            gnupg_version = gnupg_version_re.match(line)
            if gnupg_version:
                gnupg_version = (int(gnupg_version.group(1)),
                                 int(gnupg_version.group(2)),
                                 int(gnupg_version.group(3)))
                break
        else:
            if gnupg_version is None:
                # No version line matched; assume a very old gpg.
                gnupg_version = (0, 0, 0)
        config_file = tempdir.path + '/key-config'
        f = open(config_file, 'wt')
        try:
            if gnupg_version[0] == 2 and gnupg_version[1] >= 1:
                # GnuPG >= 2.1 requires these to generate an unprotected,
                # non-persistent key in batch mode.
                f.write("""
                %no-protection
                %transient-key
                """)
            f.write("""
            %no-ask-passphrase
            Key-Type: RSA
            Name-Real: Example Key
            Name-Comment: N/A
            Name-Email: example@example.com
            Expire-Date: 2d
            Preferences: (setpref)
            %commit
            """)
        finally:
            f.close()
        # Note that --quick-random (--debug-quick-random in GnuPG 2.x)
        # does not have a corresponding preferences file setting and
        # must be passed explicitly on the command line instead
        if gnupg_version[0] == 1:
            gnupg_random = '--quick-random'
        elif gnupg_version[0] >= 2:
            gnupg_random = '--debug-quick-random'
        else:
            gnupg_random = ''
        base._run_cmd(
            ['gpg', '--gen-key', '--batch', gnupg_random, config_file],
            tempdir.path)
class TestPackagingInGitRepoWithCommit(base.BaseTestCase):
    """sdist/install behaviour in a git repo with at least one commit."""

    scenarios = [
        ('preversioned', dict(preversioned=True)),
        ('postversioned', dict(preversioned=False)),
    ]

    def setUp(self):
        super(TestPackagingInGitRepoWithCommit, self).setUp()
        repo = self.useFixture(TestRepo(self.package_dir))
        repo.commit()

    def test_authors(self):
        self.run_setup('sdist', allow_fail=False)
        # One commit, something should be in the authors list
        authors_path = os.path.join(self.package_dir, 'AUTHORS')
        with open(authors_path, 'r') as f:
            contents = f.read()
        self.assertNotEqual(contents, '')

    def test_changelog(self):
        self.run_setup('sdist', allow_fail=False)
        changelog_path = os.path.join(self.package_dir, 'ChangeLog')
        with open(changelog_path, 'r') as f:
            contents = f.read()
        # One commit, something should be in the ChangeLog list
        self.assertNotEqual(contents, '')

    def test_manifest_exclude_honoured(self):
        self.run_setup('sdist', allow_fail=False)
        sources_path = os.path.join(
            self.package_dir, 'pbr_testpackage.egg-info/SOURCES.txt')
        with open(sources_path, 'r') as f:
            contents = f.read()
        self.assertThat(
            contents,
            matchers.Not(matchers.Contains('pbr_testpackage/extra.py')))
        self.assertThat(contents,
                        matchers.Contains('pbr_testpackage/__init__.py'))

    def test_install_writes_changelog(self):
        stdout, _, _ = self.run_setup(
            'install', '--root', self.temp_dir + 'installed',
            allow_fail=False)
        self.expectThat(stdout, matchers.Contains('Generating ChangeLog'))
class TestPackagingInGitRepoWithoutCommit(base.BaseTestCase):
    """sdist behaviour in a git repo that has no commits at all."""

    def setUp(self):
        super(TestPackagingInGitRepoWithoutCommit, self).setUp()
        self.useFixture(TestRepo(self.package_dir))
        self.run_setup('sdist', allow_fail=False)

    def test_authors(self):
        # No commits, no authors in list
        with open(os.path.join(self.package_dir, 'AUTHORS'), 'r') as f:
            contents = f.read()
        self.assertEqual(contents, '\n')

    def test_changelog(self):
        # No commits, nothing should be in the ChangeLog list
        with open(os.path.join(self.package_dir, 'ChangeLog'), 'r') as f:
            contents = f.read()
        self.assertEqual(contents, 'CHANGES\n=======\n\n')
class TestPackagingInPlainDirectory(base.BaseTestCase):
    """sdist/install behaviour outside of any git repository."""

    def setUp(self):
        super(TestPackagingInPlainDirectory, self).setUp()

    def test_authors(self):
        self.run_setup('sdist', allow_fail=False)
        # Not a git repo, no AUTHORS file created
        self.assertFalse(
            os.path.exists(os.path.join(self.package_dir, 'AUTHORS')))

    def test_changelog(self):
        self.run_setup('sdist', allow_fail=False)
        # Not a git repo, no ChangeLog created
        self.assertFalse(
            os.path.exists(os.path.join(self.package_dir, 'ChangeLog')))

    def test_install_no_ChangeLog(self):
        stdout, _, _ = self.run_setup(
            'install', '--root', self.temp_dir + 'installed',
            allow_fail=False)
        self.expectThat(
            stdout, matchers.Not(matchers.Contains('Generating ChangeLog')))
class TestPresenceOfGit(base.BaseTestCase):
    """Detection of an installed git binary via _run_shell_command."""

    def testGitIsInstalled(self):
        with mock.patch.object(git, '_run_shell_command') as cmd_mock:
            # A successful `git version` invocation means git is present.
            cmd_mock.return_value = 'git version 1.8.4.1'
            self.assertEqual(True, git._git_is_installed())

    def testGitIsNotInstalled(self):
        with mock.patch.object(git, '_run_shell_command') as cmd_mock:
            # Failure to launch the binary means git is absent.
            cmd_mock.side_effect = OSError
            self.assertEqual(False, git._git_is_installed())
class TestNestedRequirements(base.BaseTestCase):
    """parse_requirements must follow '-r other-file' references."""

    def test_nested_requirement(self):
        # Use a fixture-managed temp dir (cleaned up automatically),
        # consistent with TestRequirementParsing; the previous bare
        # tempfile.mkdtemp() leaked the directory after the test run.
        tempdir = self.useFixture(fixtures.TempDir()).path
        requirements = os.path.join(tempdir, 'requirements.txt')
        nested = os.path.join(tempdir, 'nested.txt')
        with open(requirements, 'w') as f:
            f.write('-r ' + nested)
        with open(nested, 'w') as f:
            f.write('pbr')
        result = packaging.parse_requirements([requirements])
        self.assertEqual(result, ['pbr'])
class TestVersions(base.BaseTestCase):
    """Tests deriving package versions from git tags and from Sem-Ver
    pseudo-headers in commit messages."""
    scenarios = [
        ('preversioned', dict(preversioned=True)),
        ('postversioned', dict(preversioned=False)),
    ]
    def setUp(self):
        super(TestVersions, self).setUp()
        # Tags are created signed (-s), so a GPG key fixture is required.
        self.repo = self.useFixture(TestRepo(self.package_dir))
        self.useFixture(GPGKeyFixture())
        self.useFixture(base.DiveDir(self.package_dir))
    def test_capitalized_headers(self):
        # 'Sem-Ver: api-break' triggers a major version bump.
        self.repo.commit()
        self.repo.tag('1.2.3')
        self.repo.commit('Sem-Ver: api-break')
        version = packaging._get_version_from_git()
        self.assertThat(version, matchers.StartsWith('2.0.0.dev1'))
    def test_capitalized_headers_partial(self):
        # Header matching is case-insensitive ('Sem-ver' also works).
        self.repo.commit()
        self.repo.tag('1.2.3')
        self.repo.commit('Sem-ver: api-break')
        version = packaging._get_version_from_git()
        self.assertThat(version, matchers.StartsWith('2.0.0.dev1'))
    def test_tagged_version_has_tag_version(self):
        self.repo.commit()
        self.repo.tag('1.2.3')
        version = packaging._get_version_from_git('1.2.3')
        self.assertEqual('1.2.3', version)
    def test_untagged_version_has_dev_version_postversion(self):
        # A commit past the tag yields a .devN version with a patch bump.
        self.repo.commit()
        self.repo.tag('1.2.3')
        self.repo.commit()
        version = packaging._get_version_from_git()
        self.assertThat(version, matchers.StartsWith('1.2.4.dev1'))
    def test_untagged_pre_release_has_pre_dev_version_postversion(self):
        # Pre-release tags bump the pre-release component instead.
        self.repo.commit()
        self.repo.tag('1.2.3.0a1')
        self.repo.commit()
        version = packaging._get_version_from_git()
        self.assertThat(version, matchers.StartsWith('1.2.3.0a2.dev1'))
    def test_untagged_version_minor_bump(self):
        self.repo.commit()
        self.repo.tag('1.2.3')
        self.repo.commit('sem-ver: deprecation')
        version = packaging._get_version_from_git()
        self.assertThat(version, matchers.StartsWith('1.3.0.dev1'))
    def test_untagged_version_major_bump(self):
        self.repo.commit()
        self.repo.tag('1.2.3')
        self.repo.commit('sem-ver: api-break')
        version = packaging._get_version_from_git()
        self.assertThat(version, matchers.StartsWith('2.0.0.dev1'))
    def test_untagged_version_has_dev_version_preversion(self):
        # With an explicit target version, the dev version targets it.
        self.repo.commit()
        self.repo.tag('1.2.3')
        self.repo.commit()
        version = packaging._get_version_from_git('1.2.5')
        self.assertThat(version, matchers.StartsWith('1.2.5.dev1'))
    def test_untagged_version_after_pre_has_dev_version_preversion(self):
        self.repo.commit()
        self.repo.tag('1.2.3.0a1')
        self.repo.commit()
        version = packaging._get_version_from_git('1.2.5')
        self.assertThat(version, matchers.StartsWith('1.2.5.dev1'))
    def test_untagged_version_after_rc_has_dev_version_preversion(self):
        self.repo.commit()
        self.repo.tag('1.2.3.0a1')
        self.repo.commit()
        version = packaging._get_version_from_git('1.2.3')
        self.assertThat(version, matchers.StartsWith('1.2.3.0a2.dev1'))
    def test_preversion_too_low_simple(self):
        # That is, the target version is either already released or not high
        # enough for the semver requirements given api breaks etc.
        self.repo.commit()
        self.repo.tag('1.2.3')
        self.repo.commit()
        # Note that we can't target 1.2.3 anymore - with 1.2.3 released we
        # need to be working on 1.2.4.
        err = self.assertRaises(
            ValueError, packaging._get_version_from_git, '1.2.3')
        self.assertThat(err.args[0], matchers.StartsWith('git history'))
    def test_preversion_too_low_semver_headers(self):
        # That is, the target version is either already released or not high
        # enough for the semver requirements given api breaks etc.
        self.repo.commit()
        self.repo.tag('1.2.3')
        self.repo.commit('sem-ver: feature')
        # Note that we can't target 1.2.4, the feature header means we need
        # to be working on 1.3.0 or above.
        err = self.assertRaises(
            ValueError, packaging._get_version_from_git, '1.2.4')
        self.assertThat(err.args[0], matchers.StartsWith('git history'))
    def test_get_kwargs_corner_cases(self):
        # No tags:
        git_dir = self.repo._basedir + '/.git'
        get_kwargs = lambda tag: packaging._get_increment_kwargs(git_dir, tag)
        # Exercise every sem-ver header combination both before any tag
        # exists (tag='') and after one does (tag='1.2.3').
        def _check_combinations(tag):
            self.repo.commit()
            self.assertEqual(dict(), get_kwargs(tag))
            self.repo.commit('sem-ver: bugfix')
            self.assertEqual(dict(), get_kwargs(tag))
            self.repo.commit('sem-ver: feature')
            self.assertEqual(dict(minor=True), get_kwargs(tag))
            self.repo.uncommit()
            self.repo.commit('sem-ver: deprecation')
            self.assertEqual(dict(minor=True), get_kwargs(tag))
            self.repo.uncommit()
            self.repo.commit('sem-ver: api-break')
            self.assertEqual(dict(major=True), get_kwargs(tag))
            self.repo.commit('sem-ver: deprecation')
            self.assertEqual(dict(major=True, minor=True), get_kwargs(tag))
        _check_combinations('')
        self.repo.tag('1.2.3')
        _check_combinations('1.2.3')
    def test_invalid_tag_ignored(self):
        # Fix for bug 1356784 - we treated any tag as a version, not just those
        # that are valid versions.
        self.repo.commit()
        self.repo.tag('1')
        self.repo.commit()
        # when the tree is tagged and its wrong:
        self.repo.tag('badver')
        version = packaging._get_version_from_git()
        self.assertThat(version, matchers.StartsWith('1.0.1.dev1'))
        # When the tree isn't tagged, we also fall through.
        self.repo.commit()
        version = packaging._get_version_from_git()
        self.assertThat(version, matchers.StartsWith('1.0.1.dev2'))
        # We don't fall through x.y versions
        self.repo.commit()
        self.repo.tag('1.2')
        self.repo.commit()
        self.repo.tag('badver2')
        version = packaging._get_version_from_git()
        self.assertThat(version, matchers.StartsWith('1.2.1.dev1'))
        # Or x.y.z versions
        self.repo.commit()
        self.repo.tag('1.2.3')
        self.repo.commit()
        self.repo.tag('badver3')
        version = packaging._get_version_from_git()
        self.assertThat(version, matchers.StartsWith('1.2.4.dev1'))
        # Or alpha/beta/pre versions
        self.repo.commit()
        self.repo.tag('1.2.4.0a1')
        self.repo.commit()
        self.repo.tag('badver4')
        version = packaging._get_version_from_git()
        self.assertThat(version, matchers.StartsWith('1.2.4.0a2.dev1'))
        # Non-release related tags are ignored.
        self.repo.commit()
        self.repo.tag('2')
        self.repo.commit()
        self.repo.tag('non-release-tag/2014.12.16-1')
        version = packaging._get_version_from_git()
        self.assertThat(version, matchers.StartsWith('2.0.1.dev1'))
    def test_valid_tag_honoured(self):
        # Fix for bug 1370608 - we converted any target into a 'dev version'
        # even if there was a distance of 0 - indicating that we were on the
        # tag itself.
        self.repo.commit()
        self.repo.tag('1.3.0.0a1')
        version = packaging._get_version_from_git()
        self.assertEqual('1.3.0.0a1', version)
    def test_skip_write_git_changelog(self):
        # Fix for bug 1467440
        self.repo.commit()
        self.repo.tag('1.2.3')
        os.environ['SKIP_WRITE_GIT_CHANGELOG'] = '1'
        version = packaging._get_version_from_git('1.2.3')
        self.assertEqual('1.2.3', version)
    def tearDown(self):
        super(TestVersions, self).tearDown()
        # Undo the environment mutation done by test_skip_write_git_changelog.
        os.environ.pop('SKIP_WRITE_GIT_CHANGELOG', None)
class TestRequirementParsing(base.BaseTestCase):
    # End-to-end check: requirements.txt plus setup.cfg [extras] must end up
    # as the expected sections of egg-info/requires.txt after
    # `setup.py egg_info` runs under pbr.
    def test_requirement_parsing(self):
        tempdir = self.useFixture(fixtures.TempDir()).path
        requirements = os.path.join(tempdir, 'requirements.txt')
        with open(requirements, 'wt') as f:
            f.write(textwrap.dedent(six.u("""\
                bar
                quux<1.0; python_version=='2.6'
                requests-aws>=0.1.4 # BSD License (3 clause)
                Routes>=1.12.3,!=2.0,!=2.1;python_version=='2.7'
                requests-kerberos>=0.6;python_version=='2.7' # MIT
                """)))
        setup_cfg = os.path.join(tempdir, 'setup.cfg')
        with open(setup_cfg, 'wt') as f:
            f.write(textwrap.dedent(six.u("""\
                [metadata]
                name = test_reqparse
                [extras]
                test =
                    foo
                    baz>3.2 :python_version=='2.7'
                """)))
        # pkg_resources.split_sections uses None as the title of an
        # anonymous section instead of the empty string. Weird.
        expected_requirements = {
            None: ['bar', 'requests-aws>=0.1.4'],
            ":(python_version=='2.6')": ['quux<1.0'],
            ":(python_version=='2.7')": ['Routes>=1.12.3,!=2.0,!=2.1',
                                         'requests-kerberos>=0.6'],
            'test': ['foo'],
            "test:(python_version=='2.7')": ['baz>3.2']
        }
        # A minimal pbr-enabled setup.py to run egg_info against.
        setup_py = os.path.join(tempdir, 'setup.py')
        with open(setup_py, 'wt') as f:
            f.write(textwrap.dedent(six.u("""\
                #!/usr/bin/env python
                import setuptools
                setuptools.setup(
                    setup_requires=['pbr'],
                    pbr=True,
                )
                """)))
        self._run_cmd(sys.executable, (setup_py, 'egg_info'),
                      allow_fail=False, cwd=tempdir)
        egg_info = os.path.join(tempdir, 'test_reqparse.egg-info')
        requires_txt = os.path.join(egg_info, 'requires.txt')
        with open(requires_txt, 'rt') as requires:
            generated_requirements = dict(
                pkg_resources.split_sections(requires))
        self.assertEqual(expected_requirements, generated_requirements)
def load_tests(loader, in_tests, pattern):
    # Expand the ``scenarios`` attributes of the test classes above into
    # concrete test cases (testscenarios protocol hook).
    return testscenarios.load_tests_apply_scenarios(loader, in_tests, pattern)
| |
# -*- coding: utf-8 -*-
# vim: ts=2 sw=2 et ai
###############################################################################
# Copyright (c) 2012,2021 Andreas Vogel andreas@wellenvogel.net
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# parts from this software (AIS decoding) are taken from the gpsd project
# so refer to this BSD licencse also (see ais.py) or omit ais.py
###############################################################################
import socketserver
import http.server
import posixpath
import urllib.request, urllib.parse, urllib.error
import urllib.parse
from typing import Dict, Any
import gemf_reader
from httphandler import AVNHTTPHandler, WebSocketHandler
try:
import create_overview
except:
pass
from wpahandler import *
from avnav_manager import *
hasIfaces=False
try:
import netifaces
hasIfaces=True
except:
pass
import threading
#a HTTP server with threads for each request
class AVNHTTPServer(socketserver.ThreadingMixIn,http.server.HTTPServer, AVNWorker):
webSocketHandlers: Dict[str, WebSocketHandler]
navxml=AVNUtil.NAVXML
  @classmethod
  def getConfigName(cls):
    # Name of the configuration element this worker is created from.
    return "AVNHttpServer"
  @classmethod
  def createInstance(cls, cfgparam):
    # Factory used by the worker framework; only a single HTTP server
    # instance is permitted per process.
    cls.checkSingleInstance()
    return AVNHTTPServer(cfgparam, AVNHTTPHandler)
@classmethod
def getConfigParam(cls, child=None):
if child == "Directory":
return {
"urlpath":None,
"path":None
}
if child == "MimeType":
return {
'extension':None,
'type':None
}
if child == 'UserTool':
return {
'url':None, #we replace $HOST...
'title':'',
'icon':None, #an icon below $datadir/user
'keepUrl':'' #auto detect
}
if not child is None:
return None
rt={
"basedir":"",
"navurl":"/viewer/avnav_navi.php", #those must be absolute with /
"index":"/viewer/avnav_viewer.html",
"chartbase": "maps", #this is the URL without leading /!
"httpPort":"8080",
"numThreads":"5",
"httpHost":"",
}
return rt
def __init__(self,cfgparam,RequestHandlerClass):
replace=AVNHandlerManager.filterBaseParam(cfgparam)
if cfgparam.get('basedir')== '.':
#some migration of the older setting - we want to use our global dir function, so consider . to be empty
cfgparam['basedir']=''
self.basedir=AVNHandlerManager.getDirWithDefault(cfgparam, 'basedir', defaultSub='', belowData=False)
datadir=cfgparam[AVNHandlerManager.BASEPARAM.DATADIR]
pathmappings={}
marray=cfgparam.get("Directory")
if marray is not None:
pathmappings={}
for mapping in marray:
pathmappings[mapping['urlpath']]=AVNUtil.prependBase(AVNUtil.replaceParam(os.path.expanduser(mapping['path']),replace),self.basedir)
if pathmappings.get('user') is None:
pathmappings['user']=os.path.join(datadir,'user')
self.pathmappings=pathmappings
charturl=cfgparam['chartbase']
if charturl is not None:
#set a default chart dir if not set via config url mappings
if self.pathmappings.get(charturl) is None:
self.pathmappings[charturl]=os.path.join(cfgparam[AVNHandlerManager.BASEPARAM.DATADIR], "charts")
self.navurl=cfgparam['navurl']
self.overwrite_map=({
'.png': 'image/png',
'.js': 'text/javascript; charset=utf-8'
})
mtypes=cfgparam.get('MimeType')
if mtypes is not None:
for mtype in mtypes:
self.overwrite_map[mtype['extension']]=mtype['type']
server_address=(cfgparam['httpHost'],int(cfgparam['httpPort']))
AVNWorker.__init__(self, cfgparam)
self.type=AVNWorker.Type.HTTPSERVER
self.handlers={}
self.interfaceReader=None
self.addresslist=[]
self.handlerMap={}
self.externalHandlers={} #prefixes that will be handled externally
self.webSocketHandlers={}
http.server.HTTPServer.__init__(self, server_address, RequestHandlerClass, True)
def run(self):
self.freeAllUsedResources()
self.claimUsedResource(UsedResource.T_TCP,self.server_port,force=True)
self.setNameIfEmpty("%s-%d"%(self.getName(),self.server_port))
AVNLog.info("HTTP server "+self.server_name+", "+str(self.server_port)+" started at thread "+self.name)
self.setInfo('main',"serving at port %s"%(str(self.server_port)),WorkerStatus.RUNNING)
if hasIfaces:
self.interfaceReader=threading.Thread(target=self.readInterfaces)
self.interfaceReader.daemon=True
self.interfaceReader.start()
self.serve_forever()
def handlePathmapping(self,path):
if not self.pathmappings is None:
for mk in list(self.pathmappings.keys()):
if path.find(mk) == 0:
path=self.pathmappings[mk]+path[len(mk):]
AVNLog.ld("remapped path to",path)
return path
path=os.path.join(self.basedir,path)
return path
else:
return path
def getChartBaseDir(self):
chartbaseurl=self.getStringParam('chartbase')
return self.handlePathmapping(chartbaseurl)
def getHandler(self,name):
if self.handlers.get(name) is not None:
return self.handlers.get(name)
rt=self.findHandlerByName(name)
if rt is not None:
self.handlers[name]=rt
return rt
#read out all IP addresses
def readInterfaces(self):
while True:
addresses=[]
interfaces=netifaces.interfaces()
for intf in interfaces:
intfaddr=netifaces.ifaddresses(intf)
if intfaddr is not None:
ips=intfaddr.get(netifaces.AF_INET)
if ips is not None:
for ip in ips:
if ip.get('addr') is not None:
addresses.append(ip.get('addr')+":"+str(self.server_port))
self.addresslist=addresses
time.sleep(5)
def getStatusProperties(self):
if self.addresslist is not None and len(self.addresslist) > 0:
return {'addresses':self.addresslist}
else:
return {}
def registerRequestHandler(self,type,command,handler):
if type == 'path':
self.externalHandlers[command]=handler
return
if type == 'websocket':
self.webSocketHandlers[command]=handler
return
if self.handlerMap.get(type) is None:
self.handlerMap[type]={}
self.handlerMap[type][command]=handler
def getRequestHandler(self,type,command):
typeMap=self.handlerMap.get(type)
if typeMap is None:
return None
return typeMap.get(command)
def plainUrlToPath(self,path,usePathMapping=True):
'''
@param path: the URL as received
@param usePathMapping: if true use the mapping table
@return: an OS path
'''
words = path.split('/')
words = [_f for _f in words if _f]
path = ""
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (".",".."): continue
path = os.path.join(path, word)
AVNLog.ld("request path",path)
if not usePathMapping:
return path
#pathmappings expect to have absolute pathes!
return self.handlePathmapping(path)
@classmethod
def pathQueryFromUrl(cls,url):
(path, sep, query) = url.partition('?')
path = path.split('#', 1)[0]
path = posixpath.normpath(urllib.parse.unquote(path))
return (path,query)
def tryExternalMappings(self,path,query,handler=None):
requestParam=urllib.parse.parse_qs(query,True)
for prefix in list(self.externalHandlers.keys()):
if path.startswith(prefix):
# the external handler can either return a mapped path (already
# converted in an OS path - e.g. using plainUrlToPath)
# or just do the handling by its own and return None
try:
return self.externalHandlers[prefix].handleApiRequest('path', path, requestParam, server=self,handler=handler)
except:
AVNLog.error("external mapping failed for %s: %s",path,traceback.format_exc())
return None
#legacy fallback:
#if we have images at /user/images or /user/icons we can fallback to viewer
#new pathes should be /user/viewer/images
for prefix in ['icons','images']:
cp="/user/"+prefix
if path.startswith(cp):
osPath = self.plainUrlToPath(path, True)
if os.path.exists(osPath):
return osPath
return self.plainUrlToPath("/viewer/images/"+path[len(cp)+1:],True)
def getWebSocketsHandler(self,path,query,handler):
requestParam=urllib.parse.parse_qs(query,True)
for prefix in list(self.webSocketHandlers.keys()):
if path.startswith(prefix):
# the external handler can either return a mapped path (already
# converted in an OS path - e.g. using plainUrlToPath)
# or just do the handling by its own and return None
try:
return self.webSocketHandlers[prefix].handleApiRequest('websocket', path, requestParam, server=self,handler=handler)
except:
AVNLog.error("no websocket handler %s: %s",path,traceback.format_exc())
return None
# make the server known to the framework so it gets instantiated from the config
avnav_handlerList.registerHandler(AVNHTTPServer)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import logging
import logging.config
import logging.handlers
import sys
import traceback
import six
from six import moves
from oslo_context import context as context_utils
from oslo_serialization import jsonutils
def _dictify_context(context):
if context is None:
return {}
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
# A configuration object is given to us when the application registers
# the logging options.  It stays None until _store_global_conf is called.
_CONF = None
def _store_global_conf(conf):
    """Remember the application's configuration object for the formatters."""
    global _CONF
    _CONF = conf
def _update_record_with_context(record):
    """Given a log record, update it with context information.

    The request context, if there is one, will either be in the
    extra values for the incoming record or in the global
    thread-local store.  Returns the context (which may be None).
    """
    # NOTE: dict.get() evaluates its default eagerly, so the previous
    # record.__dict__.get('context', context_utils.get_current()) called
    # get_current() for every record even when the record already carried
    # a context (including an explicit context=None).  Look it up lazily;
    # get_current() is a thread-local read, so the observable result is
    # unchanged.
    if 'context' in record.__dict__:
        context = record.__dict__['context']
    else:
        context = context_utils.get_current()
    d = _dictify_context(context)
    # Copy the context values directly onto the record so they can be
    # used by the formatting strings.
    for k, v in d.items():
        setattr(record, k, v)
    return context
class JSONFormatter(logging.Formatter):
    """Format each log record as a JSON document.

    The standard LogRecord attributes, the request context values and any
    'extra' values are flattened into one JSON object per record.
    """
    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        # since logging.config.fileConfig passes it.
        self.datefmt = datefmt
    def formatException(self, ei, strip_newlines=True):
        """Return the traceback as a list of lines.

        With strip_newlines (default) each frame is split into separate,
        rstripped lines with empty lines removed, so the result serializes
        cleanly as a JSON array.
        """
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines
    def format(self, record):
        """Serialize *record* to a JSON string."""
        # Start from the standard LogRecord attributes.
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}
        # Build the extra values that were given to us, including
        # the context.
        context = _update_record_with_context(record)
        if hasattr(record, 'extra'):
            extra = record.extra.copy()
        else:
            extra = {}
        for key in getattr(record, 'extra_keys', []):
            if key not in extra:
                extra[key] = getattr(record, key)
        # If we saved a context object, explode it into the extra
        # dictionary because the values are more useful than the
        # object reference.
        if 'context' in extra:
            extra.update(_dictify_context(context))
            del extra['context']
        message['extra'] = extra
        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)
        return jsonutils.dumps(message)
class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string.  You can also specify
    logging_debug_format_suffix to append extra formatting if the log level is
    debug.

    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter

    If available, uses the context value stored in TLS - local.store.context
    """
    def __init__(self, *args, **kwargs):
        """Initialize ContextFormatter instance

        Takes additional keyword arguments which can be used in the message
        format string.

        :keyword project: project name
        :type project: string
        :keyword version: project version
        :type version: string
        """
        self.project = kwargs.pop('project', 'unknown')
        self.version = kwargs.pop('version', 'unknown')
        # fall back to the globally registered config when none is given
        self.conf = kwargs.pop('config', _CONF)
        logging.Formatter.__init__(self, *args, **kwargs)
    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # NOTE(jecarey): If msg is not unicode, coerce it into unicode
        #                before it can get to the python logging and
        #                possibly cause string encoding trouble
        if not isinstance(record.msg, six.text_type):
            record.msg = six.text_type(record.msg)
        # store project info
        record.project = self.project
        record.version = self.version
        # FIXME(dims): We need a better way to pick up the instance
        # or instance_uuid parameters from the kwargs from say
        # LOG.info or LOG.warn
        instance_extra = ''
        instance = getattr(record, 'instance', None)
        instance_uuid = getattr(record, 'instance_uuid', None)
        context = _update_record_with_context(record)
        # record-level instance info wins over context-level info
        if instance:
            try:
                instance_extra = (self.conf.instance_format
                                  % instance)
            except TypeError:
                instance_extra = instance
        elif instance_uuid:
            instance_extra = (self.conf.instance_uuid_format
                              % {'uuid': instance_uuid})
        elif context:
            # FIXME(dhellmann): We should replace these nova-isms with
            # more generic handling in the Context class. See the
            # app-agnostic-logging-parameters blueprint.
            instance = getattr(context, 'instance', None)
            instance_uuid = getattr(context, 'instance_uuid', None)
            # resource_uuid was introduced in oslo_context's
            # RequestContext
            resource_uuid = getattr(context, 'resource_uuid', None)
            if instance:
                instance_extra = (self.conf.instance_format
                                  % {'uuid': instance})
            elif instance_uuid:
                instance_extra = (self.conf.instance_uuid_format
                                  % {'uuid': instance_uuid})
            elif resource_uuid:
                instance_extra = (self.conf.instance_uuid_format
                                  % {'uuid': resource_uuid})
        record.instance = instance_extra
        # NOTE(sdague): default the fancier formatting params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color', 'user_identity', 'resource',
                    'user_name', 'project_name'):
            if key not in record.__dict__:
                record.__dict__[key] = ''
        # pick the format string: context-aware when a request_id is known
        if record.__dict__.get('request_id'):
            fmt = self.conf.logging_context_format_string
        else:
            fmt = self.conf.logging_default_format_string
        if (record.levelno == logging.DEBUG and
                self.conf.logging_debug_format_suffix):
            fmt += " " + self.conf.logging_debug_format_suffix
        # logging internals changed in 3.2: newer versions keep the format
        # in a style object instead of a plain _fmt attribute
        if sys.version_info < (3, 2):
            self._fmt = fmt
        else:
            self._style = logging.PercentStyle(fmt)
            self._fmt = self._style._fmt
        # Cache this on the record, Logger will respect our formatted copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)
    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)
        stringbuffer = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()
        # only compute asctime when the prefix actually references it
        if self.conf.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)
        formatted_lines = []
        for line in lines:
            pl = self.conf.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
| |
"""tensor.py
Constants and functions having to deal with 3D symmetric and nonsymmetric
second order tensors fourth-order tensors with minor and major symmetries.
Since there are a limited number of shapes allowed for input arrays, many of
the functions are hard coded for speed (no need to call a generalized inverse
function, when the inverse of a 3x3 can be hard coded easily.)
All of the functions are written with the following assumptions:
o Symmetric second-order tensors are stored as arrays of length 6 with the
following component ordering
[XX, YY, ZZ, XY, YZ, XZ]
o Nonsymmetric second-order tensors are stored as arrays of length 9 with the
following component ordering
[XX, XY, XZ, YX, YY, YZ, ZX, ZY, ZZ]
or
[[XX, XY, XZ], [YX, YY, YZ], [ZX, ZY, ZZ]]
o Fourth-order tensors are stored as 6x6 matrices using the same component
transformations as second-order symmetric tensors
"""
import warnings
import numpy as np
from copy import deepcopy as copy
from .environ import environ
import matmodlab2.core.linalg as la
# Fourth-order "identities", stored as 6x6 matrices in the symmetric
# component ordering [XX, YY, ZZ, XY, YZ, XZ]
# II1: I[i,j] I[k,l]
II1 = np.array([1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 1.0,
                1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).reshape((6,6))
# II2: I[i,k] I[j,l]
II2 = np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0,
                0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]).reshape((6,6))
# II3: I[i,l] I[j,k]
II3 = np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]).reshape((6,6))
# II4: symmetrizer (I[i,k] I[j,l] + I[i,l] I[j,k]) / 2 built from II2, II3
II4 = (II2 + II3) / 2
# II5 = (I[i,k] I[j,l] + I[i,l] I[j,k]) / 2
II5 = np.array([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0,
                0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0,
                0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5]).reshape((6,6))
# Second-order identities in 6-, 9- and 3x3-component storage
I6 = np.array([1., 1., 1., 0., 0., 0.])
I9 = np.array([1., 0., 0., 0., 1., 0., 0., 0., 1.])
I3x3 = np.eye(3)
SYMMETRIC_COMPONENTS = ['XX', 'YY', 'ZZ', 'XY', 'YZ', 'XZ']
TENSOR_COMPONENTS = ['XX', 'XY', 'XZ', 'YX', 'YY', 'YZ', 'ZX', 'ZY', 'ZZ']
# Voigt factors: double the shear components when contracting 6-vectors
VOIGT = np.array([1., 1., 1., 2., 2., 2.])
# machine precision, used as the "numerically zero" threshold
epsilon = np.finfo(float).eps
def has_valid_shape(A):
    """Return whether array *A* has one of the supported tensor shapes."""
    return is_valid_shape(A.shape)
def is_valid_shape(shape):
    """Return whether the shape is valid for these set of functions"""
    valid = {(6,), (9,), (3, 3), (6, 6)}
    return shape in valid
def identity(n):
    """Return the identity tensor in size-*n* storage (3, 6, or 9).

    Raises ValueError for any other size.
    """
    makers = {
        3: lambda: np.eye(3),
        6: lambda: np.array([1., 1., 1., 0., 0., 0.]),
        9: lambda: np.array([1., 0., 0., 0., 1., 0., 0., 0., 1.]),
    }
    if n not in makers:
        raise ValueError('Unknown identity size {0}'.format(n))
    return makers[n]()
def identity_like(A):
    """Return the identity tensor stored in the same form as *A*."""
    arr = np.asarray(A)
    assert has_valid_shape(arr)
    return identity(arr.shape[0])
def trace(A, metric=None):
    """Return the trace of A.

    With no metric this is the ordinary trace tr(A).  With a metric it is
    the contraction of A with the inverse metric, A : g^-1 (matching how
    isotropic_part uses the inverse metric).
    """
    A = np.asarray(A)
    assert has_valid_shape(A)
    if metric is None:
        metric = identity_like(A)
        X = np.array(metric)
    else:
        X = inv(metric)
    # BUG FIX: X (identity, or the inverse metric) was computed but never
    # used -- the original contracted with the metric itself.  With
    # metric=None, X equals the metric, so default behavior is unchanged.
    return double_dot(A, X)
def isotropic_part(A, metric=None):
    """Return the isotropic (spherical) part of A.

    Without a metric this is tr(A)/3 times the identity; with a metric the
    trace is taken with respect to it and the result is scaled by the
    inverse metric.
    """
    A = np.asarray(A)
    assert has_valid_shape(A)
    if metric is None:
        metric = identity_like(A)
        scale = np.array(metric)
    else:
        scale = inv(metric)
    mean = trace(A, metric=metric) / 3.
    return mean * scale
def deviatoric_part(A, metric=None):
    """Return deviatoric part of A, i.e. A minus its isotropic part."""
    return A - isotropic_part(A, metric=metric)
def is_symmetric_rep(A):
    """Return whether *A* is stored in the 6-component symmetric form."""
    return A.shape == (6,)
def symmetric_dyad(A, B):
    """Compute the symmetric dyad AB_ij = A_i B_j.

    Both arguments are 3-vectors; the result is returned in the
    6-component ordering [XX, YY, ZZ, XY, YZ, XZ].
    """
    a = np.asarray(A)
    b = np.asarray(B)
    assert a.shape == (3,)
    assert a.shape == b.shape
    pairs = ((0, 0), (1, 1), (2, 2), (0, 1), (1, 2), (0, 2))
    return np.array([a[i] * b[j] for i, j in pairs])
def root_j2(A):
    """ Return the square root of the second invariant of the
    deviatoric part of the matrix, sqrt(J2) = |dev(A)| / sqrt(2).
    """
    return magnitude(deviatoric_part(A)) / np.sqrt(2.)
def invariants(A, type=None, n=None):
    """Return the invariants of a tensor A

    The type parameter is one of 'default' (None), 'mechanics', 'lode',
    'directed'.  Multiple types of invariants can be returned by joining
    different types with &, ie, to get the mechanics and lode invariants do
    type='lode&mechanics'.  *n* is the direction vector required by the
    'directed' invariants.  For a single type a tuple is returned; for
    composite types the full dict of computed invariants is returned.
    """
    A = np.asarray(A)
    assert has_valid_shape(A)
    anyin = lambda a, b: any([x in b for x in a])
    valid_types = ('default', 'mechanics', 'lode', 'directed')
    if type is None:
        type = 'default'
    types = [x.strip() for x in type.split('&') if x.split()]
    # BUG FIX: the original asserted a (non-empty, hence truthy) list;
    # validate each requested type individually
    assert all(x in valid_types for x in types)
    # BUG FIX: was 'n in None', which raises TypeError; 'is None' intended
    if 'directed' in types and n is None:
        raise ValueError('type=directed requires n be defined')
    dikt = {}
    if anyin(('mechanics', 'lode'), types):
        dikt['i1'] = trace(A)
        dikt['rootj2'] = magnitude(deviatoric_part(A)) / np.sqrt(2.)
        dikt['j3'] = det(deviatoric_part(A))
        if 'lode' in types:
            # cylindrical (Lode) coordinates on the octahedral plane
            dikt['r'] = np.sqrt(2.) * dikt['rootj2']
            dikt['z'] = dikt['i1'] / np.sqrt(3.)
            if abs(dikt['rootj2']) < epsilon:
                dikt['lode'] = 0.
            else:
                dikt['lode'] = dikt['j3'] / 2.0 * 3.0 ** 1.5 / dikt['rootj2'] ** 3.0
            # clamp to [-1, 1] before arcsin to guard against roundoff
            dikt['theta'] = np.arcsin(max(-1.0, min(dikt['lode'], 1.0))) / 3.0
    if anyin(('default', 'directed'), types):
        A = matrix_rep(A, 0)
        asq = np.dot(A, A)
        deta = la.det(A)
        tra = trace(A)
        dikt['i1'] = tra
        dikt['i2'] = .5 * (tra ** 2 - trace(asq))
        dikt['i3'] = deta
        if 'directed' in types:
            dikt['i4'] = np.dot(np.dot(n, A), n)
            # BUG FIX: the second assignment overwrote i4; i5 was intended
            # (it is read when returning the 'directed' invariants below)
            dikt['i5'] = np.dot(np.dot(n, asq), n)
    if len(types) > 1:
        # For composite types, just return the dictionary
        return dikt
    if types[0] == 'default':
        return dikt['i1'], dikt['i2'], dikt['i3']
    if types[0] == 'directed':
        return dikt['i1'], dikt['i2'], dikt['i3'], dikt['i4'], dikt['i5']
    if types[0] == 'mechanics':
        return dikt['i1'], dikt['rootj2'], dikt['j3']
    if types[0] == 'lode':
        return dikt['z'], dikt['r'], dikt['theta'], dikt['lode']
    return None
def magnitude(A):
    """Return magnitude of A, sqrt(A:A)."""
    return np.sqrt(double_dot(A, A))
def dot(A, B):
    """Dot product of A and B.

    Handles the supported storage-shape combinations; two 6-component
    arrays are combined elementwise with the Voigt factors.  Raises
    ValueError for an unsupported combination.
    """
    A, B = np.asarray(A), np.asarray(B)
    assert has_valid_shape(A)
    assert has_valid_shape(B)
    if A.shape == (6,) and B.shape == (6,):
        return A * B * VOIGT
    if A.shape == (6,) and B.shape == (3,3):
        return np.dot(matrix_rep(A, 0), B)
    if A.shape == (3,3) and B.shape == (6,):
        return np.dot(A, matrix_rep(B, 0))
    if A.shape == (3,3) and B.shape == (3,3):
        return np.dot(A, B)
    if A.shape == (6,6) and B.shape == (6,):
        return np.dot(A, B)
    if A.shape == (6,) and B.shape == (6,6):
        return np.dot(A, B)
    if A.shape == (9,) and B.shape == (9,):
        return np.dot(A.reshape(3,3), B.reshape(3,3)).flatten()
    # BUG FIX: removed a leftover debug print; report the offending
    # shapes in the exception instead
    raise ValueError(
        'Unknown dot combination: {0}, {1}'.format(A.shape, B.shape))
def double_dot(A, B):
    """Return A:B, the double contraction of A and B.

    6x6 with a 6-vector (either order) is an ordinary matrix product;
    second-order tensors are contracted componentwise, accounting for
    the doubled shear terms of the 6-component storage.
    """
    A, B = np.asarray(A), np.asarray(B)
    assert has_valid_shape(A)
    assert has_valid_shape(B)
    if A.shape == (6,6) and B.shape == (6,):
        return np.dot(A, B)
    if A.shape == (6,) and B.shape == (6,6):
        return np.dot(A, B)
    A, B = A.reshape(-1), B.reshape(-1)
    # normalize ordering so the mixed case is always (6,) : (9,)
    if B.shape == (6,) and A.shape == (9,):
        A, B = B, A
    if A.shape == (6,) and B.shape == (6,):
        X = (A[0]*B[0] + A[3]*B[3] + A[5]*B[5] + A[3]*B[3] + A[1]*B[1] +
             A[4]*B[4] + A[5]*B[5] + A[4]*B[4] + A[2]*B[2])
    elif A.shape == (6,) and B.shape == (9,):
        X = (A[0]*B[0] + A[3]*B[1] + A[5]*B[2] + A[3]*B[3] + A[1]*B[4] +
             A[4]*B[5] + A[5]*B[6] + A[4]*B[7] + A[2]*B[8])
    elif A.shape == (9,) and B.shape == (9,):
        X = (A[0]*B[0] + A[1]*B[1] + A[2]*B[2] + A[3]*B[3] + A[4]*B[4] +
             A[5]*B[5] + A[6]*B[6] + A[7]*B[7] + A[8]*B[8])
    else:
        # BUG FIX: combinations such as (6,6):(6,6) previously fell
        # through and raised UnboundLocalError on X
        raise ValueError(
            'Unknown double dot combination: {0}, {1}'.format(A.shape, B.shape))
    return X
def det(A):
    """Compute the determinant of A (6-, 9- or 3x3-component storage)."""
    A = np.asarray(A)
    assert has_valid_shape(A)
    if A.shape == (6,):
        # cofactor expansion of the symmetric 3x3 matrix
        return (A[0]*A[1]*A[2] - A[0]*A[4]**2 - A[1]*A[5]**2 -
                A[2]*A[3]**2 + 2*A[3]*A[4]*A[5])
    if A.shape in ((3, 3), (9,)):
        a = A.reshape(-1)
        return (a[0]*a[4]*a[8] - a[0]*a[5]*a[7] - a[1]*a[3]*a[8] +
                a[1]*a[5]*a[6] + a[2]*a[3]*a[7] - a[2]*a[4]*a[6])
    raise ValueError('Unknown shape')
def inv(A):
    """Computes the inverse of A.

    A 3x3 input is reduced to 6- or 9-component storage first; the result
    is returned in the caller's original storage shape.  The closed-form
    component expressions below appear machine-generated (e.g. by a CAS);
    do not hand-edit them.
    """
    A = np.asarray(A)
    assert has_valid_shape(A)
    orig_shape = A.shape
    if A.shape == (3,3):
        # use the compact symmetric form when possible
        if is_symmetric(A):
            A = array_rep(A, (6,))
        else:
            A = A.reshape(-1)
    if A.shape == (6,):
        # symmetric case: closed-form inverse of the 6-component tensor
        X = np.zeros(6)
        X[0] = (-(A[0]*A[1] - A[3]**2)*(-A[3]*(A[4] -
                A[3]*A[5]/A[0])/(A[0]*(A[1] - A[3]**2/A[0])) +
                A[5]/A[0])*(A[3]*(A[4] - A[3]*A[5]/A[0])/(A[0]*(A[1] -
                A[3]**2/A[0])) - A[5]/A[0])/(A[0]*A[1]*A[2] - A[0]*A[4]**2 -
                A[1]*A[5]**2 - A[2]*A[3]**2 + 2*A[3]*A[4]*A[5]) + 1/A[0] +
                A[3]**2/(A[0]**2*(A[1] - A[3]**2/A[0])))
        X[1] = (1/(A[1] - A[3]**2/A[0]) + (A[4] -
                A[3]*A[5]/A[0])**2*(A[0]*A[1] - A[3]**2)/((A[1] -
                A[3]**2/A[0])**2*(A[0]*A[1]*A[2] - A[0]*A[4]**2 - A[1]*A[5]**2
                - A[2]*A[3]**2 + 2*A[3]*A[4]*A[5])))
        X[2] = ((A[0]*A[1] - A[3]**2)/(A[0]*A[1]*A[2] - A[0]*A[4]**2 -
                A[1]*A[5]**2 - A[2]*A[3]**2 + 2*A[3]*A[4]*A[5]))
        X[3] = ((A[4] - A[3]*A[5]/A[0])*(A[0]*A[1] - A[3]**2)*(-A[3]*(A[4] -
                A[3]*A[5]/A[0])/(A[0]*(A[1] - A[3]**2/A[0])) +
                A[5]/A[0])/((A[1] - A[3]**2/A[0])*(A[0]*A[1]*A[2] -
                A[0]*A[4]**2 - A[1]*A[5]**2 - A[2]*A[3]**2 +
                2*A[3]*A[4]*A[5])) - A[3]/(A[0]*(A[1] - A[3]**2/A[0])))
        X[4] = (-(A[4] - A[3]*A[5]/A[0])*(A[0]*A[1] - A[3]**2)/((A[1] -
                A[3]**2/A[0])*(A[0]*A[1]*A[2] - A[0]*A[4]**2 - A[1]*A[5]**2 -
                A[2]*A[3]**2 + 2*A[3]*A[4]*A[5])))
        X[5] = (-(A[0]*A[1] - A[3]**2)*(-A[3]*(A[4] -
                A[3]*A[5]/A[0])/(A[0]*(A[1] - A[3]**2/A[0])) +
                A[5]/A[0])/(A[0]*A[1]*A[2] - A[0]*A[4]**2 - A[1]*A[5]**2 -
                A[2]*A[3]**2 + 2*A[3]*A[4]*A[5]))
    elif A.shape == (9,):
        # general (nonsymmetric) case, 9-component storage
        X = np.zeros(9)
        X[0] = (-(A[0]*A[4] - A[1]*A[3])*(-A[1]*(A[5] -
                A[2]*A[3]/A[0])/(A[0]*(A[4] - A[1]*A[3]/A[0])) +
                A[2]/A[0])*(A[3]*(A[7] - A[1]*A[6]/A[0])/(A[0]*(A[4] -
                A[1]*A[3]/A[0])) - A[6]/A[0])/(A[0]*A[4]*A[8] - A[0]*A[5]*A[7]
                - A[1]*A[3]*A[8] + A[1]*A[5]*A[6] + A[2]*A[3]*A[7] -
                A[2]*A[4]*A[6]) + 1/A[0] + A[1]*A[3]/(A[0]**2*(A[4] -
                A[1]*A[3]/A[0])))
        X[1] = ((A[7] - A[1]*A[6]/A[0])*(A[0]*A[4] - A[1]*A[3])*(-A[1]*(A[5] -
                A[2]*A[3]/A[0])/(A[0]*(A[4] - A[1]*A[3]/A[0])) +
                A[2]/A[0])/((A[4] - A[1]*A[3]/A[0])*(A[0]*A[4]*A[8] -
                A[0]*A[5]*A[7] - A[1]*A[3]*A[8] + A[1]*A[5]*A[6] +
                A[2]*A[3]*A[7] - A[2]*A[4]*A[6])) - A[1]/(A[0]*(A[4] -
                A[1]*A[3]/A[0])))
        X[2] = (-(A[0]*A[4] - A[1]*A[3])*(-A[1]*(A[5] -
                A[2]*A[3]/A[0])/(A[0]*(A[4] - A[1]*A[3]/A[0])) +
                A[2]/A[0])/(A[0]*A[4]*A[8] - A[0]*A[5]*A[7] - A[1]*A[3]*A[8] +
                A[1]*A[5]*A[6] + A[2]*A[3]*A[7] - A[2]*A[4]*A[6]))
        X[3] = (-(A[5] - A[2]*A[3]/A[0])*(A[0]*A[4] - A[1]*A[3])*(A[3]*(A[7] -
                A[1]*A[6]/A[0])/(A[0]*(A[4] - A[1]*A[3]/A[0])) -
                A[6]/A[0])/((A[4] - A[1]*A[3]/A[0])*(A[0]*A[4]*A[8] -
                A[0]*A[5]*A[7] - A[1]*A[3]*A[8] + A[1]*A[5]*A[6] +
                A[2]*A[3]*A[7] - A[2]*A[4]*A[6])) - A[3]/(A[0]*(A[4] -
                A[1]*A[3]/A[0])))
        X[4] = (1/(A[4] - A[1]*A[3]/A[0]) + (A[5] - A[2]*A[3]/A[0])*(A[7] -
                A[1]*A[6]/A[0])*(A[0]*A[4] - A[1]*A[3])/((A[4] -
                A[1]*A[3]/A[0])**2*(A[0]*A[4]*A[8] - A[0]*A[5]*A[7] -
                A[1]*A[3]*A[8] + A[1]*A[5]*A[6] + A[2]*A[3]*A[7] -
                A[2]*A[4]*A[6])))
        X[5] = (-(A[5] - A[2]*A[3]/A[0])*(A[0]*A[4] - A[1]*A[3])/((A[4] -
                A[1]*A[3]/A[0])*(A[0]*A[4]*A[8] - A[0]*A[5]*A[7] -
                A[1]*A[3]*A[8] + A[1]*A[5]*A[6] + A[2]*A[3]*A[7] -
                A[2]*A[4]*A[6])))
        X[6] = ((A[0]*A[4] - A[1]*A[3])*(A[3]*(A[7] -
                A[1]*A[6]/A[0])/(A[0]*(A[4] - A[1]*A[3]/A[0])) -
                A[6]/A[0])/(A[0]*A[4]*A[8] - A[0]*A[5]*A[7] - A[1]*A[3]*A[8] +
                A[1]*A[5]*A[6] + A[2]*A[3]*A[7] - A[2]*A[4]*A[6]))
        X[7] = (-(A[7] - A[1]*A[6]/A[0])*(A[0]*A[4] - A[1]*A[3])/((A[4] -
                A[1]*A[3]/A[0])*(A[0]*A[4]*A[8] - A[0]*A[5]*A[7] -
                A[1]*A[3]*A[8] + A[1]*A[5]*A[6] + A[2]*A[3]*A[7] -
                A[2]*A[4]*A[6])))
        X[8] = ((A[0]*A[4] - A[1]*A[3])/(A[0]*A[4]*A[8] - A[0]*A[5]*A[7] -
                A[1]*A[3]*A[8] + A[1]*A[5]*A[6] + A[2]*A[3]*A[7] -
                A[2]*A[4]*A[6]))
    else:
        raise ValueError('Unknown shape')
    # return in the caller's original storage shape
    if X.shape == orig_shape:
        return X
    return matrix_rep(X, 0)
def expm(A):
    """Compute the matrix exponential of a 3x3 matrix, preserving the
    caller's storage shape."""
    mat, orig_shape = matrix_rep(A)
    mat2 = la.expm(mat)
    return array_rep(mat2, orig_shape)
def logm(A):
    """Compute the matrix logarithm of a 3x3 matrix, preserving the
    caller's storage shape."""
    mat, orig_shape = matrix_rep(A)
    mat2 = la.logm(mat)
    return array_rep(mat2, orig_shape)
def powm(A, t):
    """Compute the matrix power A**t of a 3x3 matrix, preserving the
    caller's storage shape."""
    mat, orig_shape = matrix_rep(A)
    # BUG FIX: the exponent *t* was silently dropped (la.powm(mat));
    # pass it through to the linalg helper
    mat2 = la.powm(mat, t)
    return array_rep(mat2, orig_shape)
def sqrtm(A):
    """Compute the square root of a 3x3 matrix, preserving the caller's
    storage shape."""
    mat, orig_shape = matrix_rep(A)
    mat2 = la.sqrtm(mat)
    return array_rep(mat2, orig_shape)
def matrix_rep(A, disp=1):
    """Convert array to matrix.

    Returns the 3x3 matrix form of A; with disp true (default) the
    original storage shape is also returned so callers can convert back
    with array_rep.
    """
    A = np.asarray(A)
    assert has_valid_shape(A)
    orig_shape = A.shape
    if orig_shape == (6,):
        # scatter [XX, YY, ZZ, XY, YZ, XZ] into both triangles of a 3x3:
        # ix1 lists the (row, col) targets, ix2 the matching source slots
        # (shear components 3, 5, 4 are reused for the lower triangle)
        ix1 = ([0,1,2,0,1,0,1,2,2],[0,1,2,1,2,2,0,0,1])
        ix2 = [0,1,2,3,4,5,3,5,4]
        mat = np.zeros((3,3))
        mat[ix1] = A[ix2]
    elif orig_shape == (9,):
        mat = np.reshape(A, (3,3))
    elif orig_shape == (3,3):
        mat = np.array(A)
    else:
        raise ValueError('Unknown shape')
    if not disp:
        return mat
    return mat, orig_shape
def array_rep(mat, shape):
    """Convert a 3x3 matrix back to the array storage given by *shape*
    (the reverse of matrix_rep).  A 6-component input passes through."""
    mat = np.asarray(mat)
    if mat.shape == (6,):
        return mat
    if shape == (6,):
        # symmetrize, then gather [XX, YY, ZZ, XY, YZ, XZ]
        sym = (mat + mat.T) * .5
        rows = [0, 1, 2, 0, 1, 0]
        cols = [0, 1, 2, 1, 2, 2]
        return sym[(rows, cols)]
    if shape == (9,):
        return mat.flatten()
    if shape == (3, 3):
        return np.array(mat)
    raise ValueError('Unknown shape')
def symmetric_part(A):
    """Return the symmetric part of *A* in 6-component storage.

    A 6-component input is already symmetric and passes through.
    """
    A = np.asarray(A)
    assert A.shape in ((6,), (9,), (3, 3))
    if A.shape == (6,):
        return A
    if A.shape == (9,):
        A = A.reshape((3, 3))
    sym = (A + A.T) * .5
    rows = [0, 1, 2, 0, 1, 0]
    cols = [0, 1, 2, 1, 2, 2]
    return sym[(rows, cols)]
def isdiag(A):
    """Determine whether A is (numerically) diagonal.

    All off-diagonal components must be within machine epsilon of zero.
    """
    A = np.asarray(A)
    assert has_valid_shape(A)
    # BUG FIX: the original compared A.shape == (6.) -- a float, never a
    # tuple -- so valid 6-component inputs fell through to ValueError
    if A.shape == (6,):
        return np.all(np.abs(A[3:]) <= epsilon)
    elif A.shape == (9,):
        # BUG FIX: the original tested the DIAGONAL entries [0, 4, 8];
        # the off-diagonal entries of the flattened 3x3 are intended
        return np.all(np.abs(A[[1, 2, 3, 5, 6, 7]]) <= epsilon)
    elif A.shape == (3, 3):
        return np.all(np.abs(A[([0,0,1,1,2,2],[1,2,0,2,0,1])]) <= epsilon)
    raise ValueError('Unknown shape')
def symsq(F):
    """ Computes dot(F.T, F), returned in 6-component symmetric storage."""
    F = np.asarray(F).reshape(-1)
    assert F.shape == (9,)
    M = F.reshape((3, 3))
    # F.T F is symmetric by construction; gather [XX, YY, ZZ, XY, YZ, XZ]
    C = np.dot(M.T, M)
    return np.array([C[0, 0], C[1, 1], C[2, 2],
                     C[0, 1], C[1, 2], C[0, 2]], dtype=float)
def unrotate(R, A):
    """Rotate A back by R (push with R.T; for a rotation, J = 1)."""
    return push(R.T, A)
def rotate(R, A):
    """Rotate A by R (push with the rotation matrix)."""
    return push(R, A)
def pull(F, A):
    """Pull-back of A by F, i.e. push with inv(F)."""
    return push(inv(F), A)
def push(F, A):
    """Computes the push operation F A F.T / J.

    A symmetric 3x3 input is condensed to 6-component storage first; the
    actual work is delegated to push6 / push66 by storage shape.
    """
    F = np.asarray(F).reshape(-1)
    assert F.shape == (9,)
    A = np.asarray(A)
    assert A.shape in ((3, 3), (6,), (6, 6))
    if A.shape == (3, 3):
        # only symmetric matrices can be condensed to 6 components
        assert is_symmetric(A)
        A = array_rep(A, (6,))
    dispatch = {(6,): push6, (6, 6): push66}
    worker = dispatch.get(A.shape)
    if worker is None:
        raise ValueError('Unknown shape')
    return worker(F, A)
def push6(F, A):
    """Push F A F.T / det(F) for a 6-component symmetric A and a
    9-component F; the expanded products below are the components of
    F.A.F^T gathered into [XX, YY, ZZ, XY, YZ, XZ]."""
    X = np.zeros(6)
    X[0] = (F[0]*(A[0]*F[0] + A[3]*F[1] + A[5]*F[2]) + F[1]*(A[1]*F[1] +
            A[3]*F[0] + A[4]*F[2]) + F[2]*(A[2]*F[2] + A[4]*F[1] + A[5]*F[0]))
    X[1] = (F[3]*(A[0]*F[3] + A[3]*F[4] + A[5]*F[5]) + F[4]*(A[1]*F[4] +
            A[3]*F[3] + A[4]*F[5]) + F[5]*(A[2]*F[5] + A[4]*F[4] + A[5]*F[3]))
    X[2] = (F[6]*(A[0]*F[6] + A[3]*F[7] + A[5]*F[8]) + F[7]*(A[1]*F[7] +
            A[3]*F[6] + A[4]*F[8]) + F[8]*(A[2]*F[8] + A[4]*F[7] + A[5]*F[6]))
    X[3] = (F[3]*(A[0]*F[0] + A[3]*F[1] + A[5]*F[2]) + F[4]*(A[1]*F[1] +
            A[3]*F[0] + A[4]*F[2]) + F[5]*(A[2]*F[2] + A[4]*F[1] + A[5]*F[0]))
    X[4] = (F[6]*(A[0]*F[3] + A[3]*F[4] + A[5]*F[5]) + F[7]*(A[1]*F[4] +
            A[3]*F[3] + A[4]*F[5]) + F[8]*(A[2]*F[5] + A[4]*F[4] + A[5]*F[3]))
    X[5] = (F[6]*(A[0]*F[0] + A[3]*F[1] + A[5]*F[2]) + F[7]*(A[1]*F[1] +
            A[3]*F[0] + A[4]*F[2]) + F[8]*(A[2]*F[2] + A[4]*F[1] + A[5]*F[0]))
    return X / det(F)
def push66(F, A):
    """Push a 6x6 fourth-order tensor: Q A Q.T / det(F), with Q the
    sym-leaf (Mandel) transformation matrix of F."""
    Q = symleaf(F)
    X = np.dot(np.dot(Q, A), Q.T)
    return X / det(F)
def polar_decomp(F):
    """Return the polar decomposition of F (delegates to the linalg module)."""
    return la.polar_decomp(F)
def is_symmetric(A):
    """Return True when *A* represents a symmetric tensor.

    6-component storage is symmetric by construction; other storages are
    checked by comparing the off-diagonal pairs with np.allclose.
    """
    A = np.asarray(A)
    if A.shape == (6,):
        return True
    if A.shape == (9,):
        A = A.reshape((3, 3))
    upper = [A[0, 1], A[1, 2], A[0, 2]]
    lower = [A[1, 0], A[2, 1], A[2, 0]]
    return np.allclose(upper, lower)
def dyad(A, B):
    """Computes the outer product of A and B.

    Two 6-component (or symmetric 3x3) tensors produce the 6x6
    fourth-order dyad A_ij B_kl; two 3-vectors produce the second-order
    dyad in 6-component storage.
    """
    A, B = np.asarray(A), np.asarray(B)
    # BUG FIX: the original asserted has_valid_shape, which rejects (3,)
    # vectors and made the documented (3,) x (3,) branch unreachable
    assert has_valid_shape(A) or A.shape == (3,)
    assert has_valid_shape(B) or B.shape == (3,)
    if A.shape == (3,3) and is_symmetric(A):
        A = array_rep(A, (6,))
    if B.shape == (3,3) and is_symmetric(B):
        B = array_rep(B, (6,))
    if A.shape == (6,) and B.shape == (6,):
        # replaces the original 36 unrolled assignments X[i,j] = A[i]*B[j];
        # astype keeps the float dtype the original np.zeros((6,6)) implied
        X = np.outer(A, B).astype(float)
    elif A.shape == (3,) and B.shape == (3,):
        X = np.zeros(6)
        X[0] = A[0] * B[0]
        X[1] = A[1] * B[1]
        X[2] = A[2] * B[2]
        X[3] = A[0] * B[1]
        X[4] = A[1] * B[2]
        X[5] = A[0] * B[2]
    else:
        raise ValueError('Unknown shape')
    return X
def symshuffle(A, B):
    """ Computes the product Xijkl = .5 (Aik Bjl + Ail Bjk)

    Both inputs must be symmetric second-order tensors (6-component or
    symmetric 3x3); the result is the 6x6 matrix form of the symmetrized
    fourth-order product.
    """
    A, B = np.asarray(A), np.asarray(B)
    assert has_valid_shape(A)
    assert has_valid_shape(B)
    # condense symmetric 3x3 inputs to 6 components
    if A.shape == (3,3) and is_symmetric(A):
        A = array_rep(A, (6,))
    if B.shape == (3,3) and is_symmetric(B):
        B = array_rep(B, (6,))
    X = np.zeros((6,6))
    if A.shape == (6,) and B.shape == (6,):
        # unrolled component expressions; rows/columns follow the
        # [XX, YY, ZZ, XY, YZ, XZ] pair ordering
        X[0,0] = (A[0] * B[0] + A[0] * B[0]) / 2.
        X[0,1] = (A[3] * B[3] + A[3] * B[3]) / 2.
        X[0,2] = (A[5] * B[5] + A[5] * B[5]) / 2.
        X[0,3] = (A[0] * B[3] + A[3] * B[0]) / 2.
        X[0,4] = (A[3] * B[5] + A[5] * B[3]) / 2.
        X[0,5] = (A[0] * B[5] + A[5] * B[0]) / 2.
        X[1,0] = (A[3] * B[3] + A[3] * B[3]) / 2.
        X[1,1] = (A[1] * B[1] + A[1] * B[1]) / 2.
        X[1,2] = (A[4] * B[4] + A[4] * B[4]) / 2.
        X[1,3] = (A[3] * B[1] + A[1] * B[3]) / 2.
        X[1,4] = (A[1] * B[4] + A[4] * B[1]) / 2.
        X[1,5] = (A[3] * B[4] + A[4] * B[3]) / 2.
        X[2,0] = (A[5] * B[5] + A[5] * B[5]) / 2.
        X[2,1] = (A[4] * B[4] + A[4] * B[4]) / 2.
        X[2,2] = (A[2] * B[2] + A[2] * B[2]) / 2.
        X[2,3] = (A[5] * B[4] + A[4] * B[5]) / 2.
        X[2,4] = (A[4] * B[2] + A[2] * B[4]) / 2.
        X[2,5] = (A[5] * B[2] + A[2] * B[5]) / 2.
        X[3,0] = (A[0] * B[3] + A[0] * B[3]) / 2.
        X[3,1] = (A[3] * B[1] + A[3] * B[1]) / 2.
        X[3,2] = (A[5] * B[4] + A[5] * B[4]) / 2.
        X[3,3] = (A[0] * B[1] + A[3] * B[3]) / 2.
        X[3,4] = (A[3] * B[4] + A[5] * B[1]) / 2.
        X[3,5] = (A[0] * B[4] + A[5] * B[3]) / 2.
        X[4,0] = (A[3] * B[5] + A[3] * B[5]) / 2.
        X[4,1] = (A[1] * B[4] + A[1] * B[4]) / 2.
        X[4,2] = (A[4] * B[2] + A[4] * B[2]) / 2.
        X[4,3] = (A[3] * B[4] + A[1] * B[5]) / 2.
        X[4,4] = (A[1] * B[2] + A[4] * B[4]) / 2.
        X[4,5] = (A[3] * B[2] + A[4] * B[5]) / 2.
        X[5,0] = (A[0] * B[5] + A[0] * B[5]) / 2.
        X[5,1] = (A[3] * B[4] + A[3] * B[4]) / 2.
        X[5,2] = (A[5] * B[2] + A[5] * B[2]) / 2.
        X[5,3] = (A[0] * B[4] + A[3] * B[5]) / 2.
        X[5,4] = (A[3] * B[2] + A[5] * B[4]) / 2.
        X[5,5] = (A[0] * B[2] + A[5] * B[5]) / 2.
    else:
        raise ValueError('Unknown shape')
    return X
def symleaf(F):
    """Compute the 6x6 Mandel matrix that is the sym-leaf transformation
    of the input 3x3 matrix F.

    Parameters
    ----------
    F : array_like
        Any 3x3 matrix (in conventional 3x3 storage).

    Returns
    -------
    X : ndarray, shape (6, 6)
        6x6 Mandel matrix for the sym-leaf transformation matrix.

    Notes
    -----
    If A is any symmetric tensor, and if {A} is its 6x1 Mandel array, then
    the 6x1 Mandel array for the tensor B = F.A.transpose(F) may be
    computed by {B} = [FF]{A}.
    If F is a deformation F, then B is the "push" (spatial) transformation
    of the reference tensor A. If F is inverse(F), then B is the "pull"
    (reference) transformation of the spatial tensor A, and therefore B
    would be inverse([FF]){A}.
    If F is a rotation, then B is the rotation of A, and [FF] would be a
    6x6 orthogonal matrix, just as is F.
    """
    # Flatten to 9 components; F[3*i + j] corresponds to matrix entry F_ij.
    F = np.asarray(F).reshape(-1)
    assert F.shape == (9,)
    X = np.zeros((6,6))
    X[0,0] = F[0] * F[0]
    X[0,1] = F[1] * F[1]
    X[0,2] = F[2] * F[2]
    X[0,3] = F[0] * F[1] + F[1] * F[0]
    X[0,4] = F[1] * F[2] + F[2] * F[1]
    X[0,5] = F[0] * F[2] + F[2] * F[0]
    X[1,0] = F[3] * F[3]
    X[1,1] = F[4] * F[4]
    X[1,2] = F[5] * F[5]
    X[1,3] = F[3] * F[4] + F[4] * F[3]
    X[1,4] = F[4] * F[5] + F[5] * F[4]
    X[1,5] = F[3] * F[5] + F[5] * F[3]
    X[2,0] = F[6] * F[6]
    X[2,1] = F[7] * F[7]
    X[2,2] = F[8] * F[8]
    X[2,3] = F[6] * F[7] + F[7] * F[6]
    X[2,4] = F[7] * F[8] + F[8] * F[7]
    X[2,5] = F[6] * F[8] + F[8] * F[6]
    X[3,0] = F[0] * F[3] + F[3] * F[0]
    X[3,1] = F[1] * F[4] + F[4] * F[1]
    X[3,2] = F[2] * F[5] + F[5] * F[2]
    X[3,3] = F[0] * F[4] + F[1] * F[3]
    X[3,4] = F[1] * F[5] + F[2] * F[4]
    X[3,5] = F[0] * F[5] + F[2] * F[3]
    X[4,0] = F[3] * F[6] + F[6] * F[3]
    X[4,1] = F[4] * F[7] + F[7] * F[4]
    X[4,2] = F[5] * F[8] + F[8] * F[5]
    X[4,3] = F[3] * F[7] + F[4] * F[6]
    X[4,4] = F[4] * F[8] + F[5] * F[7]
    X[4,5] = F[3] * F[8] + F[5] * F[6]
    X[5,0] = F[0] * F[6] + F[6] * F[0]
    X[5,1] = F[1] * F[7] + F[7] * F[1]
    X[5,2] = F[2] * F[8] + F[8] * F[2]
    X[5,3] = F[0] * F[7] + F[1] * F[6]
    X[5,4] = F[1] * F[8] + F[2] * F[7]
    X[5,5] = F[0] * F[8] + F[2] * F[6]
    return X
| |
from nose.tools import * # flake8: noqa
from website.util import permissions
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import ProjectFactory
from tests.factories import AuthUserFactory
from tests.factories import PrivateLinkFactory
from website.models import Node
class ViewOnlyTestCase(ApiTestCase):
    """Shared fixture for view-only-link tests.

    Creates four projects (two private, two public); the first project of
    each pair carries both an anonymous and a non-anonymous view-only link.
    Every project has one read-only and one write contributor in addition
    to its creator.
    """

    def setUp(self):
        super(ViewOnlyTestCase, self).setUp()
        self.creation_user = AuthUserFactory()
        self.viewing_user = AuthUserFactory()
        self.contributing_read_user = AuthUserFactory()
        self.contributing_write_user = AuthUserFactory()
        self.valid_contributors = [
            self.creation_user._id,
            self.contributing_read_user._id,
            self.contributing_write_user._id,
        ]

        self.private_node_one = self._create_project(False, "Private One")
        self.private_node_one_anonymous_link = self._create_link(self.private_node_one, anonymous=True)
        self.private_node_one_private_link = self._create_link(self.private_node_one, anonymous=False)
        self.private_node_one_url = '/{}nodes/{}/'.format(API_BASE, self.private_node_one._id)

        self.private_node_two = self._create_project(False, "Private Two")
        self.private_node_two_url = '/{}nodes/{}/'.format(API_BASE, self.private_node_two._id)

        self.public_node_one = self._create_project(True, "Public One")
        self.public_node_one_anonymous_link = self._create_link(self.public_node_one, anonymous=True)
        self.public_node_one_private_link = self._create_link(self.public_node_one, anonymous=False)
        self.public_node_one_url = '/{}nodes/{}/'.format(API_BASE, self.public_node_one._id)

        self.public_node_two = self._create_project(True, "Public Two")
        self.public_node_two_url = '/{}nodes/{}/'.format(API_BASE, self.public_node_two._id)

    def _create_project(self, is_public, title):
        """Create a project owned by ``creation_user`` with one read and one
        write contributor attached."""
        node = ProjectFactory(is_public=is_public, creator=self.creation_user, title=title)
        node.add_contributor(self.contributing_read_user, permissions=[permissions.READ], save=True)
        node.add_contributor(self.contributing_write_user, permissions=[permissions.WRITE], save=True)
        return node

    def _create_link(self, node, anonymous):
        """Attach and persist a view-only link for ``node``."""
        link = PrivateLinkFactory(anonymous=anonymous)
        link.nodes.append(node)
        link.save()
        return link

    def tearDown(self):
        # Bug fix: the base-class tearDown was never invoked, leaking
        # per-test state set up by ApiTestCase.
        super(ViewOnlyTestCase, self).tearDown()
        Node.remove()
class TestNodeDetailViewOnlyLinks(ViewOnlyTestCase):
    """Detail-endpoint behaviour of view-only links: authorization,
    anonymization of contributor ids, and hiding of registrations/forks
    for anonymous links."""
    def test_private_node_with_link_works_when_using_link(self):
        res_normal = self.app.get(self.private_node_one_url, auth=self.contributing_read_user.auth)
        assert_equal(res_normal.status_code, 200)
        res_linked = self.app.get(self.private_node_one_url, {'view_only': self.private_node_one_private_link.key})
        assert_equal(res_linked.status_code, 200)
        assert_items_equal(res_linked.json['data']['attributes']['current_user_permissions'], ['read'])
        # The link must grant exactly the same view as a read contributor.
        assert_equal(res_linked.json, res_normal.json)
    def test_private_node_with_link_unauthorized_when_not_using_link(self):
        res = self.app.get(self.private_node_one_url, expect_errors=True)
        assert_equal(res.status_code, 401)
    def test_private_node_with_link_anonymous_does_not_expose_contributor_id(self):
        res = self.app.get(self.private_node_one_url, {
            'view_only': self.private_node_one_anonymous_link.key,
            'embed': 'contributors',
        })
        assert_equal(res.status_code, 200)
        contributors = res.json['data']['embeds']['contributors']['data']
        for contributor in contributors:
            # Anonymous links must blank out every contributor id.
            assert_equal(contributor['id'], '')
    def test_private_node_with_link_non_anonymous_does_expose_contributor_id(self):
        res = self.app.get(self.private_node_one_url, {
            'view_only': self.private_node_one_private_link.key,
            'embed': 'contributors',
        })
        assert_equal(res.status_code, 200)
        contributors = res.json['data']['embeds']['contributors']['data']
        for contributor in contributors:
            # Contributor ids look like '<node>-<user>'; the user part
            # must be one of the real contributors.
            assert_in(contributor['id'].split('-')[1], self.valid_contributors)
    def test_private_node_logged_in_with_anonymous_link_does_not_expose_contributor_id(self):
        # NOTE(review): despite the test name, this uses the *non*-anonymous
        # link and asserts that ids ARE exposed -- confirm intent.
        res = self.app.get(self.private_node_one_url, {
            'view_only': self.private_node_one_private_link.key,
            'embed': 'contributors',
        }, auth=self.creation_user.auth)
        assert_equal(res.status_code, 200)
        contributors = res.json['data']['embeds']['contributors']['data']
        for contributor in contributors:
            assert_in(contributor['id'].split('-')[1], self.valid_contributors)
    def test_public_node_with_link_anonymous_does_not_expose_user_id(self):
        res = self.app.get(self.public_node_one_url, {
            'view_only': self.public_node_one_anonymous_link.key,
            'embed': 'contributors',
        })
        assert_equal(res.status_code, 200)
        contributors = res.json['data']['embeds']['contributors']['data']
        for contributor in contributors:
            assert_equal(contributor['id'], '')
    def test_public_node_with_link_non_anonymous_does_expose_contributor_id(self):
        res = self.app.get(self.public_node_one_url, {
            'view_only': self.public_node_one_private_link.key,
            'embed': 'contributors',
        })
        assert_equal(res.status_code, 200)
        contributors = res.json['data']['embeds']['contributors']['data']
        for contributor in contributors:
            assert_in(contributor['id'].split('-')[1], self.valid_contributors)
    def test_public_node_with_link_unused_does_expose_contributor_id(self):
        res = self.app.get(self.public_node_one_url, {
            'embed': 'contributors',
        })
        assert_equal(res.status_code, 200)
        contributors = res.json['data']['embeds']['contributors']['data']
        for contributor in contributors:
            assert_in(contributor['id'].split('-')[1], self.valid_contributors)
    def test_view_only_link_does_not_grant_write_permission(self):
        payload = {
            'data': {
                'attributes': {
                    'title': 'Cannot touch this' },
                'id': self.private_node_one._id,
                'type': 'nodes',
            }
        }
        res = self.app.patch_json_api(self.private_node_one_url, payload, {
            'view_only': self.private_node_one_private_link.key,
        }, expect_errors=True)
        assert_equal(res.status_code, 401)
    def test_view_only_link_from_anther_project_does_not_grant_view_permission(self):
        # Uses the *public* node's link against the *private* node's URL.
        res = self.app.get(self.private_node_one_url, {
            'view_only': self.public_node_one_private_link.key,
        }, expect_errors=True)
        assert_equal(res.status_code, 401)
    def test_private_project_logs_with_anonymous_link_does_not_expose_user_id(self):
        res = self.app.get(self.private_node_one_url+'logs/', {
            'view_only': self.private_node_one_anonymous_link.key,
        })
        assert_equal(res.status_code, 200)
        # Raw-body scan: no contributor id may appear anywhere in the logs.
        body = res.body
        assert_not_in(self.contributing_write_user._id, body)
        assert_not_in(self.contributing_read_user._id, body)
        assert_not_in(self.creation_user._id, body)
    def test_private_project_with_anonymous_link_does_not_expose_registrations_or_forks(self):
        res = self.app.get(self.private_node_one_url, {
            'view_only': self.private_node_one_anonymous_link.key,
        })
        assert_equal(res.status_code, 200)
        relationships = res.json['data']['relationships']
        if 'embeds' in res.json['data']:
            embeds = res.json['data']['embeds']
        else:
            embeds = {}
        assert_not_in('registrations', relationships)
        assert_not_in('forks', relationships, 'Add forks view to blacklist in hide_view_when_anonymous().')
        assert_not_in('registrations', embeds)
        assert_not_in('forks', embeds, 'Add forks view to blacklist in hide_view_when_anonymous().')
    def test_bad_view_only_link_does_not_modify_permissions(self):
        # An invalid key grants no access ...
        res = self.app.get(self.private_node_one_url+'logs/', {
            'view_only': 'thisisnotarealprivatekey',
        }, expect_errors=True)
        assert_equal(res.status_code, 401)
        # ... and does not break normal authenticated access either.
        res = self.app.get(self.private_node_one_url+'logs/', {
            'view_only': 'thisisnotarealprivatekey',
        }, auth=self.creation_user.auth)
        assert_equal(res.status_code, 200)
class TestNodeListViewOnlyLinks(ViewOnlyTestCase):
    """List-endpoint behaviour of view-only links."""

    def test_private_link_does_not_show_node_in_list(self):
        """A view-only link must not leak the private node into the list."""
        res = self.app.get('/{}nodes/'.format(API_BASE), {
            'view_only': self.private_node_one_private_link.key,
        })
        assert_equal(res.status_code, 200)
        listed_ids = [entry['id'] for entry in res.json['data']]
        assert_not_in(self.private_node_one._id, listed_ids)

    def test_anonymous_link_does_not_show_contributor_id_in_node_list(self):
        """With an anonymous link, embedded contributor ids are blanked."""
        res = self.app.get('/{}nodes/'.format(API_BASE), {
            'view_only': self.private_node_one_anonymous_link.key,
            'embed': 'contributors',
        })
        assert_equal(res.status_code, 200)
        checked = 0
        for entry in res.json['data']:
            for contrib in entry['embeds']['contributors']['data']:
                checked += 1
                assert_equal(contrib['id'], '')
        # Guard against a vacuous pass: at least one contributor was seen.
        assert_not_equal(checked, 0)

    def test_non_anonymous_link_does_show_contributor_id_in_node_list(self):
        """With a plain link, embedded contributor ids stay visible."""
        res = self.app.get('/{}nodes/'.format(API_BASE), {
            'view_only': self.private_node_one_private_link.key,
            'embed': 'contributors',
        })
        assert_equal(res.status_code, 200)
        checked = 0
        for entry in res.json['data']:
            for contrib in entry['embeds']['contributors']['data']:
                checked += 1
                assert_in(contrib['id'].split('-')[1], self.valid_contributors)
        assert_not_equal(checked, 0)
| |
#!/usr/bin/env python
import sys
import codecs
import re
import os
import time
import urllib2
class Result(object):
    '''Result of a URL extraction; this is the return object of the
    UrlExtract.extract() method.

    Holds the detected public suffix (tld), registered domain, subdomains,
    URL query part and a validity flag.
    '''

    def __init__(self):
        # Bug fix: these used to be *class* attributes, so the mutable
        # ``subdomains`` list was shared between every Result instance.
        self.domValid = True
        self.tld = None
        self.domain = None
        self.query = None
        self.subdomains = []

    def setFoundSubdomains(self, subdomains):
        self.subdomains = subdomains

    def getFoundSubdomains(self):
        return self.subdomains

    def setTld(self, tld):
        self.tld = tld

    def getTld(self):
        return self.tld

    def setDomain(self, domain):
        self.domain = domain

    def getDomain(self):
        return self.domain

    def setValid(self, valid):
        self.domValid = valid

    def setUrlQuery(self, query):
        self.query = query

    def getUrlQuery(self):
        return self.query

    def getHostname(self):
        '''Reassemble "sub.domain.tld" from the stored components.'''
        hostname = list(self.getFoundSubdomains())
        hostname.append(self.getDomain())
        hostname.append(self.getTld())
        return '.'.join(hostname)

    def valid(self):
        return self.domValid
class SuffixList(object):
    """Loads and queries the public suffix list (cached as ``tlds.dat``).

    The on-disk cache is re-downloaded once it is older than ``maxAge``
    seconds or missing.
    """

    def __init__(self, maxAge, saveDir, punyTranslator):
        # Bug fix: the suffix set used to be a *class* attribute, so every
        # instance shared (and kept accumulating into) the same set.
        self.suffixList = set()
        self.punyTranslator = punyTranslator
        self.maxAge = maxAge
        self.saveDir = saveDir
        self.fileLocation = self.generateLocation(saveDir, 'tlds.dat')

    def loadList(self):
        """Populate the in-memory set, downloading the list if stale/missing."""
        file = self.getFileLocation()
        if not self.fileExists(file) or not self.fresh(file, self.maxAge):
            self.downloadList(file)
        with open(self.getFileLocation(), 'r') as listHandler:
            for line in listHandler:
                line = line.strip()
                # Skip blank lines and '//' comment lines.
                if line and line[0] != '/':
                    # Store UTF-8 entries in punycode form for matching.
                    if self.punyTranslator.hasUtf(line):
                        line = self.punyTranslator.encode(line)
                    self.suffixList.add(line)

    def generateLocation(self, saveDir, listName):
        """Return the cache path, defaulting to the current directory."""
        return '%s/%s' % (saveDir or os.getcwd(), listName)

    def getFileLocation(self):
        return self.fileLocation

    def fresh(self, file, maxAge):
        """True while the cached file is younger than ``maxAge`` seconds."""
        return os.path.getmtime(file) + maxAge >= time.time()

    def fileExists(self, file):
        """True if ``file`` can be opened for reading."""
        try:
            with open(file, 'r'):
                pass
        except Exception:
            return False
        return True

    def downloadList(self, to):
        """Fetch the public suffix list and write it to ``to``."""
        # print() call form works identically on Python 2 and 3.
        print('Downloading list...')
        location = 'https://publicsuffix.org/list/effective_tld_names.dat'
        get = urllib2.urlopen(location)
        with open(to, 'w') as handler:
            handler.write(get.read())

    def getList(self):
        return self.suffixList

    def generateSufixesList(self, hostname):
        """All candidate suffixes of ``hostname``, longest first, each in
        exception ('!'), wildcard ('*.') and plain form."""
        suffixes = []
        splitted = hostname.split('.')
        # range() returns a list on Python 2 with identical results to the
        # old xrange() usage, and also works on Python 3.
        for i in range(len(splitted)):
            searchFor = '.'.join(splitted[i:])
            suffixes.append('!' + searchFor)
            suffixes.append('*.' + searchFor)
            suffixes.append(searchFor)
        return suffixes

    def searchSuffix(self, hostname):
        """Return the public suffix matching ``hostname`` or None.

        Honors the public-suffix-list rules: '!' exception entries beat
        '*.' wildcard entries, which beat plain entries.
        """
        generatedDomainSuffixes = self.generateSufixesList(hostname)
        splittedHostname = hostname.split('.')
        foundSuffix = None
        for suffix in generatedDomainSuffixes:
            if suffix in self.getList():
                if suffix.startswith('!'):
                    # Exception rule: the suffix is everything after the
                    # first (excluded) label.
                    splitted = suffix.split('.')
                    foundSuffix = '.'.join(splitted[1:])
                    break
                elif suffix.startswith('*'):
                    # Wildcard rule: substitute the matching hostname label
                    # for the '*'.
                    suffixLen = len(suffix.split('.'))
                    splittedHostname = list(reversed(splittedHostname))
                    index = suffixLen-1
                    if index >= len(splittedHostname):
                        break
                    host = splittedHostname[index]
                    suffix = suffix.replace('*', host)
                    foundSuffix = suffix
                    break
                else:
                    foundSuffix = suffix
                    break
        return foundSuffix
class DomainPunyTranslator(object):
    """Converts hostname labels between UTF-8 and punycode ('xn--') form."""

    def hasUtf(self, string):
        """True if any character of *string* is outside the ASCII range."""
        return any(ord(ch) > 127 for ch in string)

    def encode(self, string):
        """Punycode-encode every non-ASCII label of a dotted hostname."""
        def to_puny(label):
            if not self.hasUtf(label):
                return label
            return 'xn--' + codecs.encode(label.decode('utf8'), 'punycode')
        return '.'.join(to_puny(label) for label in string.split('.'))

    def decode(self, url):
        """Decode every punycode label of a dotted hostname back to UTF-8."""
        labels = []
        for label in url.split('.'):
            if self.isPunyEncoded(label):
                punycode = '--'.join(label.split('--')[1:])
                label = codecs.decode(punycode, 'punycode')
            labels.append(label)
        return '.'.join(labels).encode('utf8')

    def isPunyEncoded(self, url):
        """True if the string carries an 'xn--' punycode marker."""
        return 'xn--' in url
class UrlExtract(object):
    """Splits a URL into public suffix (tld), registered domain, subdomains
    and query part, using the public suffix list for suffix detection."""
    # Class-level defaults; real values are assigned in __init__.
    alwaysPuny = False
    suffixList = None
    punyTranslator = None

    def __init__(self, datFileMaxAge=86400*31,
                 datFileSaveDir=None, alwaysPuny=False):
        self.alwaysPuny = alwaysPuny
        self.punyTranslator = DomainPunyTranslator()
        self.suffixList = SuffixList(datFileMaxAge,
                                     datFileSaveDir, self.punyTranslator)
        self.suffixList.loadList()

    def trimUrl(self, url):
        """Lower-case, strip and drop any http(s) scheme prefix."""
        url = url.lower().strip()
        url = url.replace('http://', '')
        url = url.replace('https://', '')
        return url

    def validDomain(self, domain):
        """Check basic hostname label constraints (length, hyphens, spaces)."""
        valid = True
        for hostname in domain.split('.'):
            # Encode to punycode first: for UTF-8 labels the raw length
            # would be measured on the wrong representation.
            hostname = self.punyTranslator.encode(hostname)
            if len(hostname) > 63:
                valid = False
            elif hostname.startswith('-'):
                valid = False
            elif hostname.endswith('-'):
                valid = False
            elif hostname.find(' ') != -1:
                valid = False
        return valid

    def normalizeEncoding(self, result, utfInput, punyInput):
        # Bug fix: ``self`` was missing from the signature, so an instance
        # call would have bound ``self`` to ``result``. Still an unused stub.
        pass

    def extract(self, url):
        """Parse *url* and return a populated Result object."""
        url = self.trimUrl(url)
        punyInput = self.punyTranslator.isPunyEncoded(url)
        utfInput = self.punyTranslator.hasUtf(url)
        urlChunks = url.split('/')
        hostname = urlChunks[0]
        if utfInput:
            hostname = self.punyTranslator.encode(hostname)
        foundSuffix = self.suffixList.searchSuffix(hostname)
        urlQuery = None
        valid = True
        foundSubdomains = []
        withoutTld = None
        if len(urlChunks) > 1:
            urlQuery = '/'.join(urlChunks[1:])
        if foundSuffix is None:
            valid = False
        else:
            # Strip the suffix (and its leading dot) from the hostname.
            withoutTld = re.sub(re.escape(foundSuffix)+'$', '', hostname)
            withoutTld = withoutTld[:-1]
            if withoutTld == '' or not self.validDomain(hostname):
                valid = False
            withoutTldSplitted = withoutTld.split('.')
            if len(withoutTldSplitted) > 1:
                # Last label is the registered domain; the rest are
                # subdomains.
                subdomains = withoutTldSplitted
                withoutTld = withoutTldSplitted[-1]
                subdomains.remove(withoutTld)
                foundSubdomains = subdomains
            if utfInput:
                withoutTld = self.punyTranslator.decode(withoutTld)
                foundSuffix = self.punyTranslator.decode(foundSuffix)
                foundSubdomains = map(self.punyTranslator.decode, foundSubdomains)
            if punyInput or self.alwaysPuny:
                withoutTld = self.punyTranslator.encode(withoutTld)
                foundSuffix = self.punyTranslator.encode(foundSuffix)
                foundSubdomains = map(self.punyTranslator.encode, foundSubdomains)
        result = Result()
        if valid:
            result.setValid(True)
            result.setDomain(withoutTld)
            result.setTld(foundSuffix)
            result.setFoundSubdomains(foundSubdomains)
            result.setUrlQuery(urlQuery)
        else:
            result.setValid(False)
        return result
if __name__ == '__main__':
    # CLI demo: extract the URL given as the first command-line argument
    # and print its parsed components (Python 2 print statements).
    url = sys.argv[1]
    extract = UrlExtract()
    extracted = extract.extract(url)
    print extracted.getDomain()
    print extracted.getTld()
    print extracted.getFoundSubdomains()
    print 'hostname', extracted.getHostname()
| |
"""Precompute coefficients of several series expansions
of Wright's generalized Bessel function Phi(a, b, x).
See https://dlmf.nist.gov/10.46.E1 with rho=a, beta=b, z=x.
"""
from argparse import ArgumentParser, RawTextHelpFormatter
import numpy as np
from scipy.integrate import quad
from scipy.optimize import minimize_scalar, curve_fit
from time import time
try:
import sympy
from sympy import EulerGamma, Rational, S, Sum, \
factorial, gamma, gammasimp, pi, polygamma, symbols, zeta
from sympy.polys.polyfuncs import horner
except ImportError:
pass
def series_small_a():
    """Taylor series expansion of Phi(a, b, x) in a=0 up to order 5.

    Returns a report string with the coefficient lists A (powers of a),
    X (polynomials in x) and B (polygamma brackets in b).
    """
    order = 5
    a, b, x, k = symbols("a b x k")
    A = []  # terms with a
    X = []  # terms with x
    B = []  # terms with b (polygammas)
    # Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i])
    expression = Sum(x**k/factorial(k)/gamma(a*k+b), (k, 0, S.Infinity))
    expression = gamma(b)/sympy.exp(x) * expression
    # nth term of taylor series in a=0: a^n/n! * (d^n Phi(a, b, x)/da^n at a=0)
    for n in range(0, order+1):
        term = expression.diff(a, n).subs(a, 0).simplify().doit()
        # set the whole bracket involving polygammas to 1
        x_part = (term.subs(polygamma(0, b), 1)
                  .replace(polygamma, lambda *args: 0))
        # sign convention: x part always positive
        x_part *= (-1)**n
        A.append(a**n/factorial(n))
        X.append(horner(x_part))
        B.append(horner((term/x_part).simplify()))
    # Typo fix: "Tylor" -> "Taylor" in the emitted report.
    s = "Taylor series expansion of Phi(a, b, x) in a=0 up to order 5.\n"
    s += "Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i], i=0..5)\n"
    for name, c in zip(['A', 'X', 'B'], [A, X, B]):
        for i in range(len(c)):
            s += f"\n{name}[{i}] = " + str(c[i])
    return s
# expansion of digamma
def dg_series(z, n):
    """Symbolic expansion of digamma(z) in z=0 to order n.
    See https://dlmf.nist.gov/5.7.E4 and with https://dlmf.nist.gov/5.5.E2
    """
    k = symbols("k")
    # Sum of the zeta-series tail; the pole 1/z and Euler's constant are
    # added separately.
    zeta_tail = sympy.summation((-1)**k * zeta(k) * z**(k-1), (k, 2, n+1))
    return -1/z - EulerGamma + zeta_tail
def pg_series(k, z, n):
    """Symbolic expansion of polygamma(k, z) in z=0 to order n.

    Uses polygamma(k, z) = d^k/dz^k digamma(z).
    """
    digamma_expansion = dg_series(z, n + k)
    return sympy.diff(digamma_expansion, z, k)
def series_small_a_small_b():
    """Taylor series expansion of Phi(a, b, x) in a=0 and b=0 up to order 5.
    Be aware of cancellation of poles in b=0 of digamma(b)/Gamma(b) and
    polygamma functions.
    digamma(b)/Gamma(b) = -1 - 2*M_EG*b + O(b^2)
    digamma(b)^2/Gamma(b) = 1/b + 3*M_EG + b*(-5/12*PI^2+7/2*M_EG^2) + O(b^2)
    polygamma(1, b)/Gamma(b) = 1/b + M_EG + b*(1/12*PI^2 + 1/2*M_EG^2) + O(b^2)
    and so on.
    """
    order = 5
    a, b, x, k = symbols("a b x k")
    # Placeholder symbols for the numeric constants used in the C output.
    M_PI, M_EG, M_Z3 = symbols("M_PI M_EG M_Z3")
    c_subs = {pi: M_PI, EulerGamma: M_EG, zeta(3): M_Z3}
    A = []  # terms with a
    X = []  # terms with x
    B = []  # terms with b (polygammas expanded)
    C = []  # terms that generate B
    # Phi(a, b, x) = exp(x) * sum(A[i] * X[i] * B[i])
    # B[0] = 1
    # B[k] = sum(C[k] * b**k/k!, k=0..)
    # Note: C[k] can be obtained from a series expansion of 1/gamma(b).
    expression = gamma(b)/sympy.exp(x) * \
        Sum(x**k/factorial(k)/gamma(a*k+b), (k, 0, S.Infinity))
    # nth term of taylor series in a=0: a^n/n! * (d^n Phi(a, b, x)/da^n at a=0)
    for n in range(0, order+1):
        term = expression.diff(a, n).subs(a, 0).simplify().doit()
        # set the whole bracket involving polygammas to 1
        x_part = (term.subs(polygamma(0, b), 1)
                  .replace(polygamma, lambda *args: 0))
        # sign convention: x part always positive
        x_part *= (-1)**n
        # expansion of polygamma part with 1/gamma(b)
        pg_part = term/x_part/gamma(b)
        if n >= 1:
            # Note: highest term is digamma^n
            pg_part = pg_part.replace(polygamma,
                                      lambda k, x: pg_series(k, x, order+1+n))
            pg_part = (pg_part.series(b, 0, n=order+1-n)
                       .removeO()
                       .subs(polygamma(2, 1), -2*zeta(3))
                       .simplify()
                       )
        A.append(a**n/factorial(n))
        X.append(horner(x_part))
        B.append(pg_part)
    # Calculate C and put in the k!
    C = sympy.Poly(B[1].subs(c_subs), b).coeffs()
    C.reverse()
    for i in range(len(C)):
        C[i] = (C[i] * factorial(i)).simplify()
    # Typo fixes in the emitted report: "Tylor" -> "Taylor",
    # "allone" -> "alone".
    s = "Taylor series expansion of Phi(a, b, x) in a=0 and b=0 up to order 5."
    s += "\nPhi(a, b, x) = exp(x) * sum(A[i] * X[i] * B[i], i=0..5)\n"
    s += "B[0] = 1\n"
    s += "B[i] = sum(C[k+i-1] * b**k/k!, k=0..)\n"
    s += "\nM_PI = pi"
    s += "\nM_EG = EulerGamma"
    s += "\nM_Z3 = zeta(3)"
    for name, c in zip(['A', 'X'], [A, X]):
        for i in range(len(c)):
            s += f"\n{name}[{i}] = "
            s += str(c[i])
    # For C, do also compute the values numerically
    for i in range(len(C)):
        s += f"\n# C[{i}] = "
        s += str(C[i])
        s += f"\nC[{i}] = "
        s += str(C[i].subs({M_EG: EulerGamma, M_PI: pi, M_Z3: zeta(3)})
                 .evalf(17))
    # Does B have the assumed structure?
    s += "\n\nTest if B[i] does have the assumed structure."
    s += "\nC[i] are derived from B[1] alone."
    s += "\nTest B[2] == C[1] + b*C[2] + b^2/2*C[3] + b^3/6*C[4] + .."
    test = sum([b**k/factorial(k) * C[k+1] for k in range(order-1)])
    test = (test - B[2].subs(c_subs)).simplify()
    s += f"\ntest successful = {test==S(0)}"
    s += "\nTest B[3] == C[2] + b*C[3] + b^2/2*C[4] + .."
    test = sum([b**k/factorial(k) * C[k+2] for k in range(order-2)])
    test = (test - B[3].subs(c_subs)).simplify()
    s += f"\ntest successful = {test==S(0)}"
    return s
def asymptotic_series():
    """Asymptotic expansion for large x.
    Phi(a, b, x) ~ Z^(1/2-b) * exp((1+a)/a * Z) * sum_k (-1)^k * C_k / Z^k
    Z = (a*x)^(1/(1+a))
    Wright (1935) lists the coefficients C_0 and C_1 (he calls them a_0 and
    a_1). With slightly different notation, Paris (2017) lists coefficients
    c_k up to order k=3.
    Paris (2017) uses ZP = (1+a)/a * Z (ZP = Z of Paris) and
    C_k = C_0 * (-a/(1+a))^k * c_k
    """
    order = 8
    class g(sympy.Function):
        """Helper function g according to Wright (1935)
        g(n, rho, v) = (1 + (rho+2)/3 * v + (rho+2)*(rho+3)/(2*3) * v^2 + ...)
        Note: Wright (1935) uses square root of above definition.
        """
        nargs = 3
        @classmethod
        def eval(cls, n, rho, v):
            # Recursive definition: each step adds the degree-n term.
            if not n >= 0:
                raise ValueError("must have n >= 0")
            elif n == 0:
                return 1
            else:
                return g(n-1, rho, v) \
                    + gammasimp(gamma(rho+2+n)/gamma(rho+2)) \
                    / gammasimp(gamma(3+n)/gamma(3))*v**n
    class coef_C(sympy.Function):
        """Calculate coefficients C_m for integer m.
        C_m is the coefficient of v^(2*m) in the Taylor expansion in v=0 of
        Gamma(m+1/2)/(2*pi) * (2/(rho+1))^(m+1/2) * (1-v)^(-b)
        * g(rho, v)^(-m-1/2)
        """
        nargs = 3
        @classmethod
        def eval(cls, m, rho, beta):
            if not m >= 0:
                raise ValueError("must have m >= 0")
            v = symbols("v")
            expression = (1-v)**(-beta) * g(2*m, rho, v)**(-m-Rational(1, 2))
            # Coefficient of v^(2*m) via 2m-fold differentiation at v=0.
            res = expression.diff(v, 2*m).subs(v, 0) / factorial(2*m)
            res = res * (gamma(m + Rational(1, 2)) / (2*pi)
                         * (2/(rho+1))**(m + Rational(1, 2)))
            return res
    # in order to have nice ordering/sorting of expressions, we set a = xa.
    xa, b, xap1 = symbols("xa b xap1")
    C0 = coef_C(0, xa, b)
    # a1 = a(1, rho, beta)
    s = "Asymptotic expansion for large x\n"
    s += "Phi(a, b, x) = Z**(1/2-b) * exp((1+a)/a * Z) \n"
    s += "              * sum((-1)**k * C[k]/Z**k, k=0..6)\n\n"
    s += "Z      = pow(a * x, 1/(1+a))\n"
    s += "A[k]   = pow(a, k)\n"
    s += "B[k]   = pow(b, k)\n"
    s += "Ap1[k] = pow(1+a, k)\n\n"
    s += "C[0] = 1./sqrt(2. * M_PI * Ap1[1])\n"
    for i in range(1, order+1):
        # Normalize each C_i by C_0/(1+a)^i, clear denominators, and
        # collect in powers of b for compact C output.
        expr = (coef_C(i, xa, b) / (C0/(1+xa)**i)).simplify()
        factor = [x.denominator() for x in sympy.Poly(expr).coeffs()]
        factor = sympy.lcm(factor)
        expr = (expr * factor).simplify().collect(b, sympy.factor)
        expr = expr.xreplace({xa+1: xap1})
        s += f"C[{i}] = C[0] / ({factor} * Ap1[{i}])\n"
        s += f"C[{i}] *= {str(expr)}\n\n"
    import re
    # Rewrite sympy power notation into the C-style lookup arrays above.
    re_a = re.compile(r'xa\*\*(\d+)')
    s = re_a.sub(r'A[\1]', s)
    re_b = re.compile(r'b\*\*(\d+)')
    s = re_b.sub(r'B[\1]', s)
    s = s.replace('xap1', 'Ap1[1]')
    s = s.replace('xa', 'a')
    # max integer = 2^31-1 = 2,147,483,647. Solution: Put a point after 10
    # or more digits.
    re_digits = re.compile(r'(\d{10,})')
    s = re_digits.sub(r'\1.', s)
    return s
def optimal_epsilon_integral():
    """Fit optimal choice of epsilon for integral representation.
    The integrand of
        int_0^pi P(eps, a, b, x, phi) * dphi
    can exhibit oscillatory behaviour. It stems from the cosine of P and can
    be minimized by minimizing the arc length of the argument
        f(phi) = eps * sin(phi) - x * eps^(-a) * sin(a * phi) + (1 - b) * phi
    of cos(f(phi)).
    We minimize the arc length in eps for a grid of values (a, b, x) and fit
    a parametric function to it.
    """
    def fp(eps, a, b, x, phi):
        """Derivative of f w.r.t. phi."""
        eps_a = np.power(1. * eps, -a)
        return eps * np.cos(phi) - a * x * eps_a * np.cos(a * phi) + 1 - b

    def arclength(eps, a, b, x, epsrel=1e-2, limit=100):
        """Compute arc length of f.
        Note that the arc length of a function f from t0 to t1 is given by
            int_t0^t1 sqrt(1 + f'(t)^2) dt
        """
        # Bug fix: the ``limit`` parameter was previously ignored and a
        # hard-coded 100 was passed to quad instead.
        return quad(lambda phi: np.sqrt(1 + fp(eps, a, b, x, phi)**2),
                    0, np.pi,
                    epsrel=epsrel, limit=limit)[0]

    # grid of minimal arc length values
    data_a = [1e-3, 0.1, 0.5, 0.9, 1, 2, 4, 5, 6, 8]
    data_b = [0, 1, 4, 7, 10]
    data_x = [1, 1.5, 2, 4, 10, 20, 50, 100, 200, 500, 1e3, 5e3, 1e4]
    data_a, data_b, data_x = np.meshgrid(data_a, data_b, data_x)
    data_a, data_b, data_x = (data_a.flatten(), data_b.flatten(),
                              data_x.flatten())
    best_eps = []
    for i in range(data_x.size):
        # Minimize arc length in eps for every grid point (a, b, x).
        best_eps.append(
            minimize_scalar(lambda eps: arclength(eps, data_a[i], data_b[i],
                                                  data_x[i]),
                            bounds=(1e-3, 1000),
                            method='Bounded', options={'xatol': 1e-3}).x
        )
    best_eps = np.array(best_eps)
    # pandas would be nice, but here a dictionary is enough
    df = {'a': data_a,
          'b': data_b,
          'x': data_x,
          'eps': best_eps,
          }

    def func(data, A0, A1, A2, A3, A4, A5):
        """Compute parametric function to fit."""
        a = data['a']
        b = data['b']
        x = data['x']
        return (A0 * b * np.exp(-0.5 * a)
                + np.exp(A1 + 1 / (1 + a) * np.log(x) - A2 * np.exp(-A3 * a)
                         + A4 / (1 + np.exp(A5 * a))))

    func_params = list(curve_fit(func, df, df['eps'], method='trf')[0])
    s = "Fit optimal eps for integrand P via minimal arc length\n"
    s += "with parametric function:\n"
    s += "optimal_eps = (A0 * b * exp(-a/2) + exp(A1 + 1 / (1 + a) * log(x)\n"
    s += "               - A2 * exp(-A3 * a) + A4 / (1 + exp(A5 * a)))\n\n"
    s += "Fitted parameters A0 to A5 are:\n"
    s += ', '.join(['{:.5g}'.format(x) for x in func_params])
    return s
def main():
    """Run the precomputation selected on the command line and report the
    elapsed wall-clock time."""
    t0 = time()
    parser = ArgumentParser(description=__doc__,
                            formatter_class=RawTextHelpFormatter)
    # Typo fix in the help text: "chose" -> "choose".
    parser.add_argument('action', type=int, choices=[1, 2, 3, 4],
                        help='choose what expansion to precompute\n'
                             '1 : Series for small a\n'
                             '2 : Series for small a and small b\n'
                             '3 : Asymptotic series for large x\n'
                             '    This may take some time (>4h).\n'
                             '4 : Fit optimal eps for integral representation.'
                        )
    args = parser.parse_args()
    # Dispatch table keyed on the chosen action; argparse already rejects
    # anything outside choices, so the fallback is defensive only.
    switch = {1: lambda: print(series_small_a()),
              2: lambda: print(series_small_a_small_b()),
              3: lambda: print(asymptotic_series()),
              4: lambda: print(optimal_epsilon_integral())
              }
    switch.get(args.action, lambda: print("Invalid input."))()
    print("\n{:.1f} minutes elapsed.\n".format((time() - t0)/60))


if __name__ == '__main__':
    main()
| |
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fuel 6.1 migration
Revision ID: 37608259013
Revises: 1b1d4016375d
Create Date: 2014-12-16 11:35:19.872214
"""
# revision identifiers, used by Alembic.
revision = '37608259013'
down_revision = '1b1d4016375d'
from alembic import op
from oslo.serialization import jsonutils
import sqlalchemy as sa
from sqlalchemy.sql import text
from nailgun.db.sqlalchemy.models import fields
from nailgun.utils.migration import drop_enum
from nailgun.utils.migration import move_orchestrator_data_to_attributes
from nailgun.utils.migration import \
upgrade_6_0_to_6_1_plugins_cluster_attrs_use_ids_mapping
from nailgun.utils.migration import upgrade_attributes_metadata_6_0_to_6_1
from nailgun.utils.migration import upgrade_enum
from nailgun.utils.migration import upgrade_master_node_settings_6_0_to_6_1
from nailgun.utils.migration import upgrade_networks_metadata_to_6_1
from nailgun.utils.migration import upgrade_role_limits_6_0_to_6_1
from nailgun.utils.migration import upgrade_role_restrictions_6_0_to_6_1
# Old (6.0) and new (6.1) option sets for the database ENUM types changed
# by this migration; the *_old/*_new pairs are swapped on downgrade.
release_states_old = (
    'not_available',
    'downloading',
    'error',
    'available',
)
# 6.1 collapses the release state machine to available/unavailable.
release_states_new = (
    'available',
    'unavailable',
)
cluster_changes_old = (
    'networks',
    'attributes',
    'disks',
    'interfaces',
)
# 6.1 adds 'vmware_attributes' as a trackable pending change.
cluster_changes_new = (
    'networks',
    'attributes',
    'disks',
    'interfaces',
    'vmware_attributes'
)
bond_modes_old = (
    'active-backup',
    'balance-slb',
    'lacp-balance-tcp',
)
# 6.1 adds the standard Linux bonding modes alongside the OVS ones.
bond_modes_new = (
    # both
    'active-backup',
    # OVS
    'balance-slb',
    'lacp-balance-tcp',
    # linux
    'balance-rr',
    'balance-xor',
    'broadcast',
    '802.3ad',
    'balance-tlb',
    'balance-alb',
)
node_statuses_old = (
    'ready',
    'discover',
    'provisioning',
    'provisioned',
    'deploying',
    'error',
)
# 6.1 adds a 'removing' node status.
node_statuses_new = (
    'ready',
    'discover',
    'provisioning',
    'provisioned',
    'deploying',
    'error',
    'removing',
)
def upgrade():
    """Apply the 6.0 -> 6.1 migration: schema changes first, then data."""
    upgrade_schema()
    upgrade_data()
def downgrade():
    """Revert the 6.1 migration; data first, then schema (reverse of
    upgrade())."""
    downgrade_data()
    downgrade_schema()
def upgrade_schema():
    """Apply all 6.0 -> 6.1 schema (DDL) changes.

    The statement order matters: ENUM types are created before columns
    that use them, and release_orchestrator_data is only dropped after
    its rows are migrated into release attributes.
    """
    connection = op.get_bind()

    # New ENUM for VIP types and the ip_addrs column that uses it.
    vrouter_enum = sa.Enum('vrouter',
                           name='network_vip_types')
    vrouter_enum.create(op.get_bind(), checkfirst=False)
    op.add_column(
        'ip_addrs',
        sa.Column('vip_type', vrouter_enum, nullable=True)
    )

    # Granular deployment tasks on clusters and releases.
    op.add_column(
        'clusters',
        sa.Column('deployment_tasks', fields.JSON(), nullable=True))
    op.add_column(
        'node_nic_interfaces',
        sa.Column('driver', sa.Text(), nullable=True))
    op.add_column(
        'node_nic_interfaces',
        sa.Column('bus_info', sa.Text(), nullable=True))
    op.add_column(
        'releases',
        sa.Column('deployment_tasks', fields.JSON(), nullable=True))
    op.add_column(
        'releases',
        sa.Column('vmware_attributes_metadata', fields.JSON(), nullable=True))

    # Per-cluster vCenter/vmware settings storage.
    op.create_table(
        'vmware_attributes',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('cluster_id', sa.Integer()),
        sa.Column('editable', fields.JSON()),
        sa.ForeignKeyConstraint(['cluster_id'], ['clusters.id'], ),
        sa.PrimaryKeyConstraint('id'))

    upgrade_enum(
        'releases',             # table
        'state',                # column
        'release_state',        # ENUM name
        release_states_old,     # old options
        release_states_new,     # new options
    )
    upgrade_enum(
        "cluster_changes",      # table
        "name",                 # column
        "possible_changes",     # ENUM name
        cluster_changes_old,    # old options
        cluster_changes_new     # new options
    )
    upgrade_enum(
        "nodes",                # table
        "status",               # column
        "node_status",          # ENUM name
        node_statuses_old,      # old options
        node_statuses_new       # new options
    )

    # OpenStack workload statistics
    op.create_table('oswl_stats',
                    sa.Column('id', sa.Integer, nullable=False),
                    sa.Column(
                        'cluster_id',
                        sa.Integer,
                        nullable=False,
                        index=True
                    ),
                    sa.Column(
                        'created_date',
                        sa.Date,
                        nullable=False,
                        index=True
                    ),
                    sa.Column(
                        'updated_time',
                        sa.Time,
                        nullable=False
                    ),
                    sa.Column(
                        'resource_type',
                        sa.Enum('vm',
                                'tenant',
                                'volume',
                                'security_group',
                                'keystone_user',
                                'flavor',
                                'cluster_stats',
                                'image',
                                name='oswl_resource_type'),
                        nullable=False,
                        index=True
                    ),
                    sa.Column(
                        'resource_data',
                        fields.JSON(),
                        nullable=True
                    ),
                    sa.Column(
                        'resource_checksum',
                        sa.Text,
                        nullable=True
                    ),
                    sa.Column(
                        'is_sent',
                        sa.Boolean,
                        nullable=False,
                        default=False,
                        index=True
                    ),
                    sa.PrimaryKeyConstraint('id'))

    # Recreate node FK constraints with ON DELETE CASCADE so role and
    # attribute rows disappear together with their node.
    op.drop_constraint('node_roles_node_fkey', 'node_roles')
    op.create_foreign_key(
        'node_roles_node_fkey', 'node_roles', 'nodes', ['node'], ['id'],
        ondelete='CASCADE')
    op.drop_constraint('pending_node_roles_node_fkey', 'pending_node_roles')
    op.create_foreign_key(
        'pending_node_roles_node_fkey', 'pending_node_roles', 'nodes',
        ['node'], ['id'], ondelete='CASCADE')
    op.drop_constraint('node_attributes_node_id_fkey', 'node_attributes')
    op.create_foreign_key(
        'node_attributes_node_id_fkey', 'node_attributes', 'nodes',
        ['node_id'], ['id'], ondelete='CASCADE')

    # Introduce linux bonds
    upgrade_enum(
        'node_bond_interfaces',  # table
        'mode',                  # column
        'bond_mode',             # ENUM name
        bond_modes_old,          # old options
        bond_modes_new,          # new options
    )
    # Add bond properties (replaces the old 'flags' column)
    op.drop_column('node_bond_interfaces', 'flags')
    op.add_column(
        'node_bond_interfaces',
        sa.Column('bond_properties',
                  fields.JSON(),
                  nullable=False,
                  server_default='{}'))
    # Add interface properties
    op.add_column(
        'node_nic_interfaces',
        sa.Column('interface_properties',
                  fields.JSON(),
                  nullable=False,
                  server_default='{}'))
    op.add_column(
        'node_bond_interfaces',
        sa.Column('interface_properties',
                  fields.JSON(),
                  nullable=False,
                  server_default='{}'))

    # Copy orchestrator data into release attributes, then drop the
    # now-redundant table.
    move_orchestrator_data_to_attributes(connection)
    op.drop_table('release_orchestrator_data')

    # Plugins migrations
    op.add_column(
        'plugins',
        sa.Column(
            'groups', fields.JSON(), nullable=False, server_default='[]'))
    op.add_column(
        'plugins',
        sa.Column(
            'authors', fields.JSON(), nullable=False, server_default='[]'))
    op.add_column(
        'plugins',
        sa.Column(
            'licenses', fields.JSON(), nullable=False, server_default='[]'))
    op.add_column(
        'plugins',
        sa.Column('homepage', sa.Text(), nullable=True))
def downgrade_schema():
    """Revert all 6.1 schema changes, mirroring upgrade_schema() in
    reverse order.

    Note: upgrade_enum(table, column, name, old, new) is reused here
    with the 6.1 option set passed as the *current* values and the 6.0
    set as the *target* values.
    """
    # Drop interface properties
    op.drop_column('node_bond_interfaces', 'interface_properties')
    op.drop_column('node_nic_interfaces', 'interface_properties')
    # Restore the pre-6.1 'flags' column instead of bond properties
    op.drop_column('node_bond_interfaces', 'bond_properties')
    op.add_column(
        'node_bond_interfaces',
        sa.Column('flags', fields.JSON(), nullable=True))
    # Remove linux bonds
    upgrade_enum(
        'node_bond_interfaces',  # table
        'mode',                  # column
        'bond_mode',             # ENUM name
        bond_modes_new,          # current (6.1) options
        bond_modes_old,          # target (6.0) options
    )
    # OpenStack workload statistics
    op.drop_table('oswl_stats')
    drop_enum('oswl_resource_type')
    upgrade_enum(
        "cluster_changes",       # table
        "name",                  # column
        "possible_changes",      # ENUM name
        cluster_changes_new,     # current (6.1) options
        cluster_changes_old,     # target (6.0) options
    )
    upgrade_enum(
        'releases',              # table
        'state',                 # column
        'release_state',         # ENUM name
        release_states_new,      # current (6.1) options
        release_states_old,      # target (6.0) options
    )
    upgrade_enum(
        "nodes",                 # table
        "status",                # column
        "node_status",           # ENUM name
        node_statuses_new,       # current (6.1) options
        node_statuses_old        # target (6.0) options
    )
    # Recreate the table dropped by the upgrade; its row contents are
    # not restored (data downgrade is a no-op).
    op.create_table(
        'release_orchestrator_data',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('release_id', sa.Integer(), nullable=False),
        sa.Column('repo_metadata', fields.JSON(), nullable=False),
        sa.Column(
            'puppet_manifests_source', sa.Text(), nullable=False),
        sa.Column(
            'puppet_modules_source', sa.Text(), nullable=False),
        sa.ForeignKeyConstraint(['release_id'], ['releases.id'], ),
        sa.PrimaryKeyConstraint('id'))
    op.drop_table('vmware_attributes')
    op.drop_column('releases', 'vmware_attributes_metadata')
    op.drop_column('clusters', 'deployment_tasks')
    op.drop_column('node_nic_interfaces', 'driver')
    op.drop_column('node_nic_interfaces', 'bus_info')
    op.drop_column('releases', 'deployment_tasks')
    # Recreate node FK constraints without ON DELETE CASCADE.
    op.drop_constraint('node_roles_node_fkey', 'node_roles')
    op.create_foreign_key(
        'node_roles_node_fkey', 'node_roles', 'nodes', ['node'], ['id'])
    op.drop_constraint('pending_node_roles_node_fkey', 'pending_node_roles')
    op.create_foreign_key(
        'pending_node_roles_node_fkey', 'pending_node_roles', 'nodes',
        ['node'], ['id'])
    op.drop_constraint('node_attributes_node_id_fkey', 'node_attributes')
    op.create_foreign_key(
        'node_attributes_node_id_fkey', 'node_attributes', 'nodes',
        ['node_id'], ['id'])
    op.drop_column('ip_addrs', 'vip_type')
    drop_enum('network_vip_types')
    # Plugins table changes
    op.drop_column('plugins', 'groups')
    op.drop_column('plugins', 'authors')
    op.drop_column('plugins', 'licenses')
    op.drop_column('plugins', 'homepage')
def upgrade_data():
    """Migrate per-release JSON metadata and master node settings.

    For every release row the roles/attributes/networks JSON blobs are
    run through the 6.0 -> 6.1 converters (using the module-level
    _limits_to_update, _new_role_restrictions and _bonding_metadata
    data) and written back.
    """
    connection = op.get_bind()
    select = text(
        """SELECT id, roles_metadata, attributes_metadata, networks_metadata
        from releases""")
    update = text(
        """UPDATE releases
        SET roles_metadata = :roles, attributes_metadata = :attrs,
        networks_metadata = :networks
        WHERE id = :id""")
    r = connection.execute(select)
    for release in r:
        # release = (id, roles_metadata, attributes_metadata,
        #            networks_metadata)
        roles_meta = upgrade_role_limits_6_0_to_6_1(
            jsonutils.loads(release[1]),
            _limits_to_update)
        roles_meta = upgrade_role_restrictions_6_0_to_6_1(
            roles_meta,
            _new_role_restrictions)
        attributes_meta = upgrade_attributes_metadata_6_0_to_6_1(
            jsonutils.loads(release[2]))
        networks_meta = upgrade_networks_metadata_to_6_1(
            jsonutils.loads(release[3]), _bonding_metadata)
        connection.execute(
            update,
            id=release[0],
            roles=jsonutils.dumps(roles_meta),
            attrs=jsonutils.dumps(attributes_meta),
            networks=jsonutils.dumps(networks_meta),
        )

    upgrade_master_node_settings(connection)
    upgrade_6_0_to_6_1_plugins_cluster_attrs_use_ids_mapping(connection)
def downgrade_data():
    """Intentionally a no-op: data changes made by upgrade_data() are
    not reverted on downgrade."""
def upgrade_master_node_settings(connection):
    """Run the 6.0 -> 6.1 settings converter over every master node row.

    Each row's JSON settings blob is loaded, converted via
    upgrade_master_node_settings_6_0_to_6_1() and written back under the
    same master_node_uid.
    """
    query = text(
        "SELECT master_node_uid, settings FROM master_node_settings"
    )
    save = text(
        """UPDATE master_node_settings
        SET settings = :settings
        WHERE master_node_uid = :uid""")
    for uid, raw_settings in connection.execute(query):
        migrated = upgrade_master_node_settings_6_0_to_6_1(
            jsonutils.loads(raw_settings))
        connection.execute(
            save,
            uid=uid,
            settings=jsonutils.dumps(migrated))
# Per-role node-count limits merged into roles_metadata by
# upgrade_role_limits_6_0_to_6_1(). 'condition' strings are evaluated
# against cluster/settings state by nailgun's expression engine.
_limits_to_update = {
    'controller': {
        'min': 1,
        'overrides': [
            {
                'condition': "cluster:mode == 'multinode'",
                'max': 1,
                'message': (
                    "Multi-node environment can not have more than "
                    "one controller node.")
            },
            {
                'condition': "cluster:mode == 'ha_compact'",
                'recommended': 3,
                'message': (
                    "At least 3 controller nodes are recommended for "
                    "HA deployment.")
            }
        ]
    },
    'compute': {
        'recommended': 1
    },
    'cinder': {
        'overrides': [
            {
                'condition': "settings:storage.volumes_lvm.value == true",
                'recommended': 1,
                'message': (
                    "At least 1 Cinder node is recommended when "
                    "Cinder LVM is selected")
            }
        ]
    },
    'ceph-osd': {
        'overrides': [
            {
                'condition': "settings:storage.volumes_ceph.value == true",
                'min': "settings:storage.osd_pool_size.value"
            },
            {
                'condition': "settings:storage.images_ceph.value == true",
                'min': 1
            }
        ]
    },
    'mongo': {
        'min': 1,
        'overrides': [
            {
                'condition': "cluster:mode != 'ha_compact'",
                'max': 1,
                'message': (
                    "At most 1 MongoDB node can be added for non-HA "
                    "deployment")
            },
            {
                'condition': "cluster:mode == 'ha_compact'",
                'recommended': 3,
                'message': (
                    "At least 3 MongoDB nodes are recommended for HA "
                    "deployment.")
            }
        ]
    },
    'zabbix-server': {
        'max': 1
    }
}

# New per-role restrictions merged into roles_metadata by
# upgrade_role_restrictions_6_0_to_6_1().
_new_role_restrictions = {
    'compute': [
        {
            'condition': "settings:common.libvirt_type.value == 'vcenter'",
            'message': (
                "Computes cannot be added to environment with "
                "vCenter hypervisor")
        }
    ],
    'cinder': [
        {
            'condition': "settings:storage.volumes_lvm.value == false",
            'message': "Cinder LVM should be enabled in settings"
        },
        # NOTE: https://bugs.launchpad.net/fuel/+bug/1372914 - Prohibit
        # possibility of adding cinder nodes to an environment with Ceph RBD
        {
            'condition': "settings:storage.volumes_ceph.value == true",
            'message': "Ceph RBD cannot be used with Cinder"
        }
    ],
    'ceph-osd': [
        {
            'condition': (
                "settings:common.libvirt_type.value == 'vcenter' "
                "and settings:storage.images_ceph.value == false"),
            'message': "Ceph RBD for images should be enabled in settings."
        },
        # NOTE: we want a disjoint condition from the one with vCenter so user
        # will not get 2 messages at once in case when vCenter is selected
        # and images_ceph.value is false
        {
            'condition': (
                "settings:common.libvirt_type.value != 'vcenter' "
                "and settings:storage.volumes_ceph.value == false "
                "and settings:storage.images_ceph.value == false"),
            'message': "Ceph must be enabled in settings"
        }
    ]
}

# Bonding metadata merged into networks_metadata by
# upgrade_networks_metadata_to_6_1(); describes when OVS/linux bonding
# is available and which OVS modes are offered.
_bonding_metadata = {
    "availability": [
        {"ovs": "'experimental' in version:feature_groups and "
                "cluster:net_provider != 'neutron' and "
                "settings:storage.iser.value == false and "
                "settings:neutron_mellanox.plugin.value != 'ethernet'"},
        {"linux": "false"}
    ],
    "properties": {
        "ovs": {
            "mode": [
                {
                    "values": ["active-backup", "balance-slb",
                               "lacp-balance-tcp"]
                }
            ]
        }
    }
}
| |
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import datetime
import freezegun
import os
import random
from ggrc import app # noqa
from ggrc import db
from ggrc_workflows import models
from ggrc_workflows import start_recurring_cycles
from ggrc_workflows.services.workflow_cycle_calculator import \
get_cycle_calculator
from integration.ggrc_workflows.workflow_cycle_calculator import \
base_workflow_test_case
# Pin the RNG on CI so failures caused by self.random_objects are
# reproducible locally.
if os.environ.get('TRAVIS', False):
  random.seed(1)  # so we can reproduce the tests if needed
class TestAnnuallyWorkflow(base_workflow_test_case.BaseWorkflowTestCase):
  """Cycle-calculator tests for annually-recurring workflows.

  All tests freeze time with freezegun so that next_cycle_start_date
  (NCSD) and cycle start/end dates can be asserted as exact values.
  Weekend dates are expected to be adjusted to the nearest workday by
  the cycle calculator.
  """

  def test_annually_workflow(self):
    """Basic annual workflow test.
    """
    annually_wf = {
        "title": "annually thingy",
        "description": "start this many a time",
        "frequency": "annually",
        "task_groups": [{
            "title": "task group",
            "task_group_tasks": [
                {
                    'title': 'annual task 1',
                    "relative_start_day": 10,  # 6/10/2015 Wed
                    "relative_start_month": 6,
                    "relative_end_day": 25,  # 6/25/2015 Thu
                    "relative_end_month": 6,
                },
                {
                    'title': 'annual task 2',
                    "relative_start_day": 15,  # 6/15/2015 Mon
                    "relative_start_month": 6,
                    "relative_end_day": 9,  # 8/9/2015 Sun
                    "relative_end_month": 8,
                }],
            "task_group_objects": self.random_objects
        },
        ]
    }
    with freezegun.freeze_time("2015-6-8 13:00:00"):  # Mon, 6/8/2015
      _, wf = self.generator.generate_workflow(annually_wf)
      _, awf = self.generator.activate_workflow(wf)

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.status, "Active")
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2015, 6, 10))

    with freezegun.freeze_time("2015-6-10 13:00:00"):  # Wed, 6/10/2015
      start_recurring_cycles()

      cycle = db.session.query(models.Cycle).filter(
          models.Cycle.workflow_id == wf.id).one()
      self.assertEqual(cycle.start_date, datetime.date(2015, 6, 10))
      # Because end date is on Sunday, relative start day will have to be
      # adjusted
      self.assertEqual(cycle.end_date, datetime.date(2015, 8, 7))

      _, cycle = self.generator.generate_cycle(wf)  # 2016
      _, cycle = self.generator.generate_cycle(wf)  # 2017
      _, cycle = self.generator.generate_cycle(wf)  # 2018

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2019, 6, 10))
      self.assertEqual(cycle.start_date, datetime.date(2018, 6, 8))
      self.assertEqual(cycle.end_date, datetime.date(2018, 8, 9))

  def test_type_casting(self):
    """Verify type casting for string input

    Test if string values get converted correctly to integers
    and arithmetic works"""
    annually_wf = {
        "title": "annually thingy",
        "description": "start this many a time",
        "frequency": "annually",
        "task_groups": [{
            "title": "task group",
            "task_group_tasks": [],
            "task_group_objects": self.random_objects
        },
        ]
    }
    # Relative days/months deliberately given as strings.
    task = {
        'title': 'annual task 1',
        "relative_start_day": "10",  # 6/10/2015 Wed
        "relative_start_month": "6",
        "relative_end_day": "25",  # 6/25/2015 Thu
        "relative_end_month": "6",
    }
    with freezegun.freeze_time("2015-7-1 13:00"):
      _, wf = self.generator.generate_workflow(annually_wf)
      task_group = db.session.query(models.TaskGroup).filter(
          models.TaskGroup.workflow_id == wf.id).one()
      _, tgt = self.generator.generate_task_group_task(task_group, data=task)
      _, awf = self.generator.activate_workflow(wf)

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.status, "Active")
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2016, 6, 10))

    with freezegun.freeze_time("2016-6-10 13:00"):
      start_recurring_cycles()

      cycle = db.session.query(models.Cycle).filter(
          models.Cycle.workflow_id == wf.id).one()
      self.assertEqual(cycle.start_date, datetime.date(2016, 6, 10))
      self.assertEqual(cycle.end_date, datetime.date(
          2016, 6, 24))  # 6/25/2015 is Sat
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2017, 6, 9))  # 6/10/2017 is Sat

  def test_task_order(self):
    """Tasks must be ordered chronologically, not by creation order."""
    annually_wf = {
        "title": "annually thingy",
        "description": "start this many a time",
        "frequency": "annually",
        "task_groups": [{
            "title": "task group",
            "task_group_tasks": [
                {
                    'title': 'annual task 1',
                    "relative_start_day": 21,  # 6/21/2015
                    "relative_start_month": 6,
                    "relative_end_day": 25,  # 6/25/2015 Thu
                    "relative_end_month": 6,
                },
                {
                    'title': 'annual task 2',
                    "relative_start_day": 11,  # 6/11/2015 Thu
                    "relative_start_month": 6,
                    "relative_end_day": 16,  # 6/16/2015 Tue
                    "relative_end_month": 6,
                },
                {
                    'title': 'annual task 6',
                    "relative_start_day": 2,  # 7/2/2015 Thu
                    "relative_start_month": 7,
                    "relative_end_day": 15,  # 7/15/2015 Wed
                    "relative_end_month": 7,
                },
                {
                    'title': 'annual task 3',
                    "relative_start_day": 3,  # 6/3/2015 Wed
                    "relative_start_month": 6,
                    "relative_end_day": 15,  # 6/15/2015 Mon
                    "relative_end_month": 6,
                },
                {
                    'title': 'annual task 4',
                    "relative_start_day": 8,  # 6/8/2015 Mon
                    "relative_start_month": 6,
                    "relative_end_day": 15,  # 6/15/2015 Mon
                    "relative_end_month": 6,
                },
                {
                    'title': 'annual task 5',
                    "relative_start_day": 2,  # 7/2/2015 Thu
                    "relative_start_month": 6,
                    "relative_end_day": 15,  # 6/15/2015 Mon
                    "relative_end_month": 6,
                }],
            "task_group_objects": self.random_objects
        },
        ]
    }
    with freezegun.freeze_time("2015-06-01 13:00"):
      _, wf = self.generator.generate_workflow(annually_wf)
      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      calculator = get_cycle_calculator(active_wf)
      # Sorted by (month, day): 6/2, 6/3, 6/8, 6/11, 6/21, 7/2.
      self.assertEqual([2, 3, 8, 11, 21, 2], [
          task.relative_start_day for task in calculator.tasks])

  def test_adding_task_with_lesser_start_day_after_activating_workflow(self):
    """Test if NCSD gets updated correctly if user adds new task with lesser
    relative start day after workflow has already been activated."""
    annually_wf = {
        "title": "annually thingy",
        "description": "start this many a time",
        "frequency": "annually",
        "task_groups": [{
            "title": "task group",
            "task_group_tasks": [
                {
                    'title': 'annually task 1',
                    "relative_start_day": 30,
                    "relative_start_month": 7,
                    "relative_end_day": 7,
                    "relative_end_month": 8,
                }],
            "task_group_objects": []
        },
        ]
    }
    task = {
        'title': 'annually task 2',
        "relative_start_day": 20,
        "relative_start_month": 7,
        "relative_end_day": 22,
        "relative_end_month": 7,
    }
    with freezegun.freeze_time("2015-07-27 13:00"):
      _, wf = self.generator.generate_workflow(annually_wf)
      _, awf = self.generator.activate_workflow(wf)

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.status, "Active")
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2015, 7, 30))

      _, cycle = self.generator.generate_cycle(wf)
      self.assertEqual(cycle.start_date, datetime.date(2015, 7, 30))
      self.assertEqual(cycle.end_date, datetime.date(2015, 8, 7))

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2016, 7, 29))

      # We add another task that starts on 20th
      task_group = db.session.query(models.TaskGroup).filter(
          models.TaskGroup.workflow_id == wf.id).one()
      _, tgt = self.generator.generate_task_group_task(task_group, data=task)

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2016, 7, 20))

  def test_start_workflow_mid_cycle_with_task_before_and_after(self):
    """Activating mid-cycle creates a current cycle spanning all tasks."""
    annually_wf = {
        "title": "annually thingy",
        "description": "start this many a time",
        "frequency": "annually",
        "task_groups": [{
            "title": "task group",
            "task_group_tasks": [
                {
                    'title': 'annually task 1',
                    "relative_start_day": 1,
                    "relative_start_month": 7,
                    "relative_end_day": 1,
                    "relative_end_month": 7,
                }, {
                    'title': 'annually task 2',
                    "relative_start_day": 2,
                    "relative_start_month": 7,
                    "relative_end_day": 2,
                    "relative_end_month": 7,
                }, {
                    'title': 'annually task 3',
                    "relative_start_day": 3,
                    "relative_start_month": 7,
                    "relative_end_day": 3,
                    "relative_end_month": 7,
                }, {
                    'title': 'annually task 4',
                    "relative_start_day": 4,
                    "relative_start_month": 7,
                    "relative_end_day": 4,
                    "relative_end_month": 7,
                }, {
                    'title': 'annually task 5',
                    "relative_start_day": 6,
                    "relative_start_month": 7,
                    "relative_end_day": 6,
                    "relative_end_month": 7,
                }],
            "task_group_objects": []
        },
        ]
    }
    # Freeze date (7/3) falls between the first and last task.
    with freezegun.freeze_time("2015-07-03 13:00"):
      _, wf = self.generator.generate_workflow(annually_wf)
      _, awf = self.generator.activate_workflow(wf)

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.status, "Active")
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2016, 7, 1))

      cycle = db.session.query(models.Cycle).filter(
          models.Cycle.workflow_id == wf.id).one()
      self.assertEqual(cycle.start_date, datetime.date(2015, 7, 1))
      self.assertEqual(cycle.end_date, datetime.date(2015, 7, 6))

      _, cycle = self.generator.generate_cycle(wf)
      self.assertEqual(cycle.start_date, datetime.date(2016, 7, 1))
      self.assertEqual(cycle.end_date, datetime.date(2016, 7, 6))

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2017, 6, 30))

  def test_delete_all_ts_after_cs_were_already_created_and_create_new_tg(self):
    """Check that workflow doesn't reset next cycle start date when
    all tasks are deleted after cycles were already created"""
    annually_wf = {
        "title": "annually thingy",
        "description": "start this many a time",
        "frequency": "annually",
        "task_groups": [{
            "title": "task group",
            "task_group_tasks": [
                {
                    'title': 'annually task 1',
                    "relative_start_day": 15,
                    "relative_start_month": 7,
                    "relative_end_day": 19,
                    "relative_end_month": 8,
                }],
            "task_group_objects": []
        },
        ]
    }
    new_task_group = {
        "title": "task group 2",
        'task_group_tasks': [
            {
                'title': 'annually task 1',
                "relative_start_day": 13,
                "relative_start_month": 7,
                "relative_end_day": 17,
                "relative_end_month": 7,
            }],
        "task_group_objects": []
    }
    with freezegun.freeze_time("2015-6-9 13:00:00"):  # Tuesday, 6/9/2015
      _, wf = self.generator.generate_workflow(annually_wf)
      _, awf = self.generator.activate_workflow(wf)

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.status, "Active")
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2015, 7, 15))

      _, cycle = self.generator.generate_cycle(wf)
      self.assertEqual(cycle.start_date, datetime.date(2015, 7, 15))
      self.assertEqual(cycle.end_date, datetime.date(2015, 8, 19))

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2016, 7, 15))

      _, cycle = self.generator.generate_cycle(wf)  # 2016
      _, cycle = self.generator.generate_cycle(wf)  # 2017

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2018, 7, 13))

      # Deleting the only task group clears NCSD but keeps the
      # workflow active.
      tg = db.session.query(models.TaskGroup).filter(
          models.TaskGroup.workflow_id == wf.id).one()
      response = self.generator.api.delete(tg)
      self.assert200(response)

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.status, "Active")
      self.assertEqual(active_wf.next_cycle_start_date, None)

      _, tg = self.generator.generate_task_group(wf, data=new_task_group)
      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2018, 7, 13))

      _, cycle = self.generator.generate_cycle(wf)
      self.assertEqual(cycle.start_date, datetime.date(2018, 7, 13))
      self.assertEqual(cycle.end_date, datetime.date(2018, 7, 17))

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2019, 7, 12))

  def test_workflow_mid_cycle_verify(self):
    """Activating in the middle of a task sequence backdates the cycle
    start to the (weekend-adjusted) first task start."""
    annually_wf = {
        "title": "annually thingy",
        "description": "start this many a time",
        "frequency": "annually",
        "task_groups": [{
            "title": "task group",
            "task_group_tasks": [
                {
                    'title': 'annual task 1',
                    "relative_start_day": 1,
                    "relative_start_month": 8,
                    "relative_end_day": 4,
                    "relative_end_month": 8,
                },
                {
                    'title': 'annual task 2',
                    "relative_start_day": 5,
                    "relative_start_month": 8,
                    "relative_end_day": 8,
                    "relative_end_month": 8,
                },
                {
                    'title': 'annual task 3',
                    "relative_start_day": 9,
                    "relative_start_month": 8,
                    "relative_end_day": 15,
                    "relative_end_month": 8,
                },
                {
                    'title': 'annual task 4',
                    "relative_start_day": 16,
                    "relative_start_month": 8,
                    "relative_end_day": 19,
                    "relative_end_month": 8,
                },
                {
                    'title': 'annual task 5',
                    "relative_start_day": 20,
                    "relative_start_month": 8,
                    "relative_end_day": 23,
                    "relative_end_month": 8,
                }],
            "task_group_objects": []
        },
        ]
    }
    with freezegun.freeze_time("2015-8-10 13:00"):
      _, wf = self.generator.generate_workflow(annually_wf)
      _, awf = self.generator.activate_workflow(wf)

      active_wf = db.session.query(models.Workflow).filter(
          models.Workflow.id == wf.id).one()
      self.assertEqual(active_wf.status, "Active")
      self.assertEqual(active_wf.next_cycle_start_date,
                       datetime.date(2016, 8, 1))

      cycle = db.session.query(models.Cycle).filter(
          models.Cycle.workflow_id == wf.id).one()
      # 8/1/2015 is a Saturday, so the cycle starts on Friday 7/31.
      self.assertEqual(cycle.start_date, datetime.date(2015, 7, 31))
      self.assertEqual(cycle.end_date, datetime.date(2015, 8, 21))
| |
from __future__ import absolute_import
# Copyright (c) 2010-2015 openpyxl
"""
Enclosing chart object. The various chart types are actually child objects.
Will probably need to call this indirectly
"""
from openpyxl.compat import unicode
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
Bool,
Float,
Typed,
MinMax,
Integer,
NoneSet,
String,
Alias,
Sequence,
)
from openpyxl.descriptors.excel import (
Percentage,
ExtensionList
)
from openpyxl.descriptors.nested import (
NestedBool,
NestedNoneSet,
NestedInteger,
NestedString,
NestedMinMax,
NestedText,
)
from openpyxl.drawing.colors import ColorMapping
from .text import Text, RichText
from .layout import Layout
from .shapes import ShapeProperties
from .legend import Legend
from .marker import PictureOptions, Marker
from .label import DataLabel
from ._3d import _3DBase, View3D
from .area_chart import AreaChart, AreaChart3D
from .bar_chart import BarChart, BarChart3D
from .bubble_chart import BubbleChart
from .line_chart import LineChart, LineChart3D
from .pie_chart import PieChart, PieChart3D, ProjectedPieChart, DoughnutChart
from .radar_chart import RadarChart
from .scatter_chart import ScatterChart
from .stock_chart import StockChart
from .surface_chart import SurfaceChart, SurfaceChart3D
from .axis import NumericAxis, TextAxis, SeriesAxis, DateAxis
from .title import Title
from openpyxl.xml.functions import Element
from openpyxl.worksheet.page import PageMargins, PrintPageSetup
from openpyxl.worksheet.header_footer import HeaderFooter
class PivotFormat(Serialisable):
tagname = "pivotFmt"
idx = NestedInteger(nested=True)
spPr = Typed(expected_type=ShapeProperties, allow_none=True)
ShapeProperties = Alias("spPr")
txPr = Typed(expected_type=RichText, allow_none=True)
TextBody = Alias("txPr")
marker = Typed(expected_type=Marker, allow_none=True)
dLbl = Typed(expected_type=DataLabel, allow_none=True)
DataLabel = Alias("dLbl")
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('idx', 'spPr', 'txPr', 'marker', 'dLbl')
def __init__(self,
idx=0,
spPr=None,
txPr=None,
marker=None,
dLbl=None,
extLst=None,
):
self.idx = idx
self.spPr = spPr
self.txPr = txPr
self.marker = marker
self.dLbl = dLbl
class PivotFormats(Serialisable):
tagname = "pivotFmts"
pivotFmt = Sequence(expected_type=PivotFormat, allow_none=True)
__elements__ = ('pivotFmt',)
def __init__(self,
pivotFmt=(),
):
self.pivotFmt = pivotFmt
class DataTable(Serialisable):
tagname = "dTable"
showHorzBorder = NestedBool(allow_none=True)
showVertBorder = NestedBool(allow_none=True)
showOutline = NestedBool(allow_none=True)
showKeys = NestedBool(allow_none=True)
spPr = Typed(expected_type=ShapeProperties, allow_none=True)
ShapeProperties = Alias('spPr')
txPr = Typed(expected_type=RichText, allow_none=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('showHorzBorder', 'showVertBorder', 'showOutline',
'showKeys', 'spPr', 'txPr')
def __init__(self,
showHorzBorder=None,
showVertBorder=None,
showOutline=None,
showKeys=None,
spPr=None,
txPr=None,
extLst=None,
):
self.showHorzBorder = showHorzBorder
self.showVertBorder = showVertBorder
self.showOutline = showOutline
self.showKeys = showKeys
self.spPr = spPr
self.txPr = txPr
class PlotArea(Serialisable):
tagname = "plotArea"
layout = Typed(expected_type=Layout, allow_none=True)
dTable = Typed(expected_type=DataTable, allow_none=True)
spPr = Typed(expected_type=ShapeProperties, allow_none=True)
graphical_properties = Alias("spPr")
extLst = Typed(expected_type=ExtensionList, allow_none=True)
# at least one chart
areaChart = Typed(expected_type=AreaChart, allow_none=True)
area3DChart = Typed(expected_type=AreaChart3D, allow_none=True)
lineChart = Typed(expected_type=LineChart, allow_none=True)
line3DChart = Typed(expected_type=LineChart3D, allow_none=True)
stockChart = Typed(expected_type=StockChart, allow_none=True)
radarChart = Typed(expected_type=RadarChart, allow_none=True)
scatterChart = Typed(expected_type=ScatterChart, allow_none=True)
pieChart = Typed(expected_type=PieChart, allow_none=True)
pie3DChart = Typed(expected_type=PieChart3D, allow_none=True)
doughnutChart = Typed(expected_type=DoughnutChart, allow_none=True)
barChart = Typed(expected_type=BarChart, allow_none=True)
bar3DChart = Typed(expected_type=BarChart3D, allow_none=True)
ofPieChart = Typed(expected_type=ProjectedPieChart, allow_none=True)
surfaceChart = Typed(expected_type=SurfaceChart, allow_none=True)
surface3DChart = Typed(expected_type=SurfaceChart3D, allow_none=True)
bubbleChart = Typed(expected_type=BubbleChart, allow_none=True)
# maybe axes
valAx = Sequence(expected_type=NumericAxis, allow_none=True)
catAx = Sequence(expected_type=TextAxis, allow_none=True)
dateAx = Sequence(expected_type=DateAxis, allow_none=True)
serAx = Sequence(expected_type=SeriesAxis, allow_none=True)
__elements__ = ('layout', 'areaChart', 'area3DChart', 'lineChart',
'line3DChart', 'stockChart', 'radarChart', 'scatterChart', 'pieChart',
'pie3DChart', 'doughnutChart', 'barChart', 'bar3DChart', 'ofPieChart',
'surfaceChart', 'surface3DChart', 'bubbleChart', 'valAx', 'catAx', 'dateAx', 'serAx',
'dTable', 'spPr')
def __init__(self,
layout=None,
dTable=None,
spPr=None,
areaChart=None,
area3DChart=None,
lineChart=None,
line3DChart=None,
stockChart=None,
radarChart=None,
scatterChart=None,
pieChart=None,
pie3DChart=None,
doughnutChart=None,
barChart=None,
bar3DChart=None,
ofPieChart=None,
surfaceChart=None,
surface3DChart=None,
bubbleChart=None,
valAx=(),
catAx=(),
serAx=(),
dateAx=(),
extLst=None,
):
self.layout = layout
self.dTable = dTable
self.spPr = spPr
self.areaChart = areaChart
self.area3DChart = area3DChart
self.lineChart = lineChart
self.line3DChart = line3DChart
self.stockChart = stockChart
self.radarChart = radarChart
self.scatterChart = scatterChart
self.pieChart = pieChart
self.pie3DChart = pie3DChart
self.doughnutChart = doughnutChart
self.barChart = barChart
self.bar3DChart = bar3DChart
self.ofPieChart = ofPieChart
self.surfaceChart = surfaceChart
self.surface3DChart = surface3DChart
self.bubbleChart = bubbleChart
self.valAx = valAx
self.catAx = catAx
self.dateAx = dateAx
self.serAx = serAx
self._charts = []
def to_tree(self, tagname=None, idx=None):
if tagname is None:
tagname = self.tagname
el = Element(tagname)
if self.layout is not None:
el.append(self.layout.to_tree())
for chart in self._charts:
el.append(chart.to_tree())
for ax in ['valAx', 'catAx', 'dateAx', 'serAx',]:
seq = getattr(self, ax)
if seq:
for obj in seq:
el.append(obj.to_tree())
for attr in ['dTable', 'spPr']:
obj = getattr(self, attr)
if obj is not None:
el.append(obj.to_tree())
return el
class ChartContainer(Serialisable):
tagname = "chart"
title = Typed(expected_type=Title, allow_none=True)
autoTitleDeleted = NestedBool(allow_none=True)
pivotFmts = Typed(expected_type=PivotFormats, allow_none=True)
view3D = _3DBase.view3D
floor = _3DBase.floor
sideWall = _3DBase.sideWall
backWall = _3DBase.backWall
plotArea = Typed(expected_type=PlotArea, )
legend = Typed(expected_type=Legend, allow_none=True)
plotVisOnly = NestedBool(allow_none=True)
dispBlanksAs = NestedNoneSet(values=(['span', 'gap', 'zero']))
showDLblsOverMax = NestedBool(allow_none=True)
extLst = Typed(expected_type=ExtensionList, allow_none=True)
__elements__ = ('title', 'autoTitleDeleted', 'pivotFmts', 'view3D',
'floor', 'sideWall', 'backWall', 'plotArea', 'legend', 'plotVisOnly',
'dispBlanksAs', 'showDLblsOverMax')
def __init__(self,
title=None,
autoTitleDeleted=None,
pivotFmts=None,
view3D=None,
floor=None,
sideWall=None,
backWall=None,
plotArea=None,
legend=None,
plotVisOnly=None,
dispBlanksAs="gap",
showDLblsOverMax=None,
extLst=None,
):
self.title = title
self.autoTitleDeleted = autoTitleDeleted
self.pivotFmts = pivotFmts
self.view3D = view3D
self.floor = floor
self.sideWall = sideWall
self.backWall = backWall
if plotArea is None:
plotArea = PlotArea()
self.plotArea = plotArea
self.legend = legend
self.plotVisOnly = plotVisOnly
self.dispBlanksAs = dispBlanksAs
self.showDLblsOverMax = showDLblsOverMax
class Protection(Serialisable):
    """Chart protection flags (<c:protection>); every child is an optional
    nested boolean element."""

    tagname = "protection"

    chartObject = NestedBool(allow_none=True)
    data = NestedBool(allow_none=True)
    formatting = NestedBool(allow_none=True)
    selection = NestedBool(allow_none=True)
    userInterface = NestedBool(allow_none=True)

    # Child elements in schema order.
    __elements__ = ("chartObject", "data", "formatting", "selection", "userInterface")

    def __init__(self,
                 chartObject=None,
                 data=None,
                 formatting=None,
                 selection=None,
                 userInterface=None,
                 ):
        self.chartObject = chartObject
        self.data = data
        self.formatting = formatting
        self.selection = selection
        self.userInterface = userInterface
class PivotSource(Serialisable):
    """Reference to the pivot table a pivot chart is based on
    (<c:pivotSource>)."""

    tagname = "pivotSource"

    name = NestedText(expected_type=unicode)  # `unicode`: this is Python 2 code
    fmtId = NestedText(expected_type=int)
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    # Child elements in schema order; extLst is not serialised.
    __elements__ = ('name', 'fmtId')

    def __init__(self,
                 name=None,
                 fmtId=None,
                 extLst=None,
                 ):
        self.name = name
        self.fmtId = fmtId
        # NOTE: extLst is accepted for schema compatibility but not stored.
class ExternalData(Serialisable):
    """Link to the chart's external data source (<c:externalData>)."""

    tagname = "externalData"

    autoUpdate = NestedBool(allow_none=True)
    # Needs namespace -- presumably the r:id relationship attribute; a plain
    # String() will not carry the namespace prefix. TODO confirm.
    id = String()

    def __init__(self,
                 autoUpdate=None,
                 id=None
                 ):
        self.autoUpdate = autoUpdate
        self.id = id
class RelId(Serialisable):
    # Placeholder for a relationship-id element (used by ChartSpace.userShapes);
    # not implemented yet.
    pass # todo
class PrintSettings(Serialisable):
    """Print settings for a chart sheet (<c:printSettings>): header/footer,
    page margins and page setup."""

    tagname = "printSettings"

    headerFooter = Typed(expected_type=HeaderFooter, allow_none=True)
    pageMargins = Typed(expected_type=PageMargins, allow_none=True)
    pageSetup = Typed(expected_type=PrintPageSetup, allow_none=True)

    # Child elements in schema order.
    # Bug fix: the tuple previously listed "pageMargins" twice and omitted
    # "pageSetup", so a pageSetup child was never serialised.
    __elements__ = ("headerFooter", "pageMargins", "pageSetup")

    def __init__(self,
                 headerFooter=None,
                 pageMargins=None,
                 pageSetup=None,
                 ):
        self.headerFooter = headerFooter
        self.pageMargins = pageMargins
        self.pageSetup = pageSetup
class ChartSpace(Serialisable):
    """Root element of a chart part (<c:chartSpace>), tying together the
    chart body, styling, protection, external data and print settings."""

    tagname = "chartSpace"

    date1904 = NestedBool(allow_none=True)
    lang = NestedString(allow_none=True)
    roundedCorners = NestedBool(allow_none=True)
    style = NestedInteger(allow_none=True)
    clrMapOvr = Typed(expected_type=ColorMapping, allow_none=True)
    pivotSource = Typed(expected_type=PivotSource, allow_none=True)
    protection = Typed(expected_type=Protection, allow_none=True)
    chart = Typed(expected_type=ChartContainer)  # required child
    spPr = Typed(expected_type=ShapeProperties, allow_none=True)
    txPr = Typed(expected_type=RichText, allow_none=True)
    externalData = Typed(expected_type=ExternalData, allow_none=True)
    printSettings = Typed(expected_type=PrintSettings, allow_none=True)
    userShapes = Typed(expected_type=RelId, allow_none=True)
    extLst = Typed(expected_type=ExtensionList, allow_none=True)

    # Child elements in schema order; extLst is stored (below) but absent
    # from __elements__ and therefore not serialised.
    __elements__ = ('date1904', 'lang', 'roundedCorners', 'style',
                    'clrMapOvr', 'pivotSource', 'protection', 'chart', 'spPr', 'txPr',
                    'externalData', 'printSettings', 'userShapes')

    def __init__(self,
                 date1904=None,
                 lang=None,
                 roundedCorners=None,
                 style=None,
                 clrMapOvr=None,
                 pivotSource=None,
                 protection=None,
                 chart=None,
                 spPr=None,
                 txPr=None,
                 externalData=None,
                 printSettings=None,
                 userShapes=None,
                 extLst=None,
                 ):
        self.date1904 = date1904
        self.lang = lang
        self.roundedCorners = roundedCorners
        self.style = style
        self.clrMapOvr = clrMapOvr
        self.pivotSource = pivotSource
        self.protection = protection
        self.chart = chart
        self.spPr = spPr
        self.txPr = txPr
        self.externalData = externalData
        self.printSettings = printSettings
        self.userShapes = userShapes
        self.extLst = extLst
| |
import pygtk
import numpy
pygtk.require('2.0')
from trackball import *
haveGTK_GL = True
try:
import gtk.gtkgl
from OpenGL.GL import *
from OpenGL.GLU import *
except ImportError:
haveGTK_GL = False
def ViewerOkay():
    """Return True when the GL viewer can be used, i.e. gtk.gtkgl and
    PyOpenGL were imported successfully at module load time."""
    return haveGTK_GL
#(file, pathname, desc) = imp.find_module("gtk.gtkgl.apputils")
#if (file != None):
# print "Found gtk.gtkgl.apputils. Description = " + desc
# imp.load_module("gtk.gtkgl.apputils", file, pathname, desc)
class StructureViewer(gtk.gtkgl.DrawingArea):
    """GTK OpenGL widget that renders atomic positions inside a lattice
    cell: the cell box as line segments plus one sphere per atom, with
    trackball mouse rotation."""

    def __init__(self):
        # Pick the best available framebuffer configuration.
        try:
            # try double-buffered
            glconfig = gtk.gdkgl.Config(mode=(gtk.gdkgl.MODE_RGB |
                                              gtk.gdkgl.MODE_DOUBLE |
                                              gtk.gdkgl.MODE_DEPTH))
        except gtk.gdkgl.NoMatches:
            # try single-buffered
            glconfig = gtk.gdkgl.Config(mode=(gtk.gdkgl.MODE_RGB |
                                              gtk.gdkgl.MODE_DEPTH))
        gtk.gtkgl.DrawingArea.__init__(self, glconfig)
        # Wire GTK signals to the GL lifecycle callbacks.
        self.connect_after("realize", self.init)
        self.connect("configure_event", self.reshape)
        self.connect("expose_event", self.display)
        self.connect("map_event", self.map)
        self.connect("button_press_event", self.button_press)
        self.connect("motion_notify_event", self.button_motion)
        # Demo data: two atoms and a cell, both scaled by 8.
        self.atom_pos = 8.0*numpy.array([[0.00, 0.00, 0.00],\
                                         [0.25, 0.25, 0.25]])
        self.lattice = 8.0*numpy.array([[0.50, 0.50, 0.00],\
                                        [0.50, 0.00, 0.50],\
                                        [0.00, 0.50, 0.00]])
        self.set_events(gtk.gdk.BUTTON1_MOTION_MASK |
                        gtk.gdk.BUTTON2_MOTION_MASK |
                        gtk.gdk.BUTTON3_MOTION_MASK |
                        gtk.gdk.BUTTON_PRESS_MASK |
                        gtk.gdk.BUTTON_RELEASE_MASK |
                        gtk.gdk.VISIBILITY_NOTIFY_MASK |
                        gtk.gdk.SCROLL_MASK)
        self.Scale = 1.0
        self.Distance = 3.0
        self.tb = Trackball()  # accumulates the rotation matrix

    def button_press(self, glDrawArea, event):
        """Record which mouse button went down and the press position.

        NOTE(review): the Button*Pressed flags are first created here, so a
        motion event delivered before any press would raise AttributeError
        in button_motion -- confirm whether GTK can deliver that.
        """
        if (event.button == 1):
            self.StartX = event.x
            self.StartY = event.y
            self.Button1Pressed=True
            self.Button2Pressed=False
            self.Button3Pressed=False
        elif (event.button == 2):
            self.StartX = event.x
            self.StartY = event.y
            self.Button1Pressed=False
            self.Button2Pressed=True
            self.Button3Pressed=False
            self.OldScale = self.Scale  # baseline for a future zoom gesture
        elif (event.button == 3):
            self.StartX = event.x
            self.StartY = event.y
            self.Button1Pressed=False
            self.Button2Pressed=False
            self.Button3Pressed=True
        print 'button pressed at (%3d,%3d)' % (event.x, event.y)

    def button_motion(self, glDrawArea, event):
        """Drag handler: button 1 rotates via the trackball; button 3 is a
        stub for translation."""
        if (self.Button3Pressed):
            print 'translate'
        elif (self.Button1Pressed):
            w = self.get_allocation()[2]
            h = self.get_allocation()[3]
            x = event.x
            y = event.y
            # Feed the drag from the press point to the current pointer into
            # the trackball, then repaint with the new rotation.
            self.tb.update(self.StartX, self.StartY, x, y, w, h)
            self.display(self, None)
        #print self.tb.matrix

    def set_lattice(self, lattice):
        """Store the lattice vectors and (re)build the display list drawing
        the 12 edges of the cell box, centred on the origin."""
        self.lattice = lattice
        self.BoxList = glGenLists(1)
        glNewList (self.BoxList, GL_COMPILE)
        # a[i] is lattice vector i; ma[i] is its negation.
        a = []
        ma = []
        a.append (numpy.array([lattice[0,0], lattice[0,1], lattice[0,2]]))
        a.append (numpy.array([lattice[1,0], lattice[1,1], lattice[1,2]]))
        a.append (numpy.array([lattice[2,0], lattice[2,1], lattice[2,2]]))
        ma.append(-1.0*a[0])
        ma.append(-1.0*a[1])
        ma.append(-1.0*a[2])
        # The eight corners of the cell.
        r = []
        r.append(0.5*(ma[0] + ma[1] + ma[2]));
        r.append(0.5*(ma[0] + ma[1] + a[2]));
        r.append(0.5*(ma[0] + a[1] + a[2]));
        r.append(0.5*(ma[0] + a[1] + ma[2]));
        r.append(0.5*( a[0] + ma[1] + ma[2]));
        r.append(0.5*( a[0] + ma[1] + a[2]));
        r.append(0.5*( a[0] + a[1] + a[2]));
        r.append(0.5*( a[0] + a[1] + ma[2]));
        glColor3d (0.3, 0.3, 0.3)
        c = (1.0, 1.0, 1.0, 1.0)
        glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, c)
        glLineWidth (2.0);
        # Normals of the three pairs of cell faces; the 0.5001 factor pushes
        # the clip planes just outside the cell so the box lines survive.
        p01 = numpy.cross (self.lattice[0], self.lattice[1])
        p12 = numpy.cross (self.lattice[1], self.lattice[2]);
        p20 = numpy.cross (self.lattice[2], self.lattice[0]);
        p01 = 1.0/numpy.sqrt(numpy.dot(p01, p01)) * p01;
        p12 = 1.0/numpy.sqrt(numpy.dot(p12, p12)) * p12;
        p20 = 1.0/numpy.sqrt(numpy.dot(p20, p20)) * p20;
        d01 = abs(0.5001*numpy.dot(self.lattice[2], p01));
        d12 = abs(0.5001*numpy.dot(self.lattice[0], p12));
        d20 = abs(0.5001*numpy.dot(self.lattice[1], p20));
        eqn0 = ( p01[0], p01[1], p01[2], d01);
        eqn1 = (-p01[0],-p01[1],-p01[2], d01);
        eqn2 = ( p12[0], p12[1], p12[2], d12);
        eqn3 = (-p12[0],-p12[1],-p12[2], d12);
        eqn4 = ( p20[0], p20[1], p20[2], d20);
        eqn5 = (-p20[0],-p20[1],-p20[2], d20);
        glClipPlane(GL_CLIP_PLANE0, eqn0);
        glClipPlane(GL_CLIP_PLANE1, eqn1);
        glClipPlane(GL_CLIP_PLANE2, eqn2);
        glClipPlane(GL_CLIP_PLANE3, eqn3);
        glClipPlane(GL_CLIP_PLANE4, eqn4);
        glClipPlane(GL_CLIP_PLANE5, eqn5);
        # Clip planes are defined but currently left disabled.
#        glEnable(GL_CLIP_PLANE0);
#        glEnable(GL_CLIP_PLANE1);
#        glEnable(GL_CLIP_PLANE2);
#        glEnable(GL_CLIP_PLANE3);
#        glEnable(GL_CLIP_PLANE4);
#        glEnable(GL_CLIP_PLANE5);
        # 12 edges emitted as 24 vertices (GL_LINES takes them pairwise).
        glBegin(GL_LINES);
        glNormal3d(1.0, 1.0, 1.0)
        glVertex3d(r[0][0],r[0][1],r[0][2]);
        glVertex3d(r[1][0],r[1][1],r[1][2]);
        glVertex3d(r[1][0],r[1][1],r[1][2]);
        glVertex3d(r[2][0],r[2][1],r[2][2]);
        glVertex3d(r[2][0],r[2][1],r[2][2]);
        glVertex3d(r[3][0],r[3][1],r[3][2]);
        glVertex3d(r[3][0],r[3][1],r[3][2]);
        glVertex3d(r[0][0],r[0][1],r[0][2]);
        glVertex3d(r[4][0],r[4][1],r[4][2]);
        glVertex3d(r[5][0],r[5][1],r[5][2]);
        glVertex3d(r[5][0],r[5][1],r[5][2]);
        glVertex3d(r[6][0],r[6][1],r[6][2]);
        glVertex3d(r[6][0],r[6][1],r[6][2]);
        glVertex3d(r[7][0],r[7][1],r[7][2]);
        glVertex3d(r[7][0],r[7][1],r[7][2]);
        glVertex3d(r[4][0],r[4][1],r[4][2]);
        glVertex3d(r[0][0],r[0][1],r[0][2]);
        glVertex3d(r[4][0],r[4][1],r[4][2]);
        glVertex3d(r[1][0],r[1][1],r[1][2]);
        glVertex3d(r[5][0],r[5][1],r[5][2]);
        glVertex3d(r[2][0],r[2][1],r[2][2]);
        glVertex3d(r[6][0],r[6][1],r[6][2]);
        glVertex3d(r[3][0],r[3][1],r[3][2]);
        glVertex3d(r[7][0],r[7][1],r[7][2]);
        glEnd()
        glEndList()

    def init (self, glDrawingArea):
        """'realize' callback: build GL resources (sphere/box display lists)
        and set up lighting, material and the initial projection.

        NOTE(review): unlike reshape/display, there is no gl_begin/gl_end
        around these GL calls -- confirm the context is current here.
        """
        glcontext = self.get_gl_context()
        gldrawable = self.get_gl_drawable()
        self.DisplayLists = []
        self.SphereList = glGenLists(1)
        glNewList(self.SphereList, GL_COMPILE)
        gtk.gdkgl.draw_sphere(True, 1.0, 30, 30)
        glEndList()
        self.set_lattice(self.lattice)
        self.redraw()
        glShadeModel(GL_SMOOTH);
        glEnable (GL_LIGHTING);
        glEnable (GL_LINE_SMOOTH);
        glEnable (GL_POLYGON_SMOOTH);
        glDisable (GL_POLYGON_SMOOTH);
#        glEnable (GL_MULTISAMPLE);
        glEnable (GL_COLOR_MATERIAL);
        glEnable(GL_LIGHT0)
        # One white light plus specular highlights on the material.
        diffuse = (1.0, 1.0, 1.0, 1.0)
        ambient = (0.001, 0.001, 0.001, 1.0)
        specular = (1.0, 1.0, 1.0, 1.0)
        position = (1.0, 1.0, 1.0, 0.0)
        glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuse)
        glLightfv(GL_LIGHT0, GL_AMBIENT, ambient)
        glLightfv(GL_LIGHT0, GL_SPECULAR, specular)
        glLightfv(GL_LIGHT0, GL_POSITION, position)
        glMaterialfv (GL_FRONT_AND_BACK, GL_SPECULAR, specular)
        glMateriali(GL_FRONT_AND_BACK, GL_SHININESS, 92)
        glColorMaterial(GL_FRONT_AND_BACK, GL_DIFFUSE)
        (width, height) = self.window.get_size()
        glViewport(0, 0, width, height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        gluPerspective(40.0, 1.0, 1.0, 10.0)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
#        gluLookAt(0.0, 0.0, 3.0,\
#                  0.0, 0.0, 0.0,\
#                  0.0, 1.0, 0.0)
        glEnable(GL_AUTO_NORMAL)
        glEnable(GL_NORMALIZE)
        glEnable(GL_LIGHTING)
        glEnable(GL_LIGHT0)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_COLOR_MATERIAL)
        # Replace the provisional projection with an aspect-correct one.
        self.reshape (None, None)

    def reshape(self, glDrawArea, event):
        """'configure_event' callback: reset viewport and an aspect-correct
        frustum, then move the camera back along -z."""
        # get GLContext and GLDrawable
        glcontext = self.get_gl_context()
        gldrawable = self.get_gl_drawable()
        # GL calls
        if not gldrawable.gl_begin(glcontext): return
        x, y, width, height = self.get_allocation()
        glViewport(0, 0, width, height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        # Widen the frustum along the longer window axis so geometry keeps
        # its proportions.
        if width > height:
            w = float(width) / float(height)
            glFrustum(-w, w, -1.0, 1.0, 5.0, 60.0)
        else:
            h = float(height) / float(width)
            glFrustum(-1.0, 1.0, -h, h, 5.0, 60.0)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        glTranslatef(0.0, 0.0, -40.0)
        gldrawable.gl_end()
        return True

    def redraw(self):
        """Rebuild one display list per atom: the shared unit sphere
        translated to the atom position, re-centred on the cell origin."""
#        glMatrixMode(GL_PROJECTION)
#        glLoadIdentity();
#        width = self.get_allocation()[2]
#        height = self.get_allocation()[3]
#        ratio = width/height
#        gluPerspective(40.0, ratio, 1.0, 8.0*self.Distance/Scale)
#        glMatrixMode(GL_MODELVIEW);
#        glLoadIdentity();
#        gluLookAt(0.0, 0.0, self.Distance/Scale,\
#                  0.0, 0.0, 0.0,\
#                  0.0, 1.0, 0.0);
#        glTranslatef(0.0, 0.0, -self.Distance/Scale);
#        glScaled(Scale, Scale, Scale);
        # NOTE(review): deleted list ids are never removed from
        # self.DisplayLists, so stale ids accumulate across redraws and are
        # deleted/drawn again -- looks like a bug; confirm.
        for l in self.DisplayLists:
            glDeleteLists(l,1)
        for r in self.atom_pos:
            # Shift so the cell centre sits at the origin.
            rtran = r - 0.5*(self.lattice[0] + self.lattice[1] + self.lattice[2])
            list = glGenLists(1)
            self.DisplayLists.append(list)
            glNewList(list, GL_COMPILE)
            glPushMatrix();
            glTranslated (rtran[0], rtran[1], rtran[2])
            glCallList(self.SphereList)
            glPopMatrix();
            glEndList()

    def display(self, glDrawArea, event):
        """'expose_event' callback: draw the box and atoms under the current
        trackball rotation, then present the frame."""
        # get GLContext and GLDrawable
        glcontext = self.get_gl_context()
        gldrawable = glDrawArea.get_gl_drawable()
        # GL calls
        if not gldrawable.gl_begin(glcontext): return
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        glPushMatrix()
        # Apply the accumulated trackball rotation.
        glMultMatrixd(self.tb.matrix);
        # Do stuff here
#        glMatrixMode(GL_MODELVIEW)
#        glLoadIdentity()
        glTranslatef(0.0, 0.0, -5.0)
        #gtk.gdkgl.draw_sphere(True, 1.0, 30, 30)
        #glCallList(self.SphereList)
        glCallList(self.BoxList)
        # Atoms are drawn in red via the colour-material mechanism.
        glColor ([1.0, 0.0, 0.0, 1.0])
        glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)
        for list in self.DisplayLists:
            glCallList(list)
        glPopMatrix()
        # Present: swap if double-buffered, otherwise just flush.
        if gldrawable.is_double_buffered():
            gldrawable.swap_buffers()
        else:
            glFlush()
        gldrawable.gl_end()
        # Invalidate whole window.
        # self.window.invalidate_rect(self.allocation, False)
        # Update window synchronously (fast).
        # self.window.process_updates(False)

    def map(self, event, dummy):
        """'map_event' callback; currently only logs."""
        print "map_event"

#    def set_lattice (self, lattice):
#        print "set_lattice"

    def set_atoms (self, atomList):
        """Stub: intended to replace self.atom_pos; currently only logs."""
        print "set_atoms"
| |
'''
This is the main app file
Code is available here: https://github.com/zugaldia/cartoglass
'''
# Libraries provided by Python/App Engine (see app.yaml)
# Note this is easily portable to other cloud services (users -> oauth2)
from google.appengine.api import users
from google.appengine.ext import db
from random import randint
import jinja2
import json
import logging
import webapp2
# Libraries included in the packages folder
from apiclient.discovery import build
from oauth2client.appengine import OAuth2Decorator, StorageByKeyName, CredentialsProperty
import httplib2
import requests
# Our secret credentials, DO NOT include this in GitHub
from config import GOOGLE_CLIENT_ID, GOOGLE_CLIENT_SECRET, CARTODB_API_KEY
# Set up Jinja2's environment (template system) with default values and
# folder `templates` as the root for our HTML templates
jinja_environment = jinja2.Environment(
    loader=jinja2.FileSystemLoader('templates'))

# Scopes for OAuth2 (authorization)
GOOGLE_SCOPES = [
    # These are the two scopes related to Glass, we'll use both:
    'https://www.googleapis.com/auth/glass.timeline',
    'https://www.googleapis.com/auth/glass.location',
    # Include these two if you need more information about the user,
    # or any other Google OAuth2 scope (drive, google+, ...)
    # 'https://www.googleapis.com/auth/userinfo.email',
    # 'https://www.googleapis.com/auth/userinfo.profile'
]

# This decorator handles all of the OAuth 2.0 steps without you having to
# use any Flow, Credentials, or Storage objects.
decorator = OAuth2Decorator(
    client_id=GOOGLE_CLIENT_ID,
    client_secret=GOOGLE_CLIENT_SECRET,
    scope=GOOGLE_SCOPES,
    # Forces to get a refresh token, fixes { "error" : "invalid_grant" }
    # approval_prompt='force'
)
# The model where we'll store the credentials
class CredentialsModel(db.Model):
    """Datastore entity holding one user's OAuth2 credentials; looked up by
    user id via StorageByKeyName (see SubscriptionHandler)."""
    credentials = CredentialsProperty()
# We'll use this to execute all calls to the Mirror API; each request is
# authorized by passing the per-user http object to execute(http=...).
service = build('mirror', 'v1')
class MainHandler(webapp2.RequestHandler):
    """Serves the public landing page at '/'."""

    def get(self):
        # The landing page is static, so no template context is passed.
        # Template handling could be factored into a base RequestHandler:
        # http://webapp-improved.appspot.com/api/webapp2_extras/jinja2.html
        page = jinja_environment.get_template('landing.html')
        self.response.write(page.render())
class InstallHandler(webapp2.RequestHandler):
    """Per-user 'install' flow: after the OAuth2 dance, sends a welcome card
    and registers timeline and location subscriptions."""

    # One line of code to handle the OAuth2 dance
    @decorator.oauth_required
    def get(self):
        # Get the authorized HTTP object created by the decorator.
        # @decorator.oauth_aware gives you more control of the flow if needed
        self.http = decorator.http()
        # You could use the user_id to keep track of per user installs
        self.user_id = users.get_current_user().user_id()
        # It's always a good idea to surround API calls with try/except/else
        try:
            # Once we have proper authorization we're gonna carry out three tasks:
            # (these tasks could be moved to a GlassService class, and benefit from
            # batch requests.)
            self._send_welcome_card()
            self._subscribe_to_timeline()
            self._subscribe_to_location()
        except Exception as e:
            logging.error('Install failed: %s' % str(e))
            all_good = False
        else:
            all_good = True
        # Render the template with the overall install outcome.
        template = jinja_environment.get_template('install.html')
        self.response.write(template.render({'all_good': all_good}))

    def _send_welcome_card(self):
        """Insert a demo welcome card (text, HTML, menu items) into the
        user's timeline."""
        # Body of the request
        # If this gets too complicated, you could create a wrapping class
        body = {
            # Describes how important the notification is. DEFAULT is the only
            # allowed value.
            'notification': {'level': 'DEFAULT'},
            # A speakable description of the type of this item. This will be
            # announced to the user prior to reading the content of the item
            # in cases where the additional context is useful, for example
            # when the user requests that the item be read aloud following a
            # notification.
            'speakableType': 'Welcome card',
            # This accompanies the read aloud menu item
            'speakableText': 'You can\'t see me, but you can hear me',
            # This is the most basic way of providing content, it looks great
            # in many situations (ignored if you use HTML content). It
            # auto-resizes according to the text length.
            'text': 'Hello Glassingtonian!',
            # You can use the playground to see how it's gonna look like:
            # (pretty close to the text before, but with Glassingtonian in bold yellow)
            # https://developers.google.com/glass/playground. And the CSS:
            # https://mirror-api-playground.appspot.com/assets/css/base_style.css
            'html': '<article><section><p class="text-auto-size">Hello '
                '<strong class="yellow">Glassingtonian</strong>!</p></section></article>',
            # Normally you won't need so many items, this just for demo purposes
            'menuItems': [
                # These are all built-in (free implementation) actions
                { 'action': 'READ_ALOUD' },
                { 'action': 'TOGGLE_PINNED' },
                { 'action': 'DELETE' },
                # Built-in actions with a payload (note the READ_ALOUD inconsistency)
                { 'action': 'OPEN_URI',
                  'payload': 'http://www.techmeme.com' },
                { 'action': 'PLAY_VIDEO',
                  'payload': 'https://cartoglass.appspot.com/static/video.mp4' },
                # This is how you define a custom action
                { 'action': 'CUSTOM',
                  'id': 'GUESS_A_NUMBER',
                  'values': [ {
                      'displayName': 'Guess a number',
                      'iconUrl': 'https://cartoglass.appspot.com/static/glyphicons_009_magic.png',
                      # You can also include states for PENDING and CONFIRMED
                      'state': 'DEFAULT'
                  }]
                }
            ]
        }
        # Authenticated request
        response = service.timeline().insert(body=body).execute(http=self.http)
        logging.info(response)

    def _subscribe_to_timeline(self):
        """Register the /subscription endpoint for timeline notifications."""
        # Body of the request
        body = {
            'collection': 'timeline',
            # A secret token sent to the subscriber in notifications so that
            # it can verify that the notification was generated by Google.
            'verifyToken': 'I_AM_YOUR_FATHER',
            # An opaque token sent to the subscriber in notifications so that
            # it can determine the ID of the user. You could obfuscate this
            # value for increased security
            'userToken': self.user_id,
            # Notice the HTTPS. This won't work for localhost either.
            # During development, you use the subscription proxy:
            # https://developers.google.com/glass/subscription-proxy
            'callbackUrl': 'https://cartoglass.appspot.com/subscription'
        }
        # Authenticated request
        response = service.subscriptions().insert(body=body).execute(http=self.http)
        logging.info(response)

    def _subscribe_to_location(self):
        """Register the /subscription endpoint for location updates."""
        # Same as timeline, only replacing the `collection` name
        body = {
            'collection': 'locations',
            'verifyToken': 'I_AM_YOUR_FATHER',
            'userToken': self.user_id,
            'callbackUrl': 'https://cartoglass.appspot.com/subscription'
        }
        # Authenticated request
        response = service.subscriptions().insert(body=body).execute(http=self.http)
        logging.info(response)
class SubscriptionHandler(webapp2.RequestHandler):
    """Endpoint the Mirror API POSTs timeline/location notifications to.

    Note it comes as a POST request. If you fail to respond, the Mirror API
    will retry up to 5 times.
    """

    def post(self):
        data = json.loads(self.request.body)

        # Returns a 403 Forbidden (authenticating will make no difference)
        if data.get('verifyToken') != 'I_AM_YOUR_FATHER':
            logging.error('Unauthorized request to the subscription endpoint.')
            return self.abort(403)

        # Get the credentials, you could also check credentials.refresh_token
        # is not None
        self.user_id = data.get('userToken')
        credentials = StorageByKeyName(CredentialsModel, self.user_id, 'credentials').get()
        if not credentials:
            logging.error('Authentication is required and has failed.')
            return self.abort(401)

        # http was previously authorized by the decorator
        self.http = credentials.authorize(httplib2.Http())

        try:
            # Handle the appropriate type of subscription
            if data.get('collection') == 'locations':
                self._handle_location(data)
            elif data.get('collection') == 'timeline':
                self._handle_timeline(data)
        except Exception as e:
            # Bug fix: the two values were previously passed as ONE tuple
            # argument, which breaks logging's lazy %-formatting ("not enough
            # arguments for format string") and loses the message. Pass them
            # as separate arguments instead.
            logging.error('Failed SubscriptionHandler for user_id %s: %s',
                          self.user_id, str(e))

    def _handle_location(self, data):
        """Fetch the user's latest location and record it in CartoDB."""
        # Note 1: You still need another request to get the actual location
        # Note 2: The courtesy limit is 1,000 requests/day and you may get
        # around 1 notification per user every 10 minutes. That means you
        # should NOT do this every single time, otherwise, you'll be over the
        # limit when you reach ~ 7 users (1,000 / 24 * 6).
        response = service.locations().get(id=data.get('itemId')).execute(http=self.http)
        logging.info(response)
        # Note 3: Slight inconsistency with Android (address, speed, altitude, types)
        # Location in Glass has the following fields:
        #   timestamp: datetime,
        #   latitude: double,
        #   longitude: double,
        #   accuracy: double,
        #   displayName: string,
        #   address: string
        # In Android, however, android.location.Location has the following methods:
        #   getAccuracy(): float
        #   getAltitude(): double
        #   getBearing(): float
        #   getElapsedRealtimeNanos(): long
        #   getLatitude(): double
        #   getLongitude(): double
        #   getProvider(): String
        #   getSpeed(): float
        #   getTime(): long
        # Store the values in CartoDB
        values = {'longitude': response.get('longitude'),
                  'latitude': response.get('latitude'),
                  'accuracy': response.get('accuracy'),  # in meters
                  # Remove the single quote to respect the query format below
                  'address': response.get('address', '').replace("'", ""),
                  'display_name': response.get('displayName', '').replace("'", ""),
                  'user_id': self.user_id }
        # NOTE(review): this builds SQL by string interpolation. Stripping
        # single quotes above mitigates but does not eliminate injection risk;
        # consider escaping/parameterization on the CartoDB side.
        payload = {
            'api_key': CARTODB_API_KEY,
            # This is a plain PostgreSQL query
            'q': "INSERT INTO cartoglass (the_geom, accuracy, address, displayname, user_id) "
                "VALUES (ST_GeomFromText('POINT({longitude} {latitude})', 4326), "
                "{accuracy}, '{address}', '{display_name}', '{user_id}');".format(**values)
        }
        # Use Requests
        r = requests.get('http://zugaldia.cartodb.com/api/v2/sql', params=payload)
        logging.info('CartoDB: %s (response: %s, query: %s)' %
                     (r.status_code, r.text, payload['q']))

    def _handle_timeline(self, data):
        """React to user actions on timeline items (custom menu selections)."""
        # A list of actions taken by the user that triggered the notification.
        for user_action in data.get('userActions', []):
            # The user selected the custom menu item in the timeline item
            if (user_action.get('type') == 'CUSTOM' and
                    user_action.get('payload') == 'GUESS_A_NUMBER'):
                # You could carry out some server-side action here, hopefully
                # more interesting than generating a random number.
                random_number = randint(1, 10)
                # We insert a card with a random number as the user response
                body = {
                    'notification': {'level': 'DEFAULT'},
                    'speakableType': 'This is so random',
                    'text': 'This is the number I had in mind: ' + str(random_number),
                    'menuItems': [{ 'action': 'DELETE' }]
                }
                # Authenticated request
                response = service.timeline().insert(body=body).execute(http=self.http)
                logging.info(response)
# URL routing: map each path to its RequestHandler class.
app = webapp2.WSGIApplication([
    ('/', MainHandler),
    ('/install', InstallHandler),
    # Glass will notify you of timeline actions and location updates
    # through this endpoint:
    ('/subscription', SubscriptionHandler),
    # This will take care of the OAuth2 callback redirect for you
    # See: https://developers.google.com/api-client-library/python/guide/google_app_engine
    (decorator.callback_path, decorator.callback_handler()),
    # You should provide an uninstall handler as part of the developer
    # policies and give users a reasonably convenient way to delete any of
    # their personal information you've obtained from the API.
    #('/uninstall', UninstallHandler)
], debug=True)
| |
"""Utilities to evaluate the clustering performance of models.
Functions named as *_score return a scalar value to maximize: the higher the
better.
"""
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Wei LI <kuantkid@gmail.com>
# Diego Molla <dmolla-aliod@gmail.com>
# Arnaud Fouchet <foucheta@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# Gregory Stupp <stuppie@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Arya McCarthy <arya@jhu.edu>
# Uwe F Mayer <uwe_f_mayer@yahoo.com>
# License: BSD 3 clause
import warnings
from math import log
import numpy as np
from scipy import sparse as sp
from ._expected_mutual_info_fast import expected_mutual_information
from ...utils.fixes import _astype_copy_false
from ...utils.multiclass import type_of_target
from ...utils.validation import check_array, check_consistent_length
def check_clusterings(labels_true, labels_pred):
    """Check that the labels arrays are 1D and of same dimension.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,)
        The true labels.

    labels_pred : array-like of shape (n_samples,)
        The predicted labels.
    """
    # Validate both arrays identically; empty inputs are allowed.
    labels_true, labels_pred = (
        check_array(labels, ensure_2d=False, ensure_min_samples=0, dtype=None)
        for labels in (labels_true, labels_pred)
    )

    # Clustering metrics are only defined for discrete assignments; warn
    # (rather than fail) when either side looks continuous.
    type_label = type_of_target(labels_true)
    type_pred = type_of_target(labels_pred)
    if "continuous" in (type_pred, type_label):
        msg = (
            "Clustering metrics expects discrete values but received"
            f" {type_label} values for label, and {type_pred} values "
            "for target"
        )
        warnings.warn(msg, UserWarning)

    # input checks
    for name, labels in (("labels_true", labels_true), ("labels_pred", labels_pred)):
        if labels.ndim != 1:
            raise ValueError("%s must be 1D: shape is %r" % (name, labels.shape))
    check_consistent_length(labels_true, labels_pred)

    return labels_true, labels_pred
def _generalized_average(U, V, average_method):
"""Return a particular mean of two numbers."""
if average_method == "min":
return min(U, V)
elif average_method == "geometric":
return np.sqrt(U * V)
elif average_method == "arithmetic":
return np.mean([U, V])
elif average_method == "max":
return max(U, V)
else:
raise ValueError(
"'average_method' must be 'min', 'geometric', 'arithmetic', or 'max'"
)
def contingency_matrix(
    labels_true, labels_pred, *, eps=None, sparse=False, dtype=np.int64
):
    """Build a contingency matrix describing the relationship between labels.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    eps : float, default=None
        If a float, that value is added to all values in the contingency
        matrix. This helps to stop NaN propagation.
        If ``None``, nothing is adjusted.

    sparse : bool, default=False
        If `True`, return a sparse CSR continency matrix. If `eps` is not
        `None` and `sparse` is `True` will raise ValueError.

        .. versionadded:: 0.18

    dtype : numeric type, default=np.int64
        Output dtype. Ignored if `eps` is not `None`.

        .. versionadded:: 0.24

    Returns
    -------
    contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
        Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples
        in true class :math:`i` and in predicted class :math:`j`. If
        ``eps is None``, the dtype of this array will be integer unless set
        otherwise with the ``dtype`` argument. If ``eps`` is given, the dtype
        will be float.
        Will be a ``sklearn.sparse.csr_matrix`` if ``sparse=True``.
    """
    if sparse and eps is not None:
        raise ValueError("Cannot set 'eps' when sparse=True")

    true_classes, true_idx = np.unique(labels_true, return_inverse=True)
    pred_clusters, pred_idx = np.unique(labels_pred, return_inverse=True)

    # A COO matrix doubles as a fast 2D histogram over consecutive integer
    # bins: each sample contributes one count at (true class, predicted
    # cluster) and duplicate coordinates are summed on conversion.
    counts = sp.coo_matrix(
        (np.ones(len(true_idx)), (true_idx, pred_idx)),
        shape=(len(true_classes), len(pred_clusters)),
        dtype=dtype,
    )
    if sparse:
        counts = counts.tocsr()
        counts.sum_duplicates()
        return counts

    counts = counts.toarray()
    if eps is not None:
        # '+' rather than '+=' so the integer matrix is promoted to float.
        counts = counts + eps
    return counts
# clustering measures
def pair_confusion_matrix(labels_true, labels_pred):
    """Pair confusion matrix arising from two clusterings.

    The pair confusion matrix :math:`C` computes a 2 by 2 similarity matrix
    between two clusterings by considering all pairs of samples and counting
    pairs that are assigned into the same or into different clusters under
    the true and predicted clusterings.

    Considering a pair of samples that is clustered together a positive pair,
    then as in binary classification the count of true negatives is
    :math:`C_{00}`, false negatives is :math:`C_{10}`, true positives is
    :math:`C_{11}` and false positives is :math:`C_{01}`.

    Read more in the :ref:`User Guide <pair_confusion_matrix>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,), dtype=integral
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,), dtype=integral
        Cluster labels to evaluate.

    Returns
    -------
    C : ndarray of shape (2, 2), dtype=np.int64
        The contingency matrix.

    See Also
    --------
    rand_score: Rand Score
    adjusted_rand_score: Adjusted Rand Score
    adjusted_mutual_info_score: Adjusted Mutual Information

    Examples
    --------
    Perfectly matching labelings have all non-zero entries on the
    diagonal regardless of actual label values:

      >>> from sklearn.metrics.cluster import pair_confusion_matrix
      >>> pair_confusion_matrix([0, 0, 1, 1], [1, 1, 0, 0])
      array([[8, 0],
             [0, 4]]...

    Labelings that assign all classes members to the same clusters
    are complete but may be not always pure, hence penalized, and
    have some off-diagonal non-zero entries:

      >>> pair_confusion_matrix([0, 0, 1, 2], [0, 0, 1, 1])
      array([[8, 2],
             [0, 2]]...

    Note that the matrix is not symmetric.

    References
    ----------
    .. L. Hubert and P. Arabie, Comparing Partitions, Journal of
      Classification 1985
      https://link.springer.com/article/10.1007%2FBF01908075
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = np.int64(labels_true.shape[0])

    # Every pair count below is derived from the contingency table alone.
    contingency = contingency_matrix(
        labels_true, labels_pred, sparse=True, dtype=np.int64
    )
    row_sums = np.ravel(contingency.sum(axis=1))  # per true-class sizes
    col_sums = np.ravel(contingency.sum(axis=0))  # per predicted-cluster sizes
    sum_squares = (contingency.data ** 2).sum()

    # Ordered pairs agreeing in both clusterings, minus the self-pairs.
    same_same = sum_squares - n_samples
    # Same predicted cluster but different true class, and vice versa.
    diff_same = contingency.dot(col_sums).sum() - sum_squares
    same_diff = contingency.transpose().dot(row_sums).sum() - sum_squares

    C = np.empty((2, 2), dtype=np.int64)
    C[1, 1] = same_same
    C[0, 1] = diff_same
    C[1, 0] = same_diff
    # Whatever ordered pairs remain disagree in both clusterings.
    C[0, 0] = n_samples ** 2 - C[0, 1] - C[1, 0] - sum_squares
    return C
def rand_score(labels_true, labels_pred):
    """Rand index.

    The Rand Index computes a similarity measure between two clusterings
    by considering all pairs of samples and counting pairs that are
    assigned in the same or different clusters in the predicted and
    true clusterings.

    The raw RI score is:

        RI = (number of agreeing pairs) / (number of pairs)

    Read more in the :ref:`User Guide <rand_score>`.

    Parameters
    ----------
    labels_true : array-like of shape (n_samples,), dtype=integral
        Ground truth class labels to be used as a reference.

    labels_pred : array-like of shape (n_samples,), dtype=integral
        Cluster labels to evaluate.

    Returns
    -------
    RI : float
       Similarity score between 0.0 and 1.0, inclusive, 1.0 stands for
       perfect match.

    See Also
    --------
    adjusted_rand_score: Adjusted Rand Score
    adjusted_mutual_info_score: Adjusted Mutual Information

    Examples
    --------
    Perfectly matching labelings have a score of 1 even

      >>> from sklearn.metrics.cluster import rand_score
      >>> rand_score([0, 0, 1, 1], [1, 1, 0, 0])
      1.0

    Labelings that assign all classes members to the same clusters
    are complete but may not always be pure, hence penalized:

      >>> rand_score([0, 0, 1, 2], [0, 0, 1, 1])
      0.83...

    References
    ----------
    .. L. Hubert and P. Arabie, Comparing Partitions, Journal of
      Classification 1985
      https://link.springer.com/article/10.1007%2FBF01908075

    .. https://en.wikipedia.org/wiki/Simple_matching_coefficient

    .. https://en.wikipedia.org/wiki/Rand_index
    """
    pairs = pair_confusion_matrix(labels_true, labels_pred)
    agreements = pairs.diagonal().sum()  # C[0, 0] + C[1, 1]
    total = pairs.sum()

    # Special limit cases: no clustering since the data is not split;
    # or trivial clustering where each document is assigned a unique
    # cluster. These are perfect matches hence return 1.0.
    if total == 0 or agreements == total:
        return 1.0
    return agreements / total
def adjusted_rand_score(labels_true, labels_pred):
    """Rand index adjusted for chance.

    The Rand Index counts sample pairs that are grouped consistently in
    both clusterings; the adjusted version rescales it so that random
    labelings score near 0.0::

        ARI = (RI - Expected_RI) / (max(RI) - Expected_RI)

    The score is symmetric in its arguments and invariant under any
    permutation of the label values; identical clusterings score 1.0.

    Read more in the :ref:`User Guide <adjusted_rand_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.
    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    Returns
    -------
    ARI : float
        Similarity score between -1.0 and 1.0. Random labelings have an ARI
        close to 0.0. 1.0 stands for perfect match.

    References
    ----------
    .. [Hubert1985] L. Hubert and P. Arabie, Comparing Partitions,
      Journal of Classification 1985
      https://link.springer.com/article/10.1007%2FBF01908075
    .. [Steinley2004] D. Steinley, Properties of the Hubert-Arabie
      adjusted Rand index, Psychological Methods 2004
    .. [wk] https://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index

    See Also
    --------
    adjusted_mutual_info_score : Adjusted Mutual Information.
    """
    (tn, fp), (fn, tp) = pair_confusion_matrix(labels_true, labels_pred)
    # Promote to Python ints so the products below cannot overflow or
    # underflow a fixed-width integer type.
    tn = int(tn)
    fp = int(fp)
    fn = int(fn)
    tp = int(tp)
    # Special cases: empty data or full agreement.
    if fp == 0 and fn == 0:
        return 1.0
    numerator = 2.0 * (tp * tn - fn * fp)
    denominator = (tp + fn) * (fn + tn) + (tp + fp) * (fp + tn)
    return numerator / denominator
def homogeneity_completeness_v_measure(labels_true, labels_pred, *, beta=1.0):
    """Compute the homogeneity and completeness and V-Measure scores at once.

    Those metrics are based on normalized conditional entropy measures of
    the clustering labeling to evaluate given the knowledge of a Ground
    Truth class labels of the same samples.

    A clustering result satisfies homogeneity if all of its clusters
    contain only data points which are members of a single class.

    A clustering result satisfies completeness if all the data points
    that are members of a given class are elements of the same cluster.

    Both scores have positive values between 0.0 and 1.0, larger values
    being desirable.

    Those 3 metrics are independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score values in any way.

    V-Measure is furthermore symmetric: swapping ``labels_true`` and
    ``label_pred`` will give the same score. This does not hold for
    homogeneity and completeness. V-Measure is identical to
    :func:`normalized_mutual_info_score` with the arithmetic averaging
    method.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.
    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.
    beta : float, default=1.0
        Ratio of weight attributed to ``homogeneity`` vs ``completeness``.
        If ``beta`` is greater than 1, ``completeness`` is weighted more
        strongly in the calculation. If ``beta`` is less than 1,
        ``homogeneity`` is weighted more strongly.

    Returns
    -------
    homogeneity : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous
        labeling.
    completeness : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly complete
        labeling.
    v_measure : float
        Harmonic mean of the first two.

    See Also
    --------
    homogeneity_score
    completeness_score
    v_measure_score
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    if len(labels_true) == 0:
        # Empty data: all three scores are perfect by convention.
        return 1.0, 1.0, 1.0
    entropy_C = entropy(labels_true)
    entropy_K = entropy(labels_pred)
    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    MI = mutual_info_score(None, None, contingency=contingency)
    # A zero entropy means that labeling is trivially uninformative;
    # the corresponding ratio is defined as 1.0 in that limit.
    homogeneity = MI / (entropy_C) if entropy_C else 1.0
    completeness = MI / (entropy_K) if entropy_K else 1.0
    # NOTE: the local result is named ``v_measure`` (not ``v_measure_score``)
    # to avoid shadowing the module-level function of that name.
    if homogeneity + completeness == 0.0:
        v_measure = 0.0
    else:
        # Weighted harmonic mean of homogeneity and completeness.
        v_measure = (
            (1 + beta)
            * homogeneity
            * completeness
            / (beta * homogeneity + completeness)
        )
    return homogeneity, completeness, v_measure
def homogeneity_score(labels_true, labels_pred):
    """Homogeneity metric of a cluster labeling given a ground truth.

    A clustering is homogeneous when every cluster contains members of a
    single class only. The score lies in [0.0, 1.0], with 1.0 meaning a
    perfectly homogeneous labeling, and is invariant under permutations
    of the label values.

    This metric is not symmetric: swapping ``labels_true`` and
    ``labels_pred`` yields the :func:`completeness_score` instead.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.
    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    Returns
    -------
    homogeneity : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly homogeneous
        labeling.

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See Also
    --------
    completeness_score
    v_measure_score
    """
    homogeneity, _, _ = homogeneity_completeness_v_measure(labels_true, labels_pred)
    return homogeneity
def completeness_score(labels_true, labels_pred):
    """Completeness metric of a cluster labeling given a ground truth.

    A clustering is complete when all members of a given class end up in
    the same cluster. The score lies in [0.0, 1.0], with 1.0 meaning a
    perfectly complete labeling, and is invariant under permutations of
    the label values.

    This metric is not symmetric: swapping ``labels_true`` and
    ``labels_pred`` yields the :func:`homogeneity_score` instead.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.
    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.

    Returns
    -------
    completeness : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly complete
        labeling.

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See Also
    --------
    homogeneity_score
    v_measure_score
    """
    _, completeness, _ = homogeneity_completeness_v_measure(labels_true, labels_pred)
    return completeness
def v_measure_score(labels_true, labels_pred, *, beta=1.0):
    """V-measure cluster labeling given a ground truth.

    The V-measure is the (``beta``-weighted) harmonic mean of homogeneity
    and completeness::

        v = (1 + beta) * homogeneity * completeness
             / (beta * homogeneity + completeness)

    It equals :func:`normalized_mutual_info_score` with the
    ``'arithmetic'`` averaging option. The score is symmetric in its
    arguments and invariant under permutations of the label values, so it
    can compare two independent label assignments on the same data even
    without a ground truth.

    Read more in the :ref:`User Guide <homogeneity_completeness>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        Ground truth class labels to be used as a reference.
    labels_pred : array-like of shape (n_samples,)
        Cluster labels to evaluate.
    beta : float, default=1.0
        Ratio of weight attributed to ``homogeneity`` vs ``completeness``.
        If ``beta`` is greater than 1, ``completeness`` is weighted more
        strongly in the calculation. If ``beta`` is less than 1,
        ``homogeneity`` is weighted more strongly.

    Returns
    -------
    v_measure : float
        Score between 0.0 and 1.0. 1.0 stands for perfectly complete
        labeling.

    References
    ----------
    .. [1] `Andrew Rosenberg and Julia Hirschberg, 2007. V-Measure: A
       conditional entropy-based external cluster evaluation measure
       <https://aclweb.org/anthology/D/D07/D07-1043.pdf>`_

    See Also
    --------
    homogeneity_score
    completeness_score
    normalized_mutual_info_score
    """
    _, _, v_measure = homogeneity_completeness_v_measure(
        labels_true, labels_pred, beta=beta
    )
    return v_measure
def mutual_info_score(labels_true, labels_pred, *, contingency=None):
    """Mutual Information between two clusterings.

    The Mutual Information is a measure of the similarity between two labels
    of the same data. Where :math:`|U_i|` is the number of the samples
    in cluster :math:`U_i` and :math:`|V_j|` is the number of the
    samples in cluster :math:`V_j`, the Mutual Information
    between clusterings :math:`U` and :math:`V` is given as:

    .. math::

        MI(U,V)=\\sum_{i=1}^{|U|} \\sum_{j=1}^{|V|} \\frac{|U_i\\cap V_j|}{N}
        \\log\\frac{N|U_i \\cap V_j|}{|U_i||V_j|}

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching :math:`U` (i.e
    ``label_true``) with :math:`V` (i.e. ``label_pred``) will return the
    same score value. This can be useful to measure the agreement of two
    independent label assignments strategies on the same dataset when the
    real ground truth is not known.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets, called :math:`U` in
        the above formula.
    labels_pred : int array-like of shape (n_samples,)
        A clustering of the data into disjoint subsets, called :math:`V` in
        the above formula.
    contingency : {ndarray, sparse matrix} of shape \
            (n_classes_true, n_classes_pred), default=None
        A contingency matrix given by the :func:`contingency_matrix` function.
        If value is ``None``, it will be computed, otherwise the given value is
        used, with ``labels_true`` and ``labels_pred`` ignored.

    Returns
    -------
    mi : float
        Mutual information, a non-negative value, measured in nats using the
        natural logarithm.

    Notes
    -----
    The logarithm used is the natural logarithm (base-e).

    See Also
    --------
    adjusted_mutual_info_score : Adjusted against chance Mutual Information.
    normalized_mutual_info_score : Normalized Mutual Information.
    """
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    else:
        # A caller-supplied contingency matrix is validated and used as is;
        # the label arrays are ignored (see the docstring contract).
        contingency = check_array(
            contingency,
            accept_sparse=["csr", "csc", "coo"],
            dtype=[int, np.int32, np.int64],
        )
    if isinstance(contingency, np.ndarray):
        # For an array
        nzx, nzy = np.nonzero(contingency)
        nz_val = contingency[nzx, nzy]
    elif sp.issparse(contingency):
        # For a sparse matrix
        nzx, nzy, nz_val = sp.find(contingency)
    else:
        raise ValueError("Unsupported type for 'contingency': %s" % type(contingency))
    contingency_sum = contingency.sum()
    # Marginal counts: pi over rows (true classes), pj over columns
    # (predicted clusters).
    pi = np.ravel(contingency.sum(axis=1))
    pj = np.ravel(contingency.sum(axis=0))
    log_contingency_nm = np.log(nz_val)
    contingency_nm = nz_val / contingency_sum
    # Don't need to calculate the full outer product, just for non-zeroes
    outer = pi.take(nzx).astype(np.int64, copy=False) * pj.take(nzy).astype(
        np.int64, copy=False
    )
    log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
    mi = (
        contingency_nm * (log_contingency_nm - log(contingency_sum))
        + contingency_nm * log_outer
    )
    # Flush values within machine epsilon of zero to exactly 0.0 so that
    # floating-point noise cannot accumulate in the final sum.
    mi = np.where(np.abs(mi) < np.finfo(mi.dtype).eps, 0.0, mi)
    # MI is mathematically non-negative; clip guards against tiny negative
    # rounding residue.
    return np.clip(mi.sum(), 0.0, None)
def adjusted_mutual_info_score(
    labels_true, labels_pred, *, average_method="arithmetic"
):
    """Adjusted Mutual Information between two clusterings.

    Adjusted Mutual Information (AMI) is an adjustment of the Mutual
    Information (MI) score to account for chance. It accounts for the fact that
    the MI is generally higher for two clusterings with a larger number of
    clusters, regardless of whether there is actually more information shared.

    For two clusterings :math:`U` and :math:`V`, the AMI is given as::

        AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [avg(H(U), H(V)) - E(MI(U, V))]

    This metric is independent of the absolute values of the labels:
    a permutation of the class or cluster label values won't change the
    score value in any way.

    This metric is furthermore symmetric: switching :math:`U` (``label_true``)
    with :math:`V` (``labels_pred``) will return the same score value. This can
    be useful to measure the agreement of two independent label assignments
    strategies on the same dataset when the real ground truth is not known.

    Be mindful that this function is an order of magnitude slower than other
    metrics, such as the Adjusted Rand Index.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets, called :math:`U` in
        the above formula.
    labels_pred : int array-like of shape (n_samples,)
        A clustering of the data into disjoint subsets, called :math:`V` in
        the above formula.
    average_method : str, default='arithmetic'
        How to compute the normalizer in the denominator. Possible options
        are 'min', 'geometric', 'arithmetic', and 'max'.

        .. versionadded:: 0.20

        .. versionchanged:: 0.22
           The default value of ``average_method`` changed from 'max' to
           'arithmetic'.

    Returns
    -------
    ami: float (upperlimited by 1.0)
        The AMI returns a value of 1 when the two partitions are identical
        (ie perfectly matched). Random partitions (independent labellings) have
        an expected AMI around 0 on average hence can be negative. The value is
        in adjusted nats (based on the natural logarithm).

    See Also
    --------
    adjusted_rand_score : Adjusted Rand Index.
    mutual_info_score : Mutual Information (not adjusted for chance).

    References
    ----------
    .. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for
       Clusterings Comparison: Variants, Properties, Normalization and
       Correction for Chance, JMLR
       <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_
    .. [2] `Wikipedia entry for the Adjusted Mutual Information
       <https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_samples = labels_true.shape[0]
    classes = np.unique(labels_true)
    clusters = np.unique(labels_pred)
    # Special limit cases: no clustering since the data is not split.
    # This is a perfect match hence return 1.0.
    if (
        classes.shape[0] == clusters.shape[0] == 1
        or classes.shape[0] == clusters.shape[0] == 0
    ):
        return 1.0
    contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    # Cast to float64 without an extra copy where numpy allows it.
    contingency = contingency.astype(np.float64, **_astype_copy_false(contingency))
    # Calculate the MI for the two clusterings
    mi = mutual_info_score(labels_true, labels_pred, contingency=contingency)
    # Calculate the expected value for the mutual information
    emi = expected_mutual_information(contingency, n_samples)
    # Calculate entropy for each labeling
    h_true, h_pred = entropy(labels_true), entropy(labels_pred)
    normalizer = _generalized_average(h_true, h_pred, average_method)
    denominator = normalizer - emi
    # Avoid 0.0 / 0.0 when expectation equals maximum, i.e a perfect match.
    # normalizer should always be >= emi, but because of floating-point
    # representation, sometimes emi is slightly larger. Correct this
    # by preserving the sign.
    if denominator < 0:
        denominator = min(denominator, -np.finfo("float64").eps)
    else:
        denominator = max(denominator, np.finfo("float64").eps)
    ami = (mi - emi) / denominator
    return ami
def normalized_mutual_info_score(
    labels_true, labels_pred, *, average_method="arithmetic"
):
    """Normalized Mutual Information between two clusterings.

    Normalized Mutual Information (NMI) rescales the Mutual Information
    to the [0, 1] range by dividing by a generalized mean of
    ``H(labels_true)`` and ``H(labels_pred)``, chosen via
    ``average_method``. The score is 0 for no mutual information and 1
    for perfect correlation.

    This measure is not adjusted for chance, so
    :func:`adjusted_mutual_info_score` might be preferred. It is
    symmetric in its arguments and invariant under permutations of the
    label values.

    Read more in the :ref:`User Guide <mutual_info_score>`.

    Parameters
    ----------
    labels_true : int array, shape = [n_samples]
        A clustering of the data into disjoint subsets.
    labels_pred : int array-like of shape (n_samples,)
        A clustering of the data into disjoint subsets.
    average_method : str, default='arithmetic'
        How to compute the normalizer in the denominator. Possible options
        are 'min', 'geometric', 'arithmetic', and 'max'.

        .. versionadded:: 0.20

        .. versionchanged:: 0.22
           The default value of ``average_method`` changed from 'geometric' to
           'arithmetic'.

    Returns
    -------
    nmi : float
        Score between 0.0 and 1.0 in normalized nats (based on the natural
        logarithm). 1.0 stands for perfectly complete labeling.

    See Also
    --------
    v_measure_score : V-Measure (NMI with arithmetic mean option).
    adjusted_rand_score : Adjusted Rand Index.
    adjusted_mutual_info_score : Adjusted Mutual Information (adjusted
        against chance).
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    n_classes = np.unique(labels_true).shape[0]
    n_clusters = np.unique(labels_pred).shape[0]
    # Degenerate inputs where the data is not split at all (both sides
    # have a single label, or both are empty) count as a perfect match.
    if n_classes == n_clusters and n_classes in (0, 1):
        return 1.0
    cm = contingency_matrix(labels_true, labels_pred, sparse=True)
    cm = cm.astype(np.float64, **_astype_copy_false(cm))
    # Mutual information of the two clusterings.
    mi = mutual_info_score(labels_true, labels_pred, contingency=cm)
    # Normalize by the chosen generalized mean of the two label entropies.
    normalizer = _generalized_average(
        entropy(labels_true), entropy(labels_pred), average_method
    )
    # Avoid 0.0 / 0.0 when either entropy is zero.
    normalizer = max(normalizer, np.finfo("float64").eps)
    return mi / normalizer
def fowlkes_mallows_score(labels_true, labels_pred, *, sparse=False):
    """Measure the similarity of two clusterings of a set of points.

    .. versionadded:: 0.18

    The Fowlkes-Mallows index (FMI) is the geometric mean of pairwise
    precision and recall::

        FMI = TP / sqrt((TP + FP) * (TP + FN))

    where ``TP`` counts pairs of points grouped together in both
    ``labels_true`` and ``labels_pred``, ``FP`` pairs grouped only in
    ``labels_true``, and ``FN`` pairs grouped only in ``labels_pred``.
    The score ranges from 0 to 1; higher means more similar clusterings.

    Read more in the :ref:`User Guide <fowlkes_mallows_scores>`.

    Parameters
    ----------
    labels_true : int array, shape = (``n_samples``,)
        A clustering of the data into disjoint subsets.
    labels_pred : array, shape = (``n_samples``, )
        A clustering of the data into disjoint subsets.
    sparse : bool, default=False
        Compute contingency matrix internally with sparse matrix.
        NOTE(review): this flag is not consulted in the body (a sparse
        matrix is always used) — confirm whether it is vestigial.

    Returns
    -------
    score : float
        The resulting Fowlkes-Mallows score.

    References
    ----------
    .. [1] `E. B. Fowkles and C. L. Mallows, 1983. "A method for comparing two
       hierarchical clusterings". Journal of the American Statistical
       Association
       <https://www.tandfonline.com/doi/abs/10.1080/01621459.1983.10478008>`_
    .. [2] `Wikipedia entry for the Fowlkes-Mallows Index
       <https://en.wikipedia.org/wiki/Fowlkes-Mallows_index>`_
    """
    labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
    (n_samples,) = labels_true.shape
    cm = contingency_matrix(labels_true, labels_pred, sparse=True)
    cm = cm.astype(np.int64, **_astype_copy_false(cm))
    # tk, pk, qk are the pair counts (minus the n diagonal self-pairs).
    tk = np.dot(cm.data, cm.data) - n_samples
    pk = np.sum(np.asarray(cm.sum(axis=0)).ravel() ** 2) - n_samples
    qk = np.sum(np.asarray(cm.sum(axis=1)).ravel() ** 2) - n_samples
    if tk == 0.0:
        return 0.0
    # sqrt(tk/pk) * sqrt(tk/qk) avoids overflow of a single large product.
    return np.sqrt(tk / pk) * np.sqrt(tk / qk)
def entropy(labels):
    """Compute the Shannon entropy of a labeling, in nats.

    Parameters
    ----------
    labels : int array, shape = [n_samples]
        The labels.

    Notes
    -----
    The logarithm used is the natural logarithm (base-e). An empty
    labeling returns 1.0 by convention.
    """
    if len(labels) == 0:
        return 1.0
    _, inverse = np.unique(labels, return_inverse=True)
    counts = np.bincount(inverse).astype(np.float64)
    counts = counts[counts > 0]
    total = np.sum(counts)
    # Evaluate log(a / b) as log(a) - log(b) to limit precision loss.
    probabilities = counts / total
    return -np.sum(probabilities * (np.log(counts) - log(total)))
| |
# coding: utf8
# Module-level constants for the data model.
from datetime import datetime
import datetime as dates  # Ah, what a mess these python names
import gluon.contrib.simplejson as simplejson
STRING_FIELD_LENGTH = 255  # Default length of string fields.
MAX_TEXT_LENGTH = 786432  # 768 * 1024; presumably the text-field cap — confirm usage.
# Sentinel "far future" date. NOTE(review): uses December 1 (not 31) of
# MAXYEAR — confirm that the day-1 choice is intentional.
MAX_DATE = datetime(dates.MAXYEAR, 12, 1)
MAX_GRADE = 10.0
# Expose MAX_GRADE to modules via the (presumably web2py thread-local)
# ``current`` object.
current.MAX_GRADE = MAX_GRADE
# (value, label) choices for review-helpfulness ratings, from -2 to +2.
REVIEW_HELPFULNESS_LIST = [
    ('-2', '-2 : Factually wrong, bogus'),
    ('-1', '-1 : Unhelpful'),
    ('0', ' 0 : Neutral'),
    ('1', '+1 : Somewhat helpful'),
    ('2', '+2 : Very helpful'),
    ]
# Render auth_user references by e-mail address.
db.auth_user._format='%(email)s'
# A named list of users (by e-mail), with its own managers.
db.define_table('user_list',
    Field('name', length=STRING_FIELD_LENGTH),
    # NOTE(review): the default is evaluated when this model file runs;
    # in web2py, model files execute per request, so this is effectively
    # "now" — confirm that is the intent.
    Field('creation_date', 'datetime', default=datetime.utcnow()),
    Field('managers', 'list:string'),
    Field('user_list', 'list:string'),
    #TODO(luca): add a 'managed' field, and a table of users,
    # to allow managing very large sets of users via an API.
    format = '%(name)s',
    )
def represent_user_list(v, r):
    """Render a user_list reference as a link; 'Nobody' when unset/missing."""
    record = db.user_list(v)
    if record is None:
        return A(T('Nobody'), _href=URL('user_lists', 'index', user_signature=True))
    view_url = URL('user_lists', 'index',
                   args=['view', 'user_list', v], user_signature=True)
    return A(record.name, _href=view_url)
# Hide the surrogate key and the auto-filled creation date from forms.
db.user_list.id.readable = db.user_list.id.writable = False
db.user_list.creation_date.writable = db.user_list.creation_date.readable = False
db.user_list.name.required = True
# Members and managers are lists of e-mail addresses.
db.user_list.user_list.requires = [IS_LIST_OF(IS_EMAIL())]
db.user_list.managers.requires = [IS_LIST_OF(IS_EMAIL())]
db.user_list.user_list.label = 'Students'
# Per-user capability and activity record.
db.define_table('user_properties',
    Field('user'),  # Primary key (presumably the user's e-mail — confirm)
    Field('managed_user_lists', 'list:reference user_list'),
    # Venues the user may manage / observe / submit to / rate in.
    Field('venues_can_manage', 'list:reference venue'),
    Field('venues_can_observe', 'list:reference venue'),
    Field('venues_can_submit', 'list:reference venue'),
    Field('venues_can_rate', 'list:reference venue'),
    # Venues where the user has already submitted or rated.
    Field('venues_has_submitted', 'list:reference venue'),
    Field('venues_has_rated', 'list:reference venue'),
    # List of venues where the user has redone reviews.
    # If the user redoes it twice, the venue_id appears twice in the list.
    Field('venues_has_re_reviewed', 'list:reference venue'),
    )
db.user_properties.user.required = True
# A venue is an assignment: submission/review windows plus policy flags.
db.define_table('venue',
    Field('name', length=STRING_FIELD_LENGTH),
    Field('institution', length=STRING_FIELD_LENGTH, required=True),
    Field('description', 'text'), # key for keystore
    # NOTE(review): default evaluated when this model file runs; web2py
    # models run per request, so this is effectively "now" — confirm.
    Field('creation_date', 'datetime', default=datetime.utcnow()),
    Field('created_by', default=get_user_email()),
    # Manager / observer e-mail lists.
    Field('managers', 'list:string'),
    Field('observers', 'list:string'),
    # Optional user_list constraints on who may submit / rate.
    Field('submit_constraint', db.user_list, ondelete='SET NULL'),
    Field('rate_constraint', db.user_list, ondelete='SET NULL'),
    Field('raters_equal_submitters', 'boolean', default=True),
    # Submission and review time windows.
    Field('open_date', 'datetime', required=True),
    Field('close_date', 'datetime', required=True),
    Field('rate_open_date', 'datetime', required=True),
    Field('rate_close_date', 'datetime', required=True),
    Field('allow_multiple_submissions', 'boolean', default=False),
    Field('submission_instructions', 'text'), # key for keystore
    Field('allow_link_submission', 'boolean', default=False),
    Field('allow_file_upload', 'boolean', default=True),
    Field('is_active', 'boolean', required=True, default=True),
    Field('is_approved', 'boolean', required=True, default=False),
    Field('submissions_are_anonymized', 'boolean', default=True),
    Field('can_rank_own_submissions', 'boolean', default=False),
    Field('max_number_outstanding_reviews', 'integer', default=1),
    Field('feedback_is_anonymous', 'boolean', default=True),
    # Visibility / access policy flags.
    Field('submissions_visible_to_all', 'boolean', default=False),
    Field('submissions_visible_immediately', 'boolean', default=False),
    Field('feedback_accessible_immediately', 'boolean', default=False),
    Field('feedback_available_to_all', 'boolean', default=False),
    Field('rating_available_to_all', 'boolean', default=False),
    # NOTE(review): unlike its sibling flags this field has no 'boolean'
    # type, so it becomes a string field with default=False — likely an
    # oversight; confirm before changing (would trigger a schema migration).
    Field('rater_contributions_visible_to_all', default=False),
    Field('number_of_submissions_per_reviewer', 'integer', default=6),
    Field('reviews_as_percentage_of_grade', 'float', default=25),
    Field('latest_grades_date', 'datetime'),
    Field('grades_released', 'boolean', default=False),
    Field('ranking_algo_description', length=STRING_FIELD_LENGTH),
    Field('grading_instructions', 'text'), # key for keystore
    format = '%(name)s',
    )
def represent_venue_name(v, r):
    """Link a venue's name to its detail page (row id taken from ``r``)."""
    detail_url = URL('venues', 'view_venue', args=[r.id])
    return A(v, _href=detail_url)
def represent_venue_id(v, r):
    """Render a venue id as a link to the venue; 'None' when not found."""
    venue = db.venue(v)
    if venue is not None:
        return A(venue.name, _href=URL('venues', 'view_venue', args=[venue.id]))
    return 'None'
def represent_date(v, r):
    """Format a datetime as UTC text; blank for None or the MAX_DATE sentinel."""
    if v is None or v == MAX_DATE:
        return ''
    return v.strftime('%Y-%m-%d %H:%M:%S UTC')
def represent_text_field(v, r):
    """Look up a keystore entry and render it as MARKMIN markup ('' if absent)."""
    content = keystore_read(v)
    if content is None:
        return ''
    return MARKMIN(content)
def represent_plain_text_field(v, r):
    """Look up a keystore entry and return it verbatim ('' when absent)."""
    content = keystore_read(v)
    return '' if content is None else content
def represent_percentage(v, r):
    """Format a 0-100 value as a right-aligned percentage; 'None' if unset."""
    if v is None:
        return 'None'
    return "%3.0f%%" % v
def represent_01_as_percentage(v, r):
    """Format a 0-1 fraction as a right-aligned percentage; 'None' if unset."""
    if v is None:
        return 'None'
    return "%3.0f%%" % (v * 100)
def represent_quality(v, r):
    """Show a quality score with two decimals; 'None' if unset."""
    return 'None' if v is None else "%.2f" % v
def represent_quality_10(v, r):
    """Format a 0-1 quality rescaled to a 0-10 grade with two decimals."""
    if v is None:
        return 'None'
    return "%.2f" % (10.0 * v)
# --- venue presentation / access configuration ---
db.venue.description.represent = represent_text_field
db.venue.created_by.readable = db.venue.created_by.writable = False
db.venue.submit_constraint.represent = represent_user_list
db.venue.rate_constraint.represent = represent_user_list
db.venue.name.label = T('Assignment')
db.venue.name.represent = represent_venue_name
db.venue.name.required = True
db.venue.name.requires = IS_LENGTH(minsize=16)
db.venue.grading_instructions.readable = db.venue.grading_instructions.writable = False
db.venue.grading_instructions.represent = represent_text_field
db.venue.is_approved.writable = False
db.venue.creation_date.writable = db.venue.creation_date.readable = False
db.venue.creation_date.represent = represent_date
db.venue.id.readable = db.venue.id.writable = False
db.venue.is_active.label = 'Active'
db.venue.submit_constraint.label = 'List of students'
db.venue.raters_equal_submitters.readable = db.venue.raters_equal_submitters.writable = False
db.venue.rate_constraint.label = 'Who can rate'
# NOTE: these defaults are evaluated when this model file runs (once per
# request in web2py), so new venues default to "now".
db.venue.open_date.label = 'Submission opening date'
db.venue.open_date.default = datetime.utcnow()
db.venue.close_date.label = 'Submission deadline'
db.venue.close_date.default = datetime.utcnow()
db.venue.rate_open_date.label = 'Reviewing start date'
db.venue.rate_open_date.default = datetime.utcnow()
db.venue.rate_close_date.label = 'Reviewing deadline'
db.venue.rate_close_date.default = datetime.utcnow()
# BUGFIX: IS_INT_IN_RANGE(1, 100) accepts 1..99 (maximum is exclusive);
# the old error message claimed "between 0 and 100".
db.venue.max_number_outstanding_reviews.requires = IS_INT_IN_RANGE(1, 100,
    error_message=T('Enter a number between 1 and 99.'))
db.venue.max_number_outstanding_reviews.readable = db.venue.max_number_outstanding_reviews.writable = False
db.venue.latest_grades_date.writable = False
db.venue.ranking_algo_description.writable = False
db.venue.ranking_algo_description.readable = False
db.venue.number_of_submissions_per_reviewer.writable = False
db.venue.submission_instructions.represent = represent_text_field
db.venue.submissions_are_anonymized.readable = db.venue.submissions_are_anonymized.writable = False
db.venue.allow_multiple_submissions.readable = db.venue.allow_multiple_submissions.writable = False
db.venue.feedback_available_to_all.default = False
db.venue.feedback_available_to_all.readable = db.venue.feedback_available_to_all.writable = False
db.venue.submissions_visible_immediately.default = False
db.venue.submissions_visible_immediately.readable = db.venue.submissions_visible_immediately.writable = False
# BUGFIX: can_rank_own_submissions was configured twice; duplicate removed.
db.venue.can_rank_own_submissions.readable = db.venue.can_rank_own_submissions.writable = False
db.venue.submissions_visible_to_all.readable = db.venue.submissions_visible_to_all.writable = False
db.venue.feedback_accessible_immediately.readable = db.venue.feedback_accessible_immediately.writable = False
db.venue.feedback_is_anonymous.readable = db.venue.feedback_is_anonymous.writable = False
db.venue.rating_available_to_all.readable = db.venue.rating_available_to_all.writable = False
db.venue.rater_contributions_visible_to_all.readable = db.venue.rater_contributions_visible_to_all.writable = False
db.venue.reviews_as_percentage_of_grade.writable = False
db.venue.reviews_as_percentage_of_grade.requires = IS_FLOAT_IN_RANGE(0, 100,
    error_message=T('Please enter a percentage between 0 and 100.'))
db.venue.reviews_as_percentage_of_grade.represent = represent_percentage
db.venue.open_date.represent = db.venue.close_date.represent = represent_date
db.venue.rate_open_date.represent = db.venue.rate_close_date.represent = represent_date
db.venue.grades_released.readable = db.venue.grades_released.writable = False
# A single student submission to a venue (assignment).
db.define_table('submission',
    # Submitter's email; filled automatically from the logged-in user.
    Field('user', default=get_user_email()),
    Field('date_created', 'datetime'),
    Field('date_updated', 'datetime'),
    Field('venue_id', db.venue),
    # Name of the uploaded file as provided by the student's browser.
    Field('original_filename', length=STRING_FIELD_LENGTH),
    Field('content', 'upload'),
    # Alternative to a file upload, when the venue allows link submission.
    Field('link', length=512),
    Field('comment', 'text'), # Key to keystore. Of the person doing the submission.
    Field('quality', 'double'), # Of active learning algo, to assign submissions.
    Field('error', 'double'), # Of active learning algo, to assign submissions.
    Field('true_quality', 'double'), # DEPRECATED. Grade by TA/instructor.
    Field('percentile', 'double'), # DEPRECATED.
    # Bookkeeping counters maintained as review tasks are assigned/completed.
    Field('n_assigned_reviews', 'integer', default=0),
    Field('n_completed_reviews', 'integer', default=0),
    Field('n_rejected_reviews', 'integer', default=0),
    Field('feedback', 'text'), # Key to keystore. Of a TA, grader, etc. Visible to students.
    )
# --- submission presentation / access configuration ---
db.submission.user.label = T('Student')
db.submission.id.readable = db.submission.id.writable = False
db.submission.user.writable = False
# NOTE: defaults are evaluated when this model file runs (per request in web2py).
db.submission.date_created.default = datetime.utcnow()
db.submission.date_created.represent = represent_date
db.submission.date_updated.default = datetime.utcnow()
db.submission.date_updated.represent = represent_date
db.submission.date_created.writable = False
db.submission.date_updated.writable = False
db.submission.original_filename.readable = db.submission.original_filename.writable = False
db.submission.venue_id.readable = db.submission.venue_id.writable = False
db.submission.venue_id.label = T('Assignment')
db.submission.venue_id.represent = represent_venue_id
db.submission.quality.readable = db.submission.quality.writable = False
db.submission.error.readable = db.submission.error.writable = False
db.submission.link.readable = db.submission.link.writable = False
db.submission.link.requires = IS_URL()
db.submission.n_assigned_reviews.writable = db.submission.n_assigned_reviews.readable = False
db.submission.n_completed_reviews.writable = False
db.submission.n_completed_reviews.label = T('Reviews with eval')
db.submission.n_rejected_reviews.writable = False
db.submission.n_rejected_reviews.label = T('Reviews w/o eval')
# BUGFIX: feedback.label was assigned twice ('Instructor Feedback' and then
# 'Instructor feedback'); only the later, effective assignment is kept below.
db.submission.quality.represent = represent_quality
db.submission.error.represent = represent_quality
db.submission.comment.represent = represent_text_field
db.submission.comment.label = T('Content')
db.submission.feedback.represent = represent_text_field
db.submission.feedback.label = T('Instructor feedback')
db.submission.content.label = T('File upload')
db.submission.link.represent = lambda v, r: A(v, _href=v)
# Remove when deprecating.
db.submission.percentile.readable = db.submission.percentile.writable = False
db.submission.percentile.represent = represent_percentage
db.submission.true_quality.readable = db.submission.true_quality.writable = False
db.submission.true_quality.label = T('Control Grade')
def represent_double3(v, r):
    """Format a float with three decimals; 'None' when unset."""
    return 'None' if v is None else "%.3f" % v
def represent_grades(v, r, breaker=BR()):
    """Render a JSON {submission_id: grade} dict as links, best grade first.

    v is the JSON-encoded grade dictionary; r.submission_nicknames is a JSON
    map from submission ids (as strings) to display names.  Entries are
    separated by `breaker`.  Malformed data renders as '-- data error --'.
    NOTE: the BR() default is evaluated once per model run and shared between
    calls; harmless here since it is only rendered.
    """
    if v is None:
        return 'None'
    try:
        grade_map = simplejson.loads(v)
        id_to_nicks = simplejson.loads(r.submission_nicknames)
        # (submission_id, grade) pairs sorted by grade, descending.
        sorted_sub = sorted(
            ((int(k), float(w)) for k, w in grade_map.items()),
            key=lambda el: el[1], reverse=True)
        l = []
        for k, w in sorted_sub:
            nick = id_to_nicks.get(str(k), '???')
            l.append(SPAN(A(nick, _href=URL('feedback', 'view_feedback', args=['s', k])),
                          SPAN(': '), '{:5.2f}'.format(w), breaker))
        return SPAN(*l)
    # BUGFIX: 'except Exception, e' bound an unused name and is invalid in
    # Python 3; the bare form works in both Python 2 and 3.
    except Exception:
        # Malformed JSON or nicknames: fail soft in grid rendering.
        return '-- data error --'
def represent_grades_compact(v, r):
    # Single-line variant of represent_grades: separates entries with '; '
    # instead of HTML line breaks (for dense grid views).
    return represent_grades(v, r, breaker='; ')
db.define_table('comparison', # An ordering of submissions, from Best to Worst.
    # Reviewer's email; filled automatically from the logged-in user.
    Field('user', default=get_user_email()),
    # NOTE: default is evaluated when this model file runs (per request in web2py).
    Field('date', 'datetime', default=datetime.utcnow()),
    Field('venue_id', db.venue),
    Field('ordering', 'list:reference submission'),
    Field('grades', length=512), # This is a json dictionary of submission_id: grade
    Field('submission_nicknames', length=256), # This is a json dictionary mapping submission ids into strings for visualization
    # Superseded comparisons are flagged invalid rather than deleted.
    Field('is_valid', 'boolean', default=True),
    )
def represent_ordering(v, r):
    """Render a list of submission ids as links named via the
    r.submission_nicknames JSON map (best submission first)."""
    if v is None:
        return ''
    try:
        id_to_nicks = simplejson.loads(r.submission_nicknames)
        urls = [SPAN(A(str(id_to_nicks.get(str(el), '')),
                       _href=URL('feedback', 'view_feedback', args=['s', el])), ' ')
                for el in v]
        return SPAN(*urls)
    # BUGFIX: 'except Exception, e' bound an unused name and is invalid in
    # Python 3; the bare form works in both Python 2 and 3.
    except Exception:
        # Malformed nicknames JSON: fail soft in grid rendering.
        return '-- data error --'
# --- comparison presentation / access configuration ---
db.comparison.user.label = T('Student')
db.comparison.grades.represent = represent_grades_compact
db.comparison.venue_id.represent = represent_venue_id
db.comparison.venue_id.label = T('Assignment')
db.comparison.submission_nicknames.readable = db.comparison.submission_nicknames.writable = False
db.comparison.ordering.represent = represent_ordering
db.comparison.date.represent = represent_date
# Invalidated comparisons are kept for the record but hidden from grids.
db.comparison.is_valid.readable = False
# Similar to above, but used for logging of comparison events.
# Append-only: one row per comparison submitted, never invalidated.
db.define_table('comparison_history',
    Field('user', default=get_user_email()),
    # NOTE: default is evaluated when this model file runs (per request in web2py).
    Field('date', 'datetime', default=datetime.utcnow()),
    Field('venue_id', db.venue),
    Field('ordering', 'list:reference submission'),
    Field('grades', length=512), # This is a json dictionary of submission_id: grade
    )
def represent_helpfulness(v, r):
    """Render a helpfulness score as a signed integer (e.g. '+2').

    Returns '' for None and non-numeric values unchanged (legacy data may
    contain free text).
    """
    if v is None:
        return ''
    try:
        i = int(v)
    # BUGFIX: 'except Exception, e' is invalid in Python 3; also narrowed to
    # the exceptions int() actually raises.
    except (TypeError, ValueError):
        return v
    return "%+02d" % i
db.define_table('task', # Tasks a user should complete for reviewing.
    # Reviewer's email; filled automatically from the logged-in user.
    Field('user', default=get_user_email()),
    Field('submission_id', db.submission),
    Field('venue_id', db.venue),
    Field('submission_name'), # Key to keystore. Name of the submission from the point of view of the user.
    # NOTE: defaults are evaluated when this model file runs (per request in web2py).
    Field('assigned_date', 'datetime', default=datetime.utcnow()),
    # MAX_DATE acts as the "not yet completed" sentinel (see represent_date).
    Field('completed_date', 'datetime', default=MAX_DATE),
    Field('is_completed', 'boolean', default=False),
    # True when the reviewer declined to evaluate the submission.
    Field('rejected', 'boolean', default=False),
    Field('comments', 'text'), # Key to keystore. This is the review.
    Field('grade', 'double'), # This is the grade that the student assigned.
    # One of REVIEW_HELPFULNESS_LIST; set by the submission's author.
    Field('helpfulness'),
    Field('feedback', 'text'), # Key to keystore. This is the feedback to the review.
    )
# --- task (review assignment) presentation / access configuration ---
db.task.user.label = T('Student')
db.task.id.readable = db.task.id.writable = False
db.task.user.readable = db.task.user.writable = False
db.task.submission_id.readable = db.task.submission_id.writable = False
db.task.venue_id.readable = db.task.venue_id.writable = False
db.task.assigned_date.writable = False
db.task.assigned_date.label = T('Date review assigned')
db.task.completed_date.writable = False
db.task.is_completed.writable = db.task.is_completed.readable = False
db.task.submission_name.writable = False
db.task.submission_name.represent = represent_plain_text_field
db.task.rejected.readable = db.task.rejected.writable = False
db.task.comments.readable = db.task.comments.writable = False
db.task.comments.represent = represent_text_field
db.task.comments.label = T('Reviewer comments')
db.task.rejected.label = T('Declined to evaluate')
db.task.helpfulness.readable = db.task.helpfulness.writable = False
db.task.feedback.readable = db.task.feedback.writable = False
db.task.feedback.represent = represent_text_field
db.task.feedback.label = T('Review feedback')
db.task.helpfulness.label = T('Review helpfulness')
db.task.venue_id.label = T('Assignment')
db.task.venue_id.represent = represent_venue_id
db.task.assigned_date.represent = represent_date
db.task.completed_date.represent = represent_date
# Helpfulness is constrained to a fixed choice list and rendered as '+N'.
db.task.helpfulness.requires = IS_IN_SET(REVIEW_HELPFULNESS_LIST)
db.task.helpfulness.represent = represent_helpfulness
db.task.grade.readable = db.task.grade.writable = False
# Per-student, per-venue grade summary produced by the crowd-grading algorithm.
db.define_table('grades',
    Field('venue_id', db.venue, required=True),
    # Student email.
    Field('user', required=True),
    Field('submission_grade', 'double'), # Computed crowd-grade
    Field('submission_percentile', 'double'), # Quality percentile of submission.
    Field('submission_control_grade', 'double'), # Assigned by a TA by reviewing the submission.
    Field('accuracy', 'double'), # "reviewer" grade
    Field('accuracy_percentile', 'double'),
    Field('reputation', 'double'), # For our info.
    # Number of ratings the student produced in this venue.
    Field('n_ratings', 'integer'),
    Field('grade', 'double'), # Algo-assigned final grade.
    Field('percentile', 'double'), # Percentile of the final grade
    Field('assigned_grade', 'double'), # Assigned by instructor, due to percentile.
    )
# --- grades presentation / access configuration ---
db.grades.id.readable = db.grades.id.writable = False
db.grades.percentile.represent = represent_percentage
db.grades.submission_grade.writable = False
db.grades.submission_grade.represent = represent_quality
db.grades.submission_grade.label = T('Submission crowd-grade')
db.grades.submission_percentile.label = T('Submission %')
db.grades.submission_percentile.represent = represent_percentage
db.grades.submission_percentile.writable = False
db.grades.user.writable = False
db.grades.venue_id.represent = represent_venue_id
db.grades.venue_id.label = T('Assignment')
db.grades.venue_id.writable = False
# accuracy is stored in 0-1; the other percentiles are already in 0-100.
db.grades.accuracy.represent = represent_01_as_percentage
db.grades.accuracy.writable = False
db.grades.accuracy_percentile.represent = represent_percentage
db.grades.accuracy_percentile.writable = False
db.grades.reputation.represent = represent_percentage
db.grades.assigned_grade.label = "Final grade"
db.grades.assigned_grade.represent = represent_quality
db.grades.n_ratings.writable = False
db.grades.reputation.readable = db.grades.reputation.writable = False
# BUGFIX: percentile.label was assigned twice ('Overall percentile', then
# 'Overall %'); only the later, effective assignment is kept below.
db.grades.percentile.writable = False
db.grades.grade.writable = False
db.grades.grade.label = T('Overall crowd-grade')
db.grades.percentile.label = T('Overall %')
db.grades.grade.represent = represent_quality
db.grades.submission_control_grade.readable = db.grades.submission_control_grade.writable = False
db.grades.submission_control_grade.represent = represent_quality
db.grades.submission_control_grade.label = T('Control grade')
# This table stores experimental grades for submissions, derived with
# experimental algorithms.  Rows are tied to a run via run_id (see the
# run_parameters table below).
db.define_table('grades_exp',
    Field('venue_id', db.venue, required=True),
    # Student email.
    Field('user', required=True),
    Field('run_id'),
    Field('subm_grade', 'double'), # Algo grade of submission
    Field('subm_percent', 'double'), # Algo percentile of submission
    Field('subm_confidence', 'double'), # Confidence in algo grade of submission
    Field('review_grade', 'double'),
    Field('review_percent', 'double'),
    Field('n_ratings', 'integer'),
    Field('reputation', 'double'),
    Field('grade', 'double'), # Final grade of student in assignment.
    )
# This table stores information about experimental runs.
# One row per run; grades_exp rows reference it via run_id.
db.define_table('run_parameters',
    Field('venue_id', db.venue, required=True),
    Field('run_id'),
    Field('params', 'text'), # Parameters of the run.
    # NOTE: default is evaluated when this model file runs (per request in web2py).
    Field('date', 'datetime', default=datetime.utcnow()),
    )
#####################
def represent_user_by_submission_feedback(v, r):
    # Link a user (email) to the feedback page for their work in r's venue.
    return A(v, _href=URL('feedback', 'view_feedback', args=['u', r.venue_id, v]))
# ----------------------------------------------------------------------
#! /usr/bin/env python
"""
Notes
-----
Calculations are carried out with numpy.float64 precision.
This Python implementation is not optimized for speed.
Angles are in radians unless specified otherwise.
Quaternions ix+jy+kz+w are represented as [x, y, z, w].
"""
import rospy
# Messages
from std_msgs.msg import Float64
from sensor_msgs.msg import JointState
from baxter_core_msgs.msg import EndpointState
from omni_msgs.msg import OmniState, OmniFeedback, OmniButtonEvent
from geometry_msgs.msg import Vector3, Point, PoseStamped, Quaternion, Wrench, Transform, PoseStamped
from visualization_msgs.msg import Marker
from std_msgs.msg import Bool
# State Machine
import smach
import smach_ros
from smach import CBState
# Math
from math import pi, exp, sin, sqrt
import numpy as np
import tf.transformations as tr
# Quaternions tools
import PyKDL
import time
# Indices of the omni device buttons in button-state lists (see buttons_cb).
GREY_BUTTON = 0
WHITE_BUTTON = 1
class TextColors:
    """ANSI escape sequences for colored terminal output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'

    def disable(self):
        """Turn coloring off by blanking every escape code on this instance."""
        for name in ('HEADER', 'OKBLUE', 'OKGREEN', 'WARNING', 'FAIL', 'ENDC'):
            setattr(self, name, '')
class RatePositionController:
STATES = ['GO_TO_CENTER', 'POSITION_CONTROL', 'VIBRATORY_PHASE', 'RATE_CONTROL', 'RATE_COLLISION']
def __init__(self):
# Create a SMACH state machine
self.sm = smach.StateMachine(outcomes=['succeeded', 'aborted'])
with self.sm:
# Add states to the state machine
smach.StateMachine.add('GO_TO_CENTER', CBState(self.go_to_center, cb_args=[self]),
transitions={'lock': 'GO_TO_CENTER', 'succeeded_pos': 'POSITION_CONTROL','succeeded_rate': 'RATE_CONTROL', 'aborted': 'aborted'})
smach.StateMachine.add('POSITION_CONTROL', CBState(self.position_control, cb_args=[self]),
transitions={'stay': 'POSITION_CONTROL', 'leave': 'GO_TO_CENTER', 'aborted': 'aborted'})
#~ smach.StateMachine.add('VIBRATORY_PHASE', CBState(self.vibratory_phase, cb_args=[self]),
#~ transitions={'vibrate': 'VIBRATORY_PHASE', 'succeeded': 'RATE_CONTROL', 'aborted': 'aborted'})
smach.StateMachine.add('RATE_CONTROL', CBState(self.rate_control, cb_args=[self]),
transitions={'stay': 'RATE_CONTROL', 'leave': 'GO_TO_CENTER', 'collision': 'RATE_COLLISION', 'aborted': 'aborted'})
smach.StateMachine.add('RATE_COLLISION', CBState(self.rate_collision, cb_args=[self]),
transitions={'succeeded': 'GO_TO_CENTER', 'aborted': 'aborted'})
# Read all the parameters from the parameter server
# Topics to interact
master_name = self.read_parameter('~master_name', 'phantom')
slave_name = self.read_parameter('~slave_name', 'grips')
self.master_state_topic = '/%s/state' % master_name
#~ self.feedback_topic = '/%s/force_feedback' % master_name ##
self.slave_state_topic = '/%s/state' % slave_name
self.ik_mc_topic = '/%s/ik_command' % slave_name
self.gripper_topic = '/%s/GRIP/command' % slave_name
self.button_topic = '/%s/button' % master_name
self.slave_collision_topic = '/%s/collision' % slave_name
self.sm_control_topic = '/sm_control'
self.reset_signal = '/reset'
#~ self.ext_forces_topic = '/%s/external_forces' % slave_name ##
# Workspace definition
self.units = self.read_parameter('~units', 'mm')
width = self.read_parameter('~workspace/width', 140.0)
height = self.read_parameter('~workspace/height', 100.0)
depth = self.read_parameter('~workspace/depth', 55.0)
self.center_pos = self.read_parameter('~workspace/center', [0, 0 ,0])
self.workspace = np.array([width, depth, height])
self.hysteresis = self.read_parameter('~hysteresis', 3.0)
self.pivot_dist = self.read_parameter('~pivot_dist', 5.0)
# Force feedback parameters
self.k_center = self.read_parameter('~k_center', 0.1)
self.b_center = self.read_parameter('~b_center', 0.003)
self.k_rate = self.read_parameter('~k_rate', 0.05)
self.b_rate = self.read_parameter('~b_rate', 0.003)
# Position parameters
self.hysteresis = self.read_parameter('~hysteresis', 1.0)
self.pivot_dist = self.read_parameter('~pivot_dist', 5.0)
self.publish_frequency = self.read_parameter('~publish_rate', 1000.0)
self.position_ratio = self.read_parameter('~position_ratio', 250)
self.axes_rotation_1 = self.read_parameter('~axes_rotation_1', [0, 0, 0])
self.angle_rotation_1 = self.read_parameter('~angle_rotation_1',0.0)
self.axes_rotation_2 = self.read_parameter('~axes_rotation_2', [0, 0, 0])
self.angle_rotation_2 = self.read_parameter('~angle_rotation_2', 0.0)
self.axes_rotation_3 = self.read_parameter('~axes_rotation_3', [0, 0, 0])
self.angle_rotation_3 = self.read_parameter('~angle_rotation_3', 0.0)
self.position_axes = [0, 1, 2]
self.position_sign = np.array([1.0, 1.0, 1.0])
self.axes_mapping = self.read_parameter('~axes_mapping', ['x', 'y' ,'z'])
rospy.logwarn('axes_mapping[0] -> %s' % self.axes_mapping[0])
rospy.logwarn('axes_mapping[1] -> %s' % self.axes_mapping[1])
rospy.logwarn('axes_mapping[2] -> %s' % self.axes_mapping[2])
if len(self.axes_mapping) != 3:
rospy.logwarn('The invalid number of values in [axes_mapping]. Received 3, expected %d' % len(self.axes_mapping))
for i, axis in enumerate(self.axes_mapping):
axis = axis.lower()
if '-' == axis[0]:
axis = axis[1:]
self.position_sign[i] = -1.0
if axis not in ('x','y','z'):
rospy.logwarn('Invalid axis %s given in [axes_mapping]' % axis)
self.position_axes[i] = ['x','y','z'].index(axis)
# Vibration parameters
self.vib_a = self.read_parameter('~vibration/a', 2.0) # Amplitude (mm)
self.vib_c = self.read_parameter('~vibration/c', 5.0) # Damping
self.vib_freq = self.read_parameter('~vibration/frequency', 30.0) # Frequency (Hz)
self.vib_time = self.read_parameter('~vibration/duration', 0.3) # Duration (s)
self.vib_start_time = 0.0
# Rate parameters
self.rate_pivot = np.zeros(3)
self.rate_gain = self.read_parameter('~rate_gain', 1.0)
# Initial values
self.center_pos = np.array([0, 0, 0])
self.frame_id = self.read_parameter('~reference_frame', 'world')
self.colors = TextColors()
self.gripper_cmd = 0.0
self.master_pos = None
self.master_rot = np.array([0, 0, 0, 1])
self.master_vel = np.zeros(3)
self.master_dir = np.zeros(3)
self.slave_pos = None
self.slave_rot = np.array([0, 0, 0, 1])
self.slave_collision = False
self.timer = None
#~ self.force_feedback = np.zeros(3) ##
#~ self.ext_forces = np.zeros(3) ##
self.gripper_value = 0.5
self.button_states =np.array([0, 0])
self.time_counter = time.clock()
self.change_mode = False
self.pos_control = True
self.reset_value = 0.0
self.block_button = 0.0
self.bef_state_white = 0.0
self.bef_state_grey = 0.0
# Synch
self.slave_synch_pos = np.zeros(3)
self.slave_synch_rot = np.array([0, 0, 0, 1])
self.master_synch_rot = np.array([0, 0, 0, 1])
# Button
self.prev_buttons = [0] * 2
self.buttons = [False] * 2
self.buttons[WHITE_BUTTON] = True
# Setup Subscribers/Publishers
#~ self.feedback_pub = rospy.Publisher(self.feedback_topic, OmniFeedback)
self.sm_control_pub = rospy.Publisher(self.sm_control_topic, Float64)
self.ik_mc_pub = rospy.Publisher(self.ik_mc_topic, PoseStamped)
self.gripper_pub = rospy.Publisher(self.gripper_topic, Float64)
self.reset_pub = rospy.Publisher(self.reset_signal, Float64)
self.vis_pub = rospy.Publisher('visualization_marker', Marker)
rospy.Subscriber(self.master_state_topic, OmniState, self.cb_master_state)
rospy.Subscriber(self.slave_state_topic, PoseStamped, self.cb_slave_state)
rospy.Subscriber(self.slave_collision_topic, Bool, self.cb_slave_collision)
#~ rospy.Subscriber(self.ext_forces_topic, OmniFeedback, self.cb_ext_forces) ##
rospy.Subscriber(self.button_topic, OmniButtonEvent, self.buttons_cb)
self.loginfo('Waiting for [%s] and [%s] topics' % (self.master_state_topic, self.slave_state_topic))
while not rospy.is_shutdown():
if (self.slave_pos == None) or (self.master_pos == None):
rospy.sleep(0.01)
else:
self.loginfo('Rate position controller running')
# Register rospy shutdown hook
rospy.on_shutdown(self.shutdown_hook)
break
# Make sure the first command sent to the slave is equal to its current position6D
self.command_pos = np.array(self.slave_pos)
self.command_rot = np.array(self.slave_rot)
# Start the timer that will publish the ik commands
self.loginfo('Publisher frequency: [%f]' % self.publish_frequency)
self.timer = rospy.Timer(rospy.Duration(1.0/self.publish_frequency), self.publish_command)
self.loginfo('State machine state: GO_TO_CENTER')
@smach.cb_interface(outcomes=['lock', 'succeeded_rate','succeeded_pos', 'aborted'])
def go_to_center(user_data, self):
if not np.allclose(np.zeros(3), self.master_pos, atol=self.hysteresis):
#~ self.force_feedback = (self.k_center * self.master_pos + self.b_center * self.master_vel) * -1.0
self.sm_control_pub.publish(4.0)
return 'lock'
else:
if self.pos_control:
self.sm_control_pub.publish(1.0)
#~ self.force_feedback = np.zeros(3)
self.slave_synch_pos = np.array(self.slave_pos)
self.slave_synch_rot = np.array(self.slave_rot)
self.master_synch_rot = np.array(self.master_rot)
self.command_pos = np.array(self.slave_pos)
self.command_rot = np.array(self.slave_rot)
self.draw_position_region(self.slave_synch_pos)
self.loginfo('State machine transitioning: GO_TO_CENTER:succeeded-->POSITION_CONTROL')
return 'succeeded_pos'
elif not self.pos_control:
self.sm_control_pub.publish(1.0)
#~ self.force_feedback = np.zeros(3)
self.slave_synch_pos = np.array(self.slave_pos)
self.slave_synch_rot = np.array(self.slave_rot)
self.master_synch_rot = np.array(self.master_rot)
self.command_pos = np.array(self.slave_pos)
self.command_rot = np.array(self.slave_rot)
self.draw_position_region(self.slave_synch_pos)
self.loginfo('State machine transitioning: GO_TO_CENTER:succeeded-->RATE_CONTROL')
return 'succeeded_rate'
@smach.cb_interface(outcomes=['stay', 'leave', 'aborted'])
def position_control(user_data, self):
if not self.change_mode:
#~ if reset_value:
#~ self.slave_synch_pos = np.array(self.slave_pos)
#~ self.slave_synch_rot = np.array(self.slave_rot)
#~ self.master_synch_rot = np.array(self.master_rot)
self.command_pos = self.slave_synch_pos + self.master_pos / self.position_ratio
self.command_rot = np.array(self.master_rot)
self.sm_control_pub.publish(1.0)
#~ self.force_feedback = self.ext_forces ##
return 'stay'
else:
self.sm_control_pub.publish(3.0)
#~ self.force_feedback = np.zeros(3)
self.command_pos = np.array(self.slave_pos)
self.command_rot = np.array(self.slave_rot)
self.vib_start_time = rospy.get_time()
self.change_mode = False
self.pos_control = False
self.loginfo('State machine transitioning: POSITION_CONTROL:leave-->RATE_CONTROL')
return 'leave'
@smach.cb_interface(outcomes=['vibrate', 'succeeded', 'aborted'])
def vibratory_phase(user_data, self):
if rospy.get_time() < self.vib_start_time + self.vib_time:
self.sm_control_pub.publish(3.0)
#~ t = rospy.get_time() - self.vib_start_time
#~ amplitude = -self.vib_a*exp(-self.vib_c*t)*sin(2*pi*self.vib_freq*t);
#~ self.force_feedback = amplitude * self.master_dir
return 'vibrate'
else:
self.sm_control_pub.publish(2.0)
# The pivot point should be inside the position area but it's better when we use the center
#~ self.rate_pivot = self.master_pos - self.pivot_dist * self.normalize_vector(self.master_pos)
#~ self.force_feedback = np.zeros(3)
self.rate_pivot = np.array(self.master_pos)
self.loginfo('State machine transitioning: VIBRATORY_PHASE:succeeded-->RATE_CONTROL')
return 'succeeded'
@smach.cb_interface(outcomes=['stay', 'leave', 'collision', 'aborted'])
def rate_control(user_data, self):
if not self.slave_collision:
if not self.change_mode:
#~ if reset_value:
#~ self.slave_synch_pos = np.array(self.slave_pos)
#~ self.slave_synch_rot = np.array(self.slave_rot)
#~ self.master_synch_rot = np.array(self.master_rot)
# Send the force feedback to the master
#~ self.force_feedback = (self.k_rate * self.master_pos + self.b_rate * self.master_vel) * -1.0 ##
# Send the rate command to the slave
distance = sqrt(np.sum((self.master_pos - self.rate_pivot) ** 2)) / self.position_ratio
self.command_pos += (self.rate_gain * distance * self.normalize_vector(self.master_pos)) / self.position_ratio
self.command_rot = np.array(self.master_synch_rot)
self.sm_control_pub.publish(2.0)
return 'stay'
else :
self.command_pos = np.array(self.slave_pos)
self.command_rot = np.array(self.slave_rot)
#~ self.force_feedback = np.zeros(3)
self.sm_control_pub.publish(4.0)
self.loginfo('State machine transitioning: RATE_CONTROL:leave-->GO_TO_CENTER')
self.change_mode = False
self.pos_control = True
return 'leave'
else:
self.command_pos = np.array(self.slave_pos)
self.command_rot = np.array(self.slave_rot)
self.sm_control_pub.publish(0.0)
#~ self.force_feedback = np.zeros(3)
self.loginfo('State machine transitioning: RATE_CONTROL:collision-->RATE_COLLISION')
return 'collision'
@smach.cb_interface(outcomes=['succeeded', 'aborted'])
def rate_collision(user_data, self):
self.loginfo('State machine transitioning: RATE_COLLISION:succeeded-->GO_TO_CENTER')
self.change_mode = False
self.pos_control = True
return 'succeeded'
    def execute(self):
        """Run the state machine until it reaches a terminal outcome."""
        self.sm.execute()
    def shutdown_hook(self):
        """rospy shutdown callback: preempt the state machine and stop the
        ik-command publishing timer."""
        # Stop the state machine
        self.sm.request_preempt()
        # Stop the publisher timer
        self.timer.shutdown()
    def read_parameter(self, name, default):
        """Read a ROS parameter, warning and falling back to `default`
        when it is not set on the parameter server."""
        if not rospy.has_param(name):
            rospy.logwarn('Parameter [%s] not found, using default: %s' % (name, default))
        return rospy.get_param(name, default)
    def loginfo(self, msg):
        """Log `msg` in blue; uses rospy.logwarn so it stands out on the console."""
        rospy.logwarn(self.colors.OKBLUE + str(msg) + self.colors.ENDC)
def inside_workspace(self, point):
# The workspace as an ellipsoid: http://en.wikipedia.org/wiki/Ellipsoid
return np.sum(np.divide(point**2, self.workspace**2)) < 1
def normalize_vector(self, v):
result = np.array(v)
norm = np.sqrt(np.sum((result ** 2)))
if norm:
result /= norm
return result
def change_axes(self, array, index=None, sign=None):
if index == None:
index = self.position_axes
if sign == None:
sign = self.position_sign
result = np.zeros(len(array))
for i, idx in enumerate(index):
result[i] = array[idx] * sign[idx]
return result
def change_force_axes(self, array, index=None, sign=None):
if index == None:
index = self.position_axes
if sign == None:
sign = self.position_sign
result = np.zeros(len(array))
for i, idx in enumerate(index):
result[i] = array[idx] * sign[i] #~ ??
return result
#~ def send_feedback(self):
#~
#~ feedback_msg = OmniFeedback()
#~ force = self.change_force_axes(self.force_feedback)
#~ pos = self.change_axes(self.center_pos)
#~ feedback_msg.force = Vector3(*force)
#~ feedback_msg.position = Vector3(*pos)
#~ self.feedback_pub.publish(feedback_msg)
# DO NOT print to the console within this function
def cb_master_state(self, msg):
self.master_real_pos = np.array([msg.pose.position.x, msg.pose.position.y, msg.pose.position.z])
pos = np.array([msg.pose.position.x, msg.pose.position.y, msg.pose.position.z]) - self.center_pos
vel = np.array([msg.velocity.x, msg.velocity.y, msg.velocity.z])
self.master_pos = self.change_axes(pos)
self.master_vel = self.change_axes(vel)
# Rotate tu use the same axes orientation between master and slave
real_rot = np.array([msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w])
q_1 = tr.quaternion_about_axis(self.angle_rotation_1, self.axes_rotation_1)
aux_rot = tr.quaternion_multiply(q_1, real_rot)
q_2 = tr.quaternion_about_axis(self.angle_rotation_2, self.axes_rotation_2)
aux_rot_2 = tr.quaternion_multiply(q_2, aux_rot)
q_3 = tr.quaternion_about_axis(self.angle_rotation_3, self.axes_rotation_3)
self.master_rot = tr.quaternion_multiply(q_3, aux_rot_2)
#Normalize velocitity
self.master_dir = self.normalize_vector(self.master_vel)
def cb_slave_state(self, msg):
self.slave_pos = np.array([msg.pose.position.x, msg.pose.position.y, msg.pose.position.z])
self.slave_rot = np.array([msg.pose.orientation.x, msg.pose.orientation.y, msg.pose.orientation.z, msg.pose.orientation.w])
    def cb_slave_collision(self, msg):
        # Latch the latest collision flag reported by the slave.
        self.slave_collision = msg.data
#~ def cb_ext_forces(self, msg): ##
#~ self.ext_forces = np.array([msg.force.x, msg.force.y, msg.force.z]) ##
    def buttons_cb(self, msg):
        """Handle omni button events.

        Button semantics (reconstructed from the code -- confirm against the
        device behavior):
          * grey+white together: toggle block_button, which switches the pair
            between mode-change duty (block 0) and gripper duty (block 1).
          * white held: dead-man switch -- reset_value is 1.0 while pressed.
          * grey pressed alone (block 0, fresh press): request a control-mode
            change (consumed by position_control / rate_control).
          * grey/white held (block 1): close/open the gripper in tiny steps.
        """
        self.button_states = [msg.grey_button, msg.white_button]
        # TRANSITION BLOCK: both buttons pressed toggles the button mapping.
        if (self.button_states[GREY_BUTTON] and self.button_states[WHITE_BUTTON]):
            self.block_button= not self.block_button
            self.change_mode = False
            self.loginfo('TRANSITION')
        # Emergency mode - DEAD MAN: white button keeps the reset line high.
        if (self.button_states[WHITE_BUTTON]):
            self.reset_value = 1.0
            self.loginfo('dead men ')
        else:
            self.reset_value = 0.0
        # Edge trigger: only react when both buttons were released last event.
        if( self.bef_state_white == 0.0 and self.bef_state_grey == 0.0):
            #BLOCK0 = reset & change_mode
            if (self.button_states[GREY_BUTTON] and self.block_button==0.0 and (not self.button_states[WHITE_BUTTON])):
                self.change_mode = True
                self.loginfo('CHANGE MODE')
            else:
                self.change_mode = False
                self.loginfo('False Change mode')
        #BLOCK1 = open & close grip (level-triggered: the tiny 0.0001 step
        # repeats on every event while the button is held).
        if(self.block_button==1):
            if (self.button_states[GREY_BUTTON]==1.0 and self.button_states[WHITE_BUTTON]==0.0 and (not self.button_states[WHITE_BUTTON])): # Close
                self.gripper_value += 0.0001
                self.loginfo('CLOSE')
                if self.gripper_value > 1.0:
                    self.gripper_value = 1.0
            elif (self.button_states[GREY_BUTTON]==0.0 and self.button_states[WHITE_BUTTON]==1.0 and (not self.button_states[GREY_BUTTON])): # Open
                self.gripper_value -= 0.0001
                self.loginfo('COPEN')
                if self.gripper_value < 0.0:
                    self.gripper_value = 0.0
        # Remember current states for the next edge detection.
        self.bef_state_white = self.button_states[WHITE_BUTTON]
        self.bef_state_grey = self.button_states[GREY_BUTTON]
        self.reset_pub.publish(Float64(self.reset_value))
def publish_command(self, event):
position, orientation = self.command_pos, self.command_rot
ik_mc_msg = PoseStamped()
ik_mc_msg.header.frame_id = self.frame_id
ik_mc_msg.header.stamp = rospy.Time.now()
ik_mc_msg.pose.position = Point(*position)
ik_mc_msg.pose.orientation = Quaternion(*orientation)
#Button selection
#~ if (self.button_states[GREY_BUTTON]==1 and self.button_states[WHITE_BUTTON]==0): # Close
#~ self.gripper_value += 0.0001
#~ if self.gripper_value > 1.0:
#~ self.gripper_value = 1.0
#~ elif (self.button_states[GREY_BUTTON]==0 and self.button_states[WHITE_BUTTON]==1 ): # Open
#~ self.gripper_value -= 0.0001
#~ if self.gripper_value < 0.0:
#~ self.gripper_value = 0.0
try:
self.gripper_pub.publish(Float64(self.gripper_value))
self.ik_mc_pub.publish(ik_mc_msg)
#~ t1 = time.time()
#~ t2 =time.time()
except rospy.exceptions.ROSException:
pass
#~ dif_time = t2-t1
#~ self.loginfo('Diference of time %0.3f ms' % (dif_time * 1000.0))
#~ self.time_counter = time.clock()
def draw_position_region(self, center_pos):
marker = Marker()
marker.header.frame_id = self.frame_id
marker.header.stamp = rospy.Time.now()
marker.id = 0;
marker.type = marker.SPHERE
marker.ns = 'position_region'
marker.action = marker.ADD
marker.pose.position.x = center_pos[0]
marker.pose.position.y = center_pos[1]
marker.pose.position.z = center_pos[2]
#~ Workspace ellipsoid: self.workspace
marker.scale.x = 0.2 * self.workspace[0]/self.position_ratio
marker.scale.y = 0.2 * self.workspace[1]/self.position_ratio
marker.scale.z = 0.2 * self.workspace[2]/self.position_ratio
marker.color.a = 0.5
marker.color.r = 1.0
marker.color.g = 1.0
marker.color.b = 0.2
#~ Publish
self.vis_pub.publish(marker)
if __name__ == '__main__':
    # Entry point: start the ROS node and run the controller until shutdown.
    rospy.init_node('rate_position_controller', log_level=rospy.WARN)
    try:
        controller = RatePositionController()
        controller.execute()
    except rospy.exceptions.ROSInterruptException:
        # Normal shutdown (Ctrl-C / node kill): exit quietly.
        pass
| |
import urlparse
import struct
import socket
import pprint
import sys
import threading
import Queue
import errno
import random
import time
#HOST = socket.gethostbyname('cs5700cdnproject.ccs.neu.edu')
HOST = '129.10.117.186'
MAPPINGS_UPDATE_PORT = 51004
CLIENTS_BROADCAST_PORT = 51003
# Given: The data and address of the client
# Returns: Unpacks the data and flags
class dns():
    """Parses one DNS query and builds the A-record response for it.

    Unpacks the 12-byte header and the question section; if the queried
    name is not "cs5700cdn.example.com" the raw query is echoed back and
    ``send_answer`` is cleared so the caller skips responding.
    """

    def __init__(self, data, address, sock):
        # DNS header is six 16-bit big-endian fields:
        # ID, FLAGS, QDCOUNT, ANCOUNT, NSCOUNT, ARCOUNT (RFC 1035 4.1.1).
        header = struct.Struct("!6H").unpack_from(data)
        # BUG FIX: ID is the first word on the wire and FLAGS the second;
        # the original assigned them the other way round, so every bit
        # test below was parsing the query ID instead of the flags.
        self.id = header[0]
        self.flag = header[1]
        self.question = header[2]
        self.answer = header[3]
        self.authority = header[4]
        self.additional_info = header[5]
        self.offset = None
        self.question_head = None
        self.ques_type = None
        self.ques_name = None
        self.ques_class = None
        self.sock = sock
        self.send_answer = True
        # Decode the individual flag bits from the 16-bit flag word.
        self.qr = 1 if (self.flag & 0x8000) else 0
        self.opcode = (self.flag & 0x7800) >> 11
        self.aa = 1 if (self.flag & 0x400) else 0
        self.tc = 1 if (self.flag & 0x200) else 0
        self.rd = 1 if (self.flag & 0x100) else 0
        # BUG FIX: the original's else-branch cleared self.rd here instead
        # of self.ra.
        self.ra = 1 if (self.flag & 0x80) else 0
        self.z = (self.flag & 0x70) >> 4
        # Collapse any non-zero RCODE to 1 ("an error occurred").
        self.rcode = 1 if (self.flag & 0xF) else 0
        self.offset = struct.Struct("!6H").size
        self.question_head, self.offset = self.question_header(data, address)

    # Given: the raw query and the address of the client
    # Returns: (questions, offset) after the question section.  If the
    # queried name is not "cs5700cdn.example.com" the query is bounced
    # back to the client and (0, 0) is returned with send_answer cleared.
    def question_header(self, data, address):
        ques_header = struct.Struct('!2H')
        tot_ques = []
        for _ in range(self.question):
            self.ques_name, self.offset = get_value(self.offset, data)
            self.ques_type, self.ques_class = ques_header.unpack_from(
                data, self.offset)
            self.offset = self.offset + ques_header.size
            if self.ques_name == ["cs5700cdn", "example", "com"]:
                tot_ques.append(self.ques_name)
                return tot_ques, self.offset
            # Unknown name: echo the query back and suppress any answer.
            self.sock.sendto(data, address)
            self.send_answer = False
            return 0, 0
        # No questions in the packet: nothing to answer.
        return tot_ques, self.offset

    # Given: the raw query and the dotted-quad ip of the chosen replica
    # Returns: the DNS response packet with that ip in the answer section
    def dns_response(self, data, mapping):
        answer = ''
        answer += data[:2] + "\x81\x80"  # same ID; standard response, RA set
        # Echo QDCOUNT as ANCOUNT too; zero NSCOUNT/ARCOUNT.
        answer += data[4:6] + data[4:6] + "\x00\x00\x00\x00"
        answer += data[12:]              # original question section
        answer += "\xc0\x0c"             # compression pointer to the name
        # TYPE A, CLASS IN, TTL 60, RDLENGTH 4.
        answer += "\x00\x01\x00\x01\x00\x00\x00\x3c\x00\x04"
        # FIX: py2-only "lambda(x)" syntax replaced with portable form;
        # the bare "except: sys.exit()" that hid errors here was removed.
        answer += ''.join(map(lambda x: chr(int(x)), mapping.split('.')))
        return answer
# Given: the data received from client and ip address of replica server
# Returns: The DNS response section with the ip address in Answer section
def dns_response(self, data, mapping):
try:
answer = ''
answer += data[:2] + "\x81\x80"
answer += data[4:6] + data[4:6] + "\x00\x00\x00\x00"
answer += data[12:]
answer += "\xc0\x0c"
answer += "\x00\x01\x00\x01\x00\x00\x00\x3c\x00\x04"
answer += str.join("",map(lambda(x): chr(int(x)),mapping.split('.')))
return answer
except:
sys.exit()
# Given: offset value and the data
# Returns: Returns the question name and the offset value
# Given: offset value and the raw packet data
# Returns: (list of name labels, offset of the first byte after the name)
def get_value(offset, data):
    """Decode a DNS domain name starting at |offset| in |data|.

    Handles both inline labels and compression pointers (RFC 1035
    section 4.1.4).  For a pointer, the returned offset is just past the
    2-byte pointer itself.
    """
    name = []
    while True:
        size, = struct.unpack_from("!B", data, offset)
        if (0xC0 & size) == 0xC0:
            # Compression pointer: low 14 bits give the offset of the rest
            # of the name; a pointer always terminates this name.
            pointer, = struct.unpack_from("!H", data, offset)
            offset = offset + 2
            # BUG FIX: the original referenced an undefined name 'labels'
            # and concatenated the (name, offset) tuple returned by the
            # recursive call onto a list -- it always raised, and the
            # bare 'except: sys.exit()' masked the crash.
            suffix, _ = get_value(pointer & 0x3FFF, data)
            return name + suffix, offset
        offset += 1
        if size == 0:
            # Zero-length label terminates the name.
            return name, offset
        label, = struct.unpack_from("!%ds" % size, data, offset)
        name.append(label)
        offset = offset + size
# Performs the DNS service
def service(s, data, addr, replicas):
# Unpack the headers
#dns_request(data, addr)
global mappings, mappings_lock
req_obj = dns(data,addr, s)
ip = None
with mappings_lock:
try:
value = mappings[addr[0]]
ip = value[0]
# print('Active measurement mapping! ' + ip)
except KeyError:
# print('No mapping found, assigning random replica server.')
ip = replicas[random.randint(0, len(replicas)-1)]
# mappings[addr[0]] = [ip, float('inf')]
# Send the address of the replica server
if req_obj.send_answer:
response = req_obj.dns_response(data,ip)
s.sendto(response,addr)
# print('Serviced: ' + str(addr))
def translations(port, ns, replicas):
    """DNS front-end loop: bind a UDP socket on |port|, record each
    client seen, and hand every query to a service() thread.

    Only runs when |ns| is the expected CDN name; anything else is a
    command-line error.
    """
    global clients, clients_lock
    if ns == "cs5700cdn.example.com":
        try:
            #UDP Socket Creation
            s= socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            try:
                #Binding with the IP Address where the DNS Server runs
                #ip = socket.gethostbyname(HOST)
                #ip = socket.gethostbyname('localhost')
                ip = HOST
                s.bind((ip,port))
            except socket.error,msg:
                print msg
                sys.exit(1)
            while True:
                # print('Waiting for request')
                data,address= s.recvfrom(1024)
                # Remember this client so it gets broadcast to the
                # replicas for active measurement.
                with clients_lock:
                    clients.add(address[0])
                # One thread per query keeps the receive loop responsive.
                t = threading.Thread(target=service, args=(s,data,address, replicas))
                t.start()
        except socket.error as e:
            print('socket error caught: ' + str(e))
            sys.exit(1)
    else:
        print "CLI Error"
# Starts off the threads responsible for active measurement
# Starts off the threads responsible for active measurement
def active_measurements(replicas):
    """Spawn the client-broadcast and mapping-update worker threads."""
    for worker in (broadcast_new_clients, updated_mappings):
        threading.Thread(target=worker, args=(replicas,)).start()
# Broadcasts the current set of clients to all the replica
# servers with which connection is established.
# replicas -> 9-tuple of ip addresses as strings
def broadcast_new_clients(replicas):
    """Every 3 seconds, send the current client set to each connected
    replica server, '$'-separated, accepting new replica connections on
    the way and pruning connections whose peer has gone away.
    """
    connections = []
    dead_connections = []
    # clients_encountered = None
    new_clients = None
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((socket.gethostbyname(HOST), CLIENTS_BROADCAST_PORT))
    # Non-blocking so accept() drains pending connections then raises.
    s.setblocking(0)
    s.listen(len(replicas))
    global clients, clients_lock
    while True:
        # Accept replica connections until all replicas are connected;
        # reject peers that are not in the replica list.
        if len(connections) != len(replicas):
            try:
                while True:
                    conn, addr = s.accept()
                    if addr[0] not in replicas:
                        conn.close()
                    else:
                        connections.append(conn)
            except socket.error as e:
                # No more pending connections (EWOULDBLOCK).
                pass
        # clients_encountered = set()
        new_clients = []
        # Snapshot the shared client set under the lock.
        with clients_lock:
            new_clients = clients.copy()
        try:
            if new_clients != []:
                for conn in connections:
                    try:
                        # '$' is the record separator the replicas expect.
                        conn.send(''.join(map(lambda x: x+'$', new_clients)))
                    except socket.error as se:
                        errnum = se.args[0]
                        if errnum == errno.EPIPE:
                            # Peer closed: mark for removal below.
                            dead_connections.append(conn)
        except socket.error:
            pass
        finally:
            for con in dead_connections:
                connections.remove(con)
            dead_connections = []
        time.sleep(3)
# Object representing a connection to a replica server
class rep_conn():
def __init__(self, replica_ip):
self.replica_ip = replica_ip
self.conn = None
# self.pipe = Queue.Queue()
self.dict = {}
self.connected = False
self.buff_size = 65535
def assign_connection(self, conn):
self.conn = conn
self.conn.setblocking(0)
self.connected = True
def update(self):
values = None
data = ''
arrived = self.connected
try:
if self.connected:
data += self.conn.recv(self.buff_size)
arrived = True
# print('Update recv: ' + data)
except socket.error as se:
errnum = se.args[0]
if errnum == errno.EPIPE:
self.connected = False
elif errnum == errno.EWOULDBLOCK:
arrived = False
if arrived:
self.__update_mappings__(data)
def __update_mappings__(self, data):
lines = filter(lambda x: x!='', data.split('\n'))
for line in lines:
# print('the line:' + line)
ip, time = line.split()
self.dict[ip] = float(time)
# Recieves updates from the replica servers and accordingly updates
# its current mappings for client ip addresses
def updated_mappings(replicas):
    """Every 3 seconds, pull measurement reports from each replica and
    fold them into the global |mappings| (client ip -> [best replica ip,
    best time]), keeping an existing mapping when it is already better.
    """
    connections = {}
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.bind((HOST, MAPPINGS_UPDATE_PORT))
    s.setblocking(0)
    s.listen(len(replicas))
    global mappings, mappings_lock
    # One rep_conn wrapper per replica, keyed by its ip.
    for ip in replicas:
        connections[ip] = rep_conn(ip)
    # pipes = []
    buff_size = 65535
    # for _ in range(len(replicas)):
    # pipes.append(Queue())
    while True:
        # Accept until every replica is connected; non-blocking accept
        # raises socket.error when the backlog is drained.
        try:
            while not all(map(lambda x: x[1].connected, connections.items())):
                conn, addr = s.accept()
                if addr[0] not in replicas:
                    conn.close()
                else:
                    connections[addr[0]].assign_connection(conn)
        except socket.error as e:
            pass
        '''
        data = None
        start = int(time.time())
        now = 0
        has_data = None
        while (now - start) >= 5:
            try:
                has_data = True
                data, addr = s.recvfrom(buff_size)
            except socket.error as e:
                has_data = False
            if has_data:
                try:
                    connections[addr[0]].update(data)
                except KeyError:
                    pass
            now = int(time.time())
        '''
        # Union of every client ip any replica has measured.
        keys_union = set()
        for ip, repcon in connections.items():
            repcon.update()
            keys_union = set.union(set(repcon.dict.keys()), keys_union)
        # Pad each replica's dict with +inf for clients it hasn't measured,
        # so the comparison below is total.
        for ip, repcon in connections.items():
            d = repcon.dict
            for unmapped in (keys_union - set(d.keys())):
                d[unmapped] = float('inf')
        # For each client, pick the replica with the smallest time.
        new_mappings = {}
        for key in keys_union:
            best_time = float('inf')
            best_ip = None
            for ip, repcon in connections.items():
                if repcon.dict[key] < best_time:
                    best_time = repcon.dict[key]
                    best_ip = ip
            if (best_time != float('inf')) and (best_ip != None):
                new_mappings[key] = [best_ip, best_time]
        # Merge into the shared mapping, only replacing an entry when the
        # new measurement is strictly better (KeyError => first sighting).
        with mappings_lock:
            for key in new_mappings.keys():
                try:
                    if new_mappings[key][1] < mappings[key][1]:
                        mappings[key] = new_mappings[key]
                except KeyError:
                    mappings[key] = new_mappings[key]
        time.sleep(3)
# Beginning of execution
# Beginning of execution
def main():
    """Validate '-p <port> -n <name>' arguments, derive the two internal
    ports from the public DNS port, resolve the replica hostnames and
    start the DNS-translation and active-measurement threads.
    """
    '''
    port_sym = sys.argv[1]
    port = int(sys.argv[2])
    url_sym = sys.argv[3]
    url = sys.argv[4]
    '''
    number_of_args = 4
    argv_1 = '-p'
    argv_3 = '-n'
    if len(sys.argv) != number_of_args + 1:
        print('Bad number of arguments. Exiting.')
        sys.exit(1)
    else:
        if (sys.argv[1] != argv_1) or (sys.argv[3] != argv_3):
            print('Arguments do not follow format. Exiting.')
            sys.exit(1)
        elif not sys.argv[2].isdigit():
            print('Invalid port number. Exiting.')
            sys.exit(1)
    port = int(sys.argv[2])
    global MAPPINGS_UPDATE_PORT, CLIENTS_BROADCAST_PORT
    # Derive the two internal TCP ports from the public DNS port, offset
    # into a range that does not collide with it. Ports <= 40000 (and
    # > 2**16) keep the module-level defaults.
    if port > 40000:
        if port > 50000:
            if port > 60000:
                if port > 2**16:
                    pass
                else:
                    #60k - 2**16
                    MAPPINGS_UPDATE_PORT = port - 15328
                    CLIENTS_BROADCAST_PORT = port - 15329
            else:
                #50k - 60k
                MAPPINGS_UPDATE_PORT = port - 10528
                CLIENTS_BROADCAST_PORT = port - 10529
        else:
            #40k-50k
            MAPPINGS_UPDATE_PORT = port + 15329
            CLIENTS_BROADCAST_PORT = port + 15328
    else:
        #< 40k
        pass
    replicas = ('ec2-52-0-73-113.compute-1.amazonaws.com',
                'ec2-52-16-219-28.eu-west-1.compute.amazonaws.com',
                'ec2-52-11-8-29.us-west-2.compute.amazonaws.com',
                'ec2-52-8-12-101.us-west-1.compute.amazonaws.com',
                'ec2-52-28-48-84.eu-central-1.compute.amazonaws.com',
                'ec2-52-68-12-77.ap-northeast-1.compute.amazonaws.com',
                'ec2-52-74-143-5.ap-southeast-1.compute.amazonaws.com',
                'ec2-52-64-63-125.ap-southeast-2.compute.amazonaws.com',
                'ec2-54-94-214-108.sa-east-1.compute.amazonaws.com')
    # Work with resolved addresses so peer checks compare ip to ip.
    replicas = tuple(map(socket.gethostbyname, list(replicas)))
    # Shared state between the DNS thread and the measurement threads.
    global clients, clients_lock, mappings, mappings_lock
    clients = set()
    clients_lock = threading.Lock()
    mappings = {}
    mappings_lock = threading.Lock()
    t1 = threading.Thread(target=translations, args=(port, sys.argv[4], replicas))
    t2 = threading.Thread(target=active_measurements, args=(replicas,))
    t1.start()
    t2.start()
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import distutils.spawn
import logging
import multiprocessing
import optparse
import os
import re
import shutil
import sys
import time
import zipfile
from util import build_utils
from util import md5_check
from util import jar_info_utils
sys.path.insert(
0,
os.path.join(build_utils.DIR_SOURCE_ROOT, 'third_party', 'colorama', 'src'))
import colorama
_JAVAC_EXTRACTOR = os.path.join(build_utils.DIR_SOURCE_ROOT, 'third_party',
'android_prebuilts', 'build_tools', 'common',
'framework', 'javac_extractor.jar')
# Full list of checks: https://errorprone.info/bugpatterns
# Checks in this list are suppressed (-Xep:<check>:OFF).
# BUG FIX: removed a duplicate 'FunctionalInterfaceClash' entry that was
# listed twice.
ERRORPRONE_WARNINGS_TO_TURN_OFF = [
    # This one should really be turned on.
    'ParameterNotNullable',
    # TODO(crbug.com/834807): Follow steps in bug
    'DoubleBraceInitialization',
    # TODO(crbug.com/834790): Follow steps in bug.
    'CatchAndPrintStackTrace',
    # TODO(crbug.com/801210): Follow steps in bug.
    'SynchronizeOnNonFinalField',
    # TODO(crbug.com/802073): Follow steps in bug.
    'TypeParameterUnusedInFormals',
    # TODO(crbug.com/803484): Follow steps in bug.
    'CatchFail',
    # TODO(crbug.com/803485): Follow steps in bug.
    'JUnitAmbiguousTestClass',
    # TODO(crbug.com/1027683): Follow steps in bug.
    'UnnecessaryParentheses',
    # TODO(wnwen): Fix issue in JavaUploadDataSinkBase.java
    'PrimitiveAtomicReference',
    # Android platform default is always UTF-8.
    # https://developer.android.com/reference/java/nio/charset/Charset.html#defaultCharset()
    'DefaultCharset',
    # Low priority since the alternatives still work.
    'JdkObsolete',
    # We don't use that many lambdas.
    'FunctionalInterfaceClash',
    # There are lots of times when we just want to post a task.
    'FutureReturnValueIgnored',
    # Nice to be explicit about operators, but not necessary.
    'OperatorPrecedence',
    # Just false positives in our code.
    'ThreadJoinLoop',
    # Low priority corner cases with String.split.
    # Linking Guava and using Splitter was rejected
    # in the https://chromium-review.googlesource.com/c/chromium/src/+/871630.
    'StringSplitter',
    # Preferred to use another method since it propagates exceptions better.
    'ClassNewInstance',
    # Nice to have static inner classes but not necessary.
    'ClassCanBeStatic',
    # Explicit is better than implicit.
    'FloatCast',
    # Results in false positives.
    'ThreadLocalUsage',
    # Also just false positives.
    'Finally',
    # False positives for Chromium.
    'FragmentNotInstantiable',
    # Low priority to fix.
    'HidingField',
    # Low priority.
    'IntLongMath',
    # Low priority.
    'BadComparable',
    # Low priority.
    'EqualsHashCode',
    # Nice to fix but low priority.
    'TypeParameterShadowing',
    # Good to have immutable enums, also low priority.
    'ImmutableEnumChecker',
    # False positives for testing.
    'InputStreamSlowMultibyteRead',
    # Nice to have better primitives.
    'BoxedPrimitiveConstructor',
    # Not necessary for tests.
    'OverrideThrowableToString',
    # Nice to have better type safety.
    'CollectionToArraySafeParameter',
    # Makes logcat debugging more difficult, and does not provide obvious
    # benefits in the Chromium codebase.
    'ObjectToString',
    # Triggers on private methods that are @CalledByNative.
    'UnusedMethod',
    # Triggers on generated R.java files.
    'UnusedVariable',
    # Not that useful.
    'UnsafeReflectiveConstructionCast',
    # Not that useful.
    'MixedMutabilityReturnType',
    # Nice to have.
    'EqualsGetClass',
    # A lot of false-positives from CharSequence.equals().
    'UndefinedEquals',
    # Nice to have.
    'ExtendingJUnitAssert',
    # Nice to have.
    'SystemExitOutsideMain',
    # Nice to have.
    'TypeParameterNaming',
    # Nice to have.
    'UnusedException',
    # Nice to have.
    'UngroupedOverloads',
    # Nice to have.
    'InconsistentOverloads',
    # Dagger generated code triggers this.
    'SameNameButDifferent',
    # Nice to have.
    'UnnecessaryLambda',
    # Nice to have.
    'UnnecessaryAnonymousClass',
    # Nice to have.
    'LiteProtoToString',
    # Must be off since we are now passing in annotation processor generated
    # code as a source jar (deduplicating work with turbine).
    'RefersToDaggerCodegen',
    # We already have presubmit checks for this. Not necessary to warn on
    # every build.
    'RemoveUnusedImports',
]
# Full list of checks: https://errorprone.info/bugpatterns
# Only those marked as "experimental" need to be listed here in order to be
# enabled. We build with -Werror, so all default checks cause builds to fail.
ERRORPRONE_WARNINGS_TO_ERROR = [
    'BinderIdentityRestoredDangerously',
    'EmptyIf',
    'EqualsBrokenForNull',
    'InvalidThrows',
    'LongLiteralLowerCaseSuffix',
    'MultiVariableDeclaration',
    'RedundantOverride',
    'StaticQualifiedUsingExpression',
    'StringEquality',
    'TimeUnitMismatch',
    'UnnecessaryStaticImport',
    'UseBinds',
    'WildcardImport',
]
def ProcessJavacOutput(output):
  """Filters and colorizes javac output for human consumption.

  Drops the unsuppressable deprecation/unchecked "Note:" lines, then
  colors warning lines yellow, error lines magenta and the caret marker
  line blue using colorama escape codes.
  """
  fileline_prefix = r'(?P<fileline>(?P<file>[-.\w/\\]+.java):(?P<line>[0-9]+):)'
  warning_re = re.compile(fileline_prefix +
                          r'(?P<full_message> warning: (?P<message>.*))$')
  error_re = re.compile(fileline_prefix +
                        r'(?P<full_message> (?P<message>.*))$')
  marker_re = re.compile(r'\s*(?P<marker>\^)\s*$')
  # These warnings cannot be suppressed even for third party code. Deprecation
  # warnings especially do not help since we must support older android version.
  deprecated_re = re.compile(
      r'(Note: .* uses? or overrides? a deprecated API.)$')
  unchecked_re = re.compile(
      r'(Note: .* uses? unchecked or unsafe operations.)$')
  recompile_re = re.compile(r'(Note: Recompile with -Xlint:.* for details.)$')
  # Each color spec is (group name to colorize, escape sequence).
  warning_color = ['full_message', colorama.Fore.YELLOW + colorama.Style.DIM]
  error_color = ['full_message', colorama.Fore.MAGENTA + colorama.Style.BRIGHT]
  marker_color = ['marker', colorama.Fore.BLUE + colorama.Style.BRIGHT]
  def Colorize(line, regex, color):
    # Wrap only the named group in the color escape, resetting afterwards.
    match = regex.match(line)
    start = match.start(color[0])
    end = match.end(color[0])
    return (line[:start] + color[1] + line[start:end] + colorama.Fore.RESET +
            colorama.Style.RESET_ALL + line[end:])
  def ApplyFilters(line):
    # True => keep the line.
    return not (deprecated_re.match(line) or unchecked_re.match(line)
                or recompile_re.match(line))
  def ApplyColors(line):
    # warning_re must be tried before error_re (error_re also matches
    # warning lines).
    if warning_re.match(line):
      line = Colorize(line, warning_re, warning_color)
    elif error_re.match(line):
      line = Colorize(line, error_re, error_color)
    elif marker_re.match(line):
      line = Colorize(line, marker_re, marker_color)
    return line
  return '\n'.join(map(ApplyColors, filter(ApplyFilters, output.split('\n'))))
def _ExtractClassFiles(jar_path, dest_dir, java_files):
  """Extracts into |dest_dir| every .class file in |jar_path| whose source
  is NOT one of |java_files|.

  Two challenges exist here:
  1. |java_files| have prefixes that are not represented in the jar paths.
  2. A single .java file results in multiple .class files when it contains
     nested classes.
  Example:
    source path: ../../base/android/java/src/org/chromium/Foo.java
    jar paths:   org/chromium/Foo.class, org/chromium/Foo$Inner.class
  So ".class" and "$*.class" are stripped off and a suffix match against
  |java_files| decides what to skip.
  """
  def _keep(path):
    if not path.endswith('.class'):
      return False
    # Recover the source-relative .java path from the class path.
    source_rel = re.sub(r'(?:\$|\.)[^/]*class$', '', path) + '.java'
    return all(not p.endswith(source_rel) for p in java_files)
  logging.info('Extracting class files from %s', jar_path)
  build_utils.ExtractAll(jar_path, path=dest_dir, predicate=_keep)
  # Preserve the jar's timestamps on the extracted files (md5 stamping).
  for extracted in build_utils.FindInDirectory(dest_dir, '*.class'):
    shutil.copystat(jar_path, extracted)
def _ParsePackageAndClassNames(java_file):
package_name = ''
class_names = []
with open(java_file) as f:
for l in f:
# Strip unindented comments.
# Considers a leading * as a continuation of a multi-line comment (our
# linter doesn't enforce a space before it like there should be).
l = re.sub(r'^(?://.*|/?\*.*?(?:\*/\s*|$))', '', l)
m = re.match(r'package\s+(.*?);', l)
if m and not package_name:
package_name = m.group(1)
# Not exactly a proper parser, but works for sources that Chrome uses.
# In order to not match nested classes, it just checks for lack of indent.
m = re.match(r'(?:\S.*?)?(?:class|@?interface|enum)\s+(.+?)\b', l)
if m:
class_names.append(m.group(1))
return package_name, class_names
def _ProcessJavaFileForInfo(java_file):
  """Pool worker: parses one file into (path, package name, class names)."""
  return (java_file,) + _ParsePackageAndClassNames(java_file)
class _InfoFileContext(object):
  """Manages the creation of the class->source file .info file."""

  def __init__(self, chromium_code, excluded_globs):
    self._chromium_code = chromium_code
    self._excluded_globs = excluded_globs
    # Map of .java path -> .srcjar/nested/path.java.
    self._srcjar_files = {}
    # List of generators from pool.imap_unordered().
    self._results = []
    # Lazily created multiprocessing.Pool.
    self._pool = None

  def AddSrcJarSources(self, srcjar_path, extracted_paths, parent_dir):
    for path in extracted_paths:
      # We want the path inside the srcjar so the viewer can have a tree
      # structure.
      self._srcjar_files[path] = '{}/{}'.format(
          srcjar_path, os.path.relpath(path, parent_dir))

  def SubmitFiles(self, java_files):
    # Parsing happens asynchronously in the pool; results are only drained
    # in _Collect() at Commit() time.
    if self._pool is None:
      # Restrict to just one process to not slow down compiling. Compiling
      # is always slower.
      self._pool = multiprocessing.Pool(1)
    logging.info('Submitting %d files for info', len(java_files))
    self._results.append(
        self._pool.imap_unordered(
            _ProcessJavaFileForInfo, java_files, chunksize=1000))

  def _CheckPathMatchesClassName(self, java_file, package_name, class_name):
    # e.g. package a.b + class C must live under .../a/b/C.java.
    parts = package_name.split('.') + [class_name + '.java']
    expected_path_suffix = os.path.sep.join(parts)
    if not java_file.endswith(expected_path_suffix):
      raise Exception(('Java package+class name do not match its path.\n'
                       'Actual path: %s\nExpected path: %s') %
                      (java_file, expected_path_suffix))

  def _ProcessInfo(self, java_file, package_name, class_names, source):
    # Yields the fully qualified name of every class in the file.
    for class_name in class_names:
      yield '{}.{}'.format(package_name, class_name)
      # Skip aidl srcjars since they don't indent code correctly.
      if '_aidl.srcjar' in source:
        continue
      assert not self._chromium_code or len(class_names) == 1, (
          'Chromium java files must only have one class: {}'.format(source))
      if self._chromium_code:
        # This check is not necessary but nice to check this somewhere.
        self._CheckPathMatchesClassName(java_file, package_name, class_names[0])

  def _ShouldIncludeInJarInfo(self, fully_qualified_name):
    name_as_class_glob = fully_qualified_name.replace('.', '/') + '.class'
    return not build_utils.MatchesGlob(name_as_class_glob, self._excluded_globs)

  def _Collect(self):
    # Drains all pending pool results into the final mapping.
    if self._pool is None:
      return {}
    ret = {}
    for result in self._results:
      for java_file, package_name, class_names in result:
        source = self._srcjar_files.get(java_file, java_file)
        for fully_qualified_name in self._ProcessInfo(java_file, package_name,
                                                      class_names, source):
          if self._ShouldIncludeInJarInfo(fully_qualified_name):
            ret[fully_qualified_name] = java_file
    self._pool.terminate()
    return ret

  def __del__(self):
    # Work around for Python 2.x bug with multiprocessing and daemon threads:
    # https://bugs.python.org/issue4106
    if self._pool is not None:
      logging.info('Joining multiprocessing.Pool')
      self._pool.terminate()
      self._pool.join()
      logging.info('Done.')

  def Commit(self, output_path):
    """Writes a .jar.info file.

    Maps fully qualified names for classes to either the java file that they
    are defined in or the path of the srcjar that they came from.
    """
    logging.info('Collecting info file entries')
    entries = self._Collect()
    logging.info('Writing info file: %s', output_path)
    with build_utils.AtomicOutput(output_path) as f:
      jar_info_utils.WriteJarInfoFile(f, entries, self._srcjar_files)
    logging.info('Completed info file: %s', output_path)
def _CreateJarFile(jar_path, provider_configurations, additional_jar_files,
                   classes_dir):
  """Atomically zips |classes_dir| (plus optional service-provider configs
  and extra files) into |jar_path|."""
  logging.info('Start creating jar file: %s', jar_path)
  with build_utils.AtomicOutput(jar_path) as f:
    with zipfile.ZipFile(f.name, 'w') as z:
      build_utils.ZipDir(z, classes_dir)
      # Service-provider configs land under META-INF/services/.
      for config in provider_configurations or []:
        build_utils.AddToZipHermetic(
            z, 'META-INF/services/' + os.path.basename(config),
            src_path=config)
      for src_path, zip_path in additional_jar_files or []:
        build_utils.AddToZipHermetic(z, zip_path, src_path=src_path)
  logging.info('Completed jar file: %s', jar_path)
def _OnStaleMd5(options, javac_cmd, javac_args, java_files):
  """Runs the compile (and, when enabled, the Kythe kzip extraction).

  The Kythe extraction is best-effort: a failure there is logged but does
  not fail the build.
  """
  logging.info('Starting _OnStaleMd5')
  if options.enable_kythe_annotations:
    # Kythe requires those env variables to be set and compile_java.py does
    # the same.
    if not os.environ.get('KYTHE_ROOT_DIRECTORY') or \
        not os.environ.get('KYTHE_OUTPUT_DIRECTORY'):
      raise Exception('--enable-kythe-annotations requires '
                      'KYTHE_ROOT_DIRECTORY and KYTHE_OUTPUT_DIRECTORY '
                      'environment variables to be set.')
    javac_extractor_cmd = [
        build_utils.JAVA_PATH,
        '-jar',
        _JAVAC_EXTRACTOR,
    ]
    try:
      # BUG FIX: removed a stray trailing comma after this call, which
      # turned the statement into a discarded 1-tuple.
      _RunCompiler(options, javac_extractor_cmd + javac_args, java_files,
                   options.classpath, options.jar_path + '.javac_extractor',
                   save_outputs=False)
    except build_utils.CalledProcessError as e:
      # Having no index for particular target is better than failing entire
      # codesearch. Log the error and move on.
      logging.error('Could not generate kzip: %s', e)
  # Compiles with Error Prone take twice as long to run as pure javac. Thus GN
  # rules run both in parallel, with Error Prone only used for checks.
  _RunCompiler(options, javac_cmd + javac_args, java_files,
               options.classpath, options.jar_path,
               save_outputs=not options.enable_errorprone)
  logging.info('Completed all steps in _OnStaleMd5')
def _RunCompiler(options, javac_cmd, java_files, classpath, jar_path,
                 save_outputs=True):
  """Runs |javac_cmd| over |java_files| (plus extracted srcjars) and
  packages the result into |jar_path|; when saving outputs, also writes
  the .info mapping and keeps generated sources for codesearch.
  """
  logging.info('Starting _RunCompiler')
  # Compiles with Error Prone take twice as long to run as pure javac. Thus GN
  # rules run both in parallel, with Error Prone only used for checks.
  # NOTE(review): this line clobbers the save_outputs PARAMETER, so the
  # value callers pass in (e.g. save_outputs=False for the Kythe
  # extractor) is ignored -- confirm whether that is intended.
  save_outputs = not options.enable_errorprone
  # Use jar_path's directory to ensure paths are relative (needed for goma).
  temp_dir = jar_path + '.staging'
  shutil.rmtree(temp_dir, True)
  os.makedirs(temp_dir)
  try:
    classes_dir = os.path.join(temp_dir, 'classes')
    if save_outputs:
      input_srcjars_dir = os.path.join(options.generated_dir, 'input_srcjars')
      annotation_processor_outputs_dir = os.path.join(
          options.generated_dir, 'annotation_processor_outputs')
      # Delete any stale files in the generated directory. The purpose of
      # options.generated_dir is for codesearch.
      shutil.rmtree(options.generated_dir, True)
      info_file_context = _InfoFileContext(options.chromium_code,
                                           options.jar_info_exclude_globs)
    else:
      # Throwaway outputs go under the staging dir instead.
      input_srcjars_dir = os.path.join(temp_dir, 'input_srcjars')
      annotation_processor_outputs_dir = os.path.join(
          temp_dir, 'annotation_processor_outputs')
    if options.java_srcjars:
      logging.info('Extracting srcjars to %s', input_srcjars_dir)
      build_utils.MakeDirectory(input_srcjars_dir)
      for srcjar in options.java_srcjars:
        extracted_files = build_utils.ExtractAll(
            srcjar, no_clobber=True, path=input_srcjars_dir, pattern='*.java')
        java_files.extend(extracted_files)
        if save_outputs:
          info_file_context.AddSrcJarSources(srcjar, extracted_files,
                                             input_srcjars_dir)
      logging.info('Done extracting srcjars')
    if save_outputs and java_files:
      info_file_context.SubmitFiles(java_files)
    if java_files:
      # Don't include the output directory in the initial set of args since it
      # being in a temp dir makes it unstable (breaks md5 stamping).
      cmd = list(javac_cmd)
      cmd += ['-d', classes_dir]
      cmd += ['-s', annotation_processor_outputs_dir]
      # Pass classpath and source paths as response files to avoid extremely
      # long command lines that are tedius to debug.
      if classpath:
        cmd += ['-classpath', ':'.join(classpath)]
      java_files_rsp_path = os.path.join(temp_dir, 'files_list.txt')
      with open(java_files_rsp_path, 'w') as f:
        f.write(' '.join(java_files))
      cmd += ['@' + java_files_rsp_path]
      logging.debug('Build command %s', cmd)
      os.makedirs(classes_dir)
      os.makedirs(annotation_processor_outputs_dir)
      start = time.time()
      build_utils.CheckOutput(
          cmd,
          print_stdout=options.chromium_code,
          stdout_filter=ProcessJavacOutput,
          stderr_filter=ProcessJavacOutput)
      end = time.time() - start
      logging.info('Java compilation took %ss', end)
    if save_outputs:
      # Annotation processors may have generated additional sources; fold
      # them into the info file too.
      annotation_processor_java_files = build_utils.FindInDirectory(
          annotation_processor_outputs_dir)
      if annotation_processor_java_files:
        info_file_context.SubmitFiles(annotation_processor_java_files)
      _CreateJarFile(jar_path, options.provider_configurations,
                     options.additional_jar_files, classes_dir)
      info_file_context.Commit(jar_path + '.info')
    else:
      # No sources and no outputs to save: just refresh the stamp.
      build_utils.Touch(jar_path)
    logging.info('Completed all steps in _RunCompiler')
  finally:
    shutil.rmtree(temp_dir)
def _ParseOptions(argv):
  """Parses command-line flags; returns (options, java_files).

  Positional args are .java paths, or @-prefixed response files listing
  them. GN-list-valued flags are expanded here.
  """
  parser = optparse.OptionParser()
  build_utils.AddDepfileOption(parser)
  parser.add_option(
      '--java-srcjars',
      action='append',
      default=[],
      help='List of srcjars to include in compilation.')
  parser.add_option(
      '--generated-dir',
      help='Subdirectory within target_gen_dir to place extracted srcjars and '
      'annotation processor output for codesearch to find.')
  parser.add_option(
      '--bootclasspath',
      action='append',
      default=[],
      help='Boot classpath for javac. If this is specified multiple times, '
      'they will all be appended to construct the classpath.')
  parser.add_option(
      '--java-version',
      help='Java language version to use in -source and -target args to javac.')
  parser.add_option('--classpath', action='append', help='Classpath to use.')
  parser.add_option(
      '--processors',
      action='append',
      help='GN list of annotation processor main classes.')
  parser.add_option(
      '--processorpath',
      action='append',
      help='GN list of jars that comprise the classpath used for Annotation '
      'Processors.')
  parser.add_option(
      '--processor-arg',
      dest='processor_args',
      action='append',
      help='key=value arguments for the annotation processors.')
  parser.add_option(
      '--provider-configuration',
      dest='provider_configurations',
      action='append',
      help='File to specify a service provider. Will be included '
      'in the jar under META-INF/services.')
  parser.add_option(
      '--additional-jar-file',
      dest='additional_jar_files',
      action='append',
      help='Additional files to package into jar. By default, only Java .class '
      'files are packaged into the jar. Files should be specified in '
      'format <filename>:<path to be placed in jar>.')
  parser.add_option(
      '--jar-info-exclude-globs',
      help='GN list of exclude globs to filter from generated .info files.')
  parser.add_option(
      '--chromium-code',
      type='int',
      help='Whether code being compiled should be built with stricter '
      'warnings for chromium code.')
  parser.add_option(
      '--gomacc-path', help='When set, prefix javac command with gomacc')
  parser.add_option(
      '--errorprone-path', help='Use the Errorprone compiler at this path.')
  parser.add_option(
      '--enable-errorprone',
      action='store_true',
      help='Enable errorprone checks')
  parser.add_option(
      '--warnings-as-errors',
      action='store_true',
      help='Treat all warnings as errors.')
  parser.add_option('--jar-path', help='Jar output path.')
  parser.add_option(
      '--javac-arg',
      action='append',
      default=[],
      help='Additional arguments to pass to javac.')
  parser.add_option(
      '--enable-kythe-annotations',
      action='store_true',
      help='Enable generation of Kythe kzip, used for codesearch. Ensure '
      'proper environment variables are set before using this flag.')
  options, args = parser.parse_args(argv)
  build_utils.CheckOptions(options, parser, required=('jar_path', ))
  # Expand GN-list-formatted flag values into real Python lists.
  options.bootclasspath = build_utils.ParseGnList(options.bootclasspath)
  options.classpath = build_utils.ParseGnList(options.classpath)
  options.processorpath = build_utils.ParseGnList(options.processorpath)
  options.processors = build_utils.ParseGnList(options.processors)
  options.java_srcjars = build_utils.ParseGnList(options.java_srcjars)
  options.jar_info_exclude_globs = build_utils.ParseGnList(
      options.jar_info_exclude_globs)
  # Each entry is "<src path>:<path inside jar>".
  additional_jar_files = []
  for arg in options.additional_jar_files or []:
    filepath, jar_filepath = arg.split(':')
    additional_jar_files.append((filepath, jar_filepath))
  options.additional_jar_files = additional_jar_files
  java_files = []
  for arg in args:
    # Interpret a path prefixed with @ as a file containing a list of sources.
    if arg.startswith('@'):
      java_files.extend(build_utils.ReadSourcesList(arg[1:]))
    else:
      java_files.append(arg)
  return options, java_files
def main(argv):
    """Compile Java sources with javac, optionally via goma and errorprone.

    Expands @-file arguments, parses options, assembles the javac command
    line, and runs the (expensive) compile only when the md5/depfile check
    says the inputs are stale.
    """
    build_utils.InitLogging('JAVAC_DEBUG')
    colorama.init()

    argv = build_utils.ExpandFileArgs(argv)
    options, java_files = _ParseOptions(argv)

    # Prefix with the goma compiler proxy when configured so compilation can
    # be distributed.
    javac_cmd = []
    if options.gomacc_path:
        javac_cmd.append(options.gomacc_path)
    javac_cmd.append(build_utils.JAVAC_PATH)

    javac_args = [
        '-g',
        # Chromium only allows UTF8 source files. Being explicit avoids
        # javac pulling a default encoding from the user's environment.
        '-encoding',
        'UTF-8',
        # Prevent compiler from compiling .java files not listed as inputs.
        # See: http://blog.ltgt.net/most-build-tools-misuse-javac/
        '-sourcepath',
        ':',
    ]

    if options.enable_errorprone:
        # All errorprone args are passed space-separated in a single arg.
        errorprone_flags = ['-Xplugin:ErrorProne']
        for warning in ERRORPRONE_WARNINGS_TO_TURN_OFF:
            errorprone_flags.append('-Xep:{}:OFF'.format(warning))
        for warning in ERRORPRONE_WARNINGS_TO_ERROR:
            errorprone_flags.append('-Xep:{}:ERROR'.format(warning))
        if not options.warnings_as_errors:
            errorprone_flags.append('-XepAllErrorsAsWarnings')
        javac_args += ['-XDcompilePolicy=simple', ' '.join(errorprone_flags)]

        # This flag quits errorprone after checks and before code generation, since
        # we do not need errorprone outputs, this speeds up errorprone by 4 seconds
        # for chrome_java.
        javac_args += ['-XDshould-stop.ifNoError=FLOW']

    if options.java_version:
        javac_args.extend([
            '-source',
            options.java_version,
            '-target',
            options.java_version,
        ])
    if options.java_version == '1.8':
        # Android's boot jar doesn't contain all java 8 classes.
        options.bootclasspath.append(build_utils.RT_JAR_PATH)

    if options.warnings_as_errors:
        javac_args.extend(['-Werror'])
    else:
        # XDignore.symbol.file makes javac compile against rt.jar instead of
        # ct.sym. This means that using a java internal package/class will not
        # trigger a compile warning or error.
        javac_args.extend(['-XDignore.symbol.file'])

    if options.processors:
        javac_args.extend(['-processor', ','.join(options.processors)])
    else:
        # This effectively disables all annotation processors, even including
        # annotation processors in service provider configuration files named
        # META-INF/. See the following link for reference:
        # https://docs.oracle.com/en/java/javase/11/tools/javac.html
        javac_args.extend(['-proc:none'])

    if options.bootclasspath:
        javac_args.extend(['-bootclasspath', ':'.join(options.bootclasspath)])

    if options.processorpath:
        javac_args.extend(['-processorpath', ':'.join(options.processorpath)])
    if options.processor_args:
        for arg in options.processor_args:
            javac_args.extend(['-A%s' % arg])

    javac_args.extend(options.javac_arg)

    classpath_inputs = (
        options.bootclasspath + options.classpath + options.processorpath)

    # GN already knows of java_files, so listing them just make things worse when
    # they change.
    depfile_deps = classpath_inputs + options.java_srcjars

    input_paths = depfile_deps + java_files
    input_paths += [x[0] for x in options.additional_jar_files]

    output_paths = [
        options.jar_path,
        options.jar_path + '.info',
    ]

    # Everything that affects the compile output must be in input_strings so
    # the staleness check catches flag changes, not just file changes.
    input_strings = javac_cmd + javac_args + options.classpath + java_files
    if options.jar_info_exclude_globs:
        input_strings.append(options.jar_info_exclude_globs)
    md5_check.CallAndWriteDepfileIfStale(
        lambda: _OnStaleMd5(options, javac_cmd, javac_args, java_files),
        options,
        depfile_deps=depfile_deps,
        input_paths=input_paths,
        input_strings=input_strings,
        output_paths=output_paths)


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| |
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for VMwareAPI.
"""
import collections
import contextlib
import datetime
from eventlet import greenthread
import mock
from mox3 import mox
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import pbm
from oslo_vmware import vim_util as oslo_vim_util
from nova import block_device
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.image import glance
from nova.network import model as network_model
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
import nova.tests.unit.image.fake
from nova.tests.unit import matchers
from nova.tests.unit import test_flavors
from nova.tests.unit import utils
from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.virt import driver as v_driver
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import imagecache
from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import volumeops
# Global config handle; import options declared in other nova modules that
# this test suite reads through CONF.
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('remove_unused_original_minimum_age_seconds',
                'nova.virt.imagecache')
class fake_vm_ref(object):
    """Minimal stand-in for a vSphere VirtualMachine managed object ref."""

    def __init__(self):
        # Fixed value/type pair; tests only need a stable, recognizable ref.
        self.value, self._type = 4, 'VirtualMachine'
class fake_service_content(object):
    """Fake ServiceContent holder mimicking the vSphere service instance."""

    def __init__(self):
        content = vmwareapi_fake.DataObject()
        content.fake = 'fake'
        self.ServiceContent = content
def _fake_create_session(inst):
    """Attach a canned, already-logged-in session to *inst* without any I/O."""
    fake_session = vmwareapi_fake.DataObject()
    for attr, value in (('key', 'fake_key'),
                        ('userName', 'fake_username'),
                        ('_pbm_wsdl_loc', None),
                        ('_pbm', None)):
        setattr(fake_session, attr, value)
    inst._session = fake_session
class VMwareDriverStartupTestCase(test.NoDBTestCase):
    """Checks that VC driver construction fails fast on incomplete config."""

    def _start_driver_with_flags(self, expected_exception_type, startup_flags):
        """Build the driver with *startup_flags* set and assert that exactly
        *expected_exception_type* is raised.
        """
        self.flags(**startup_flags)
        # Patch the session constructor so only config validation runs, not
        # any real connection attempt.
        with mock.patch(
                'nova.virt.vmwareapi.driver.VMwareAPISession.__init__'):
            e = self.assertRaises(
                Exception, driver.VMwareVCDriver, None)  # noqa
            self.assertIs(type(e), expected_exception_type)

    def test_start_driver_no_user(self):
        """Missing host_username must abort driver startup."""
        self._start_driver_with_flags(
            Exception,
            dict(host_ip='ip', host_password='password',
                 group='vmware'))

    def test_start_driver_no_host(self):
        """Missing host_ip must abort driver startup."""
        self._start_driver_with_flags(
            Exception,
            dict(host_username='username', host_password='password',
                 group='vmware'))

    def test_start_driver_no_password(self):
        """Missing host_password must abort driver startup."""
        self._start_driver_with_flags(
            Exception,
            dict(host_ip='ip', host_username='username',
                 group='vmware'))

    def test_start_driver_with_user_host_password(self):
        # Getting the InvalidInput exception signifies that no exception
        # is raised regarding missing user/password/host
        self._start_driver_with_flags(
            nova.exception.InvalidInput,
            dict(host_ip='ip', host_password='password',
                 host_username="user", datastore_regex="bad(regex",
                 group='vmware'))
class VMwareSessionTestCase(test.NoDBTestCase):
    """Tests for _call_method dispatch on VMwareAPISession."""

    @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
                       return_value=False)
    def test_call_method(self, mock_is_vim):
        """Non-vim modules get the session's vim handle appended."""
        with contextlib.nested(
                mock.patch.object(driver.VMwareAPISession, '_create_session',
                                  _fake_create_session),
                mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
        ) as (fake_create, fake_invoke):
            session = driver.VMwareAPISession()
            session._vim = mock.Mock()
            module = mock.Mock()
            session._call_method(module, 'fira')
            fake_invoke.assert_called_once_with(module, 'fira', session._vim)

    @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
                       return_value=True)
    def test_call_method_vim(self, mock_is_vim):
        """A module that is itself a vim object is invoked without a vim arg."""
        with contextlib.nested(
                mock.patch.object(driver.VMwareAPISession, '_create_session',
                                  _fake_create_session),
                mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
        ) as (fake_create, fake_invoke):
            session = driver.VMwareAPISession()
            module = mock.Mock()
            session._call_method(module, 'fira')
            fake_invoke.assert_called_once_with(module, 'fira')
class VMwareAPIVMTestCase(test.NoDBTestCase):
"""Unit tests for Vmware API connection calls."""
REQUIRES_LOCKING = True
    @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
    def setUp(self, mock_register):
        """Build a VC driver against the fake vSphere backend.

        Resets all module-level caches, stubs out the image service, and
        prepares a reference image plus a template instance object used by
        the individual tests.
        """
        super(VMwareAPIVMTestCase, self).setUp()
        # Module-level caches survive between tests; clear them explicitly.
        ds_util.dc_cache_reset()
        vm_util.vm_refs_cache_reset()
        self.context = context.RequestContext('fake', 'fake', is_admin=False)
        self.flags(cluster_name='test_cluster',
                   host_ip='test_url',
                   host_username='test_username',
                   host_password='test_pass',
                   api_retry_count=1,
                   use_linked_clone=False, group='vmware')
        self.flags(enabled=False, group='vnc')
        self.flags(image_cache_subdirectory_name='vmware_base',
                   my_ip='')
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        stubs.set_stubs(self.stubs)
        vmwareapi_fake.reset()
        nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
        self.conn = driver.VMwareVCDriver(None, False)
        self._set_exception_vars()
        self.node_name = self.conn._nodename
        self.ds = 'ds1'
        self.vim = vmwareapi_fake.FakeVim()

        # NOTE(vish): none of the network plugging code is actually
        #             being tested
        self.network_info = utils.get_test_network_info()
        # Resolve a valid image from the fake image service to use as the
        # instances' image_ref.
        image_ref = nova.tests.unit.image.fake.get_valid_image_id()
        (image_service, image_id) = glance.get_remote_image_service(
            self.context, image_ref)
        metadata = image_service.show(self.context, image_id)
        self.image = {
            'id': image_ref,
            'disk_format': 'vmdk',
            'size': int(metadata['size']),
        }
        self.fake_image_uuid = self.image['id']
        nova.tests.unit.image.fake.stub_out_image_service(self.stubs)
        self.vnc_host = 'ha-host'
        # Instance with node=None, used by tests that exercise behavior for
        # instances not yet assigned to a compute node.
        self.instance_without_compute = fake_instance.fake_instance_obj(None,
                                        **{'node': None,
                                         'vm_state': 'building',
                                         'project_id': 'fake',
                                         'user_id': 'fake',
                                         'name': '1',
                                         'display_description': '1',
                                         'kernel_id': '1',
                                         'ramdisk_id': '1',
                                         'mac_addresses': [
                                            {'address': 'de:ad:be:ef:be:ef'}
                                         ],
                                         'memory_mb': 8192,
                                         'instance_type_id': 2,
                                         'vcpus': 4,
                                         'root_gb': 80,
                                         'image_ref': self.image['id'],
                                         'host': 'fake_host',
                                         'task_state':
                                         'scheduling',
                                         'reservation_id': 'r-3t8muvr0',
                                         'id': 1,
                                         'uuid': 'fake-uuid',
                                         'metadata': []})
    def tearDown(self):
        """Tear down the fake vSphere backend and reset the fake image store."""
        super(VMwareAPIVMTestCase, self).tearDown()
        vmwareapi_fake.cleanup()
        nova.tests.unit.image.fake.FakeImageService_reset()
    def test_legacy_block_device_info(self):
        """Driver advertises the new-style block device info format."""
        self.assertFalse(self.conn.need_legacy_block_device_info)

    def test_get_host_ip_addr(self):
        """get_host_ip_addr reports the configured host_ip."""
        self.assertEqual('test_url', self.conn.get_host_ip_addr())

    def test_init_host_with_no_session(self):
        """init_host recreates the vSphere session when vim is gone."""
        self.conn._session = mock.Mock()
        self.conn._session.vim = None
        self.conn.init_host('fake_host')
        self.conn._session._create_session.assert_called_once_with()

    def test_init_host(self):
        """init_host must not raise against the fake backend."""
        try:
            self.conn.init_host("fake_host")
        except Exception as ex:
            self.fail("init_host raised: %s" % ex)
    def _set_exception_vars(self):
        """Save the real session helpers so tests can wrap them, and reset
        the per-test failure-tracking flags.
        """
        # Originals are kept so fake_call_method/fake_wait_for_task wrappers
        # can delegate to the real fake-backend behavior.
        self.wait_task = self.conn._session._wait_for_task
        self.call_method = self.conn._session._call_method
        self.task_ref = None
        self.exception = False
    def test_cleanup_host(self):
        """cleanup_host must not raise after a successful init_host."""
        self.conn.init_host("fake_host")
        try:
            self.conn.cleanup_host("fake_host")
        except Exception as ex:
            self.fail("cleanup_host raised: %s" % ex)

    def test_driver_capabilities(self):
        """Spot-check the advertised driver capability flags."""
        self.assertTrue(self.conn.capabilities['has_imagecache'])
        self.assertFalse(self.conn.capabilities['supports_recreate'])
        self.assertTrue(
            self.conn.capabilities['supports_migrate_to_same_host'])
    @mock.patch.object(pbm, 'get_profile_id_by_name')
    def test_configuration_pbm(self, get_profile_mock):
        """Config validation passes when the default PBM policy resolves."""
        get_profile_mock.return_value = 'fake-profile'
        self.flags(pbm_enabled=True,
                   pbm_default_policy='fake-policy',
                   pbm_wsdl_location='fake-location', group='vmware')
        self.conn._validate_configuration()

    @mock.patch.object(pbm, 'get_profile_id_by_name')
    def test_configuration_pbm_bad_default(self, get_profile_mock):
        """Config validation fails when the default PBM policy is unknown."""
        get_profile_mock.return_value = None
        self.flags(pbm_enabled=True,
                   pbm_wsdl_location='fake-location',
                   pbm_default_policy='fake-policy', group='vmware')
        self.assertRaises(error_util.PbmDefaultPolicyDoesNotExist,
                          self.conn._validate_configuration)
    def test_login_retries(self):
        """A transient connection failure on login is retried and succeeds."""
        self.attempts = 0
        self.login_session = vmwareapi_fake.FakeVim()._login()

        def _fake_login(_self):
            # Fail only the first attempt to exercise the retry path.
            self.attempts += 1
            if self.attempts == 1:
                raise vexc.VimConnectionException('Here is my fake exception')
            return self.login_session

        def _fake_check_session(_self):
            return True

        self.stubs.Set(vmwareapi_fake.FakeVim, '_login', _fake_login)
        self.stubs.Set(vmwareapi_fake.FakeVim, '_check_session',
                       _fake_check_session)
        # Patch sleep so the retry back-off does not slow down the test.
        with mock.patch.object(greenthread, 'sleep'):
            self.conn = driver.VMwareAPISession()
        self.assertEqual(2, self.attempts)
    def _get_instance_type_by_name(self, type):
        """Look up a flavor by name from the default test flavors.

        'm1.micro' is not among the defaults, so it is synthesized inline
        (notably with root_gb=0 for the zero-root-disk tests).
        NOTE(review): implicitly returns None for any other unknown name.
        """
        for instance_type in test_flavors.DEFAULT_FLAVOR_OBJS:
            if instance_type.name == type:
                return instance_type
        if type == 'm1.micro':
            return {'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
                    'name': 'm1.micro', 'deleted': 0, 'created_at': None,
                    'ephemeral_gb': 0, 'updated_at': None,
                    'disabled': False, 'vcpus': 1, 'extra_specs': {},
                    'swap': 0, 'rxtx_factor': 1.0, 'is_public': True,
                    'flavorid': '1', 'vcpu_weight': None, 'id': 2}
    def _create_instance(self, node=None, set_image_ref=True,
                         uuid=None, instance_type='m1.large',
                         ephemeral=None, instance_type_updates=None):
        """Build self.instance (a fake instance object) for this test.

        Also records self.type_data, self.instance_node and self.uuid, which
        later assertions read.  *ephemeral* overrides the flavor's
        ephemeral_gb; *instance_type_updates* patches arbitrary flavor keys.
        """
        if not node:
            node = self.node_name
        if not uuid:
            uuid = uuidutils.generate_uuid()
        # Copy so per-test updates don't leak into the shared flavor data.
        self.type_data = dict(self._get_instance_type_by_name(instance_type))
        if instance_type_updates:
            self.type_data.update(instance_type_updates)
        if ephemeral is not None:
            self.type_data['ephemeral_gb'] = ephemeral
        values = {'name': 'fake_name',
                  'id': 1,
                  'uuid': uuid,
                  'project_id': self.project_id,
                  'user_id': self.user_id,
                  'kernel_id': "fake_kernel_uuid",
                  'ramdisk_id': "fake_ramdisk_uuid",
                  'mac_address': "de:ad:be:ef:be:ef",
                  'flavor': objects.Flavor(**self.type_data),
                  'node': node,
                  'memory_mb': self.type_data['memory_mb'],
                  'root_gb': self.type_data['root_gb'],
                  'ephemeral_gb': self.type_data['ephemeral_gb'],
                  'vcpus': self.type_data['vcpus'],
                  'swap': self.type_data['swap'],
                  'expected_attrs': ['system_metadata'],
                  }
        if set_image_ref:
            values['image_ref'] = self.fake_image_uuid
        self.instance_node = node
        self.uuid = uuid
        self.instance = fake_instance.fake_instance_obj(
            self.context, **values)
    def _create_vm(self, node=None, num_instances=1, uuid=None,
                   instance_type='m1.large', powered_on=True,
                   ephemeral=None, bdi=None, instance_type_updates=None):
        """Create and spawn the VM.

        Builds the fake instance, spawns it through the driver, and then
        verifies the resulting VM record (including the vm_ref cache).
        """
        if not node:
            node = self.node_name
        self._create_instance(node=node, uuid=uuid,
                              instance_type=instance_type,
                              ephemeral=ephemeral,
                              instance_type_updates=instance_type_updates)
        # The cache must not contain the ref before spawn and must after.
        self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
        self.conn.spawn(self.context, self.instance, self.image,
                        injected_files=[], admin_password=None,
                        network_info=self.network_info,
                        block_device_info=bdi)
        self._check_vm_record(num_instances=num_instances,
                              powered_on=powered_on,
                              uuid=uuid)
        self.assertIsNotNone(vm_util.vm_ref_cache_get(self.uuid))
def _get_vm_record(self):
# Get record for VM
vms = vmwareapi_fake._get_objects("VirtualMachine")
for vm in vms.objects:
if vm.get('name') == self.uuid:
return vm
self.fail('Unable to find VM backing!')
def _get_info(self, uuid=None, node=None, name=None):
uuid = uuid if uuid else self.uuid
node = node if node else self.instance_node
name = name if node else '1'
return self.conn.get_info(fake_instance.fake_instance_obj(
None,
**{'uuid': uuid,
'name': name,
'node': node}))
    def _check_vm_record(self, num_instances=1, powered_on=True, uuid=None):
        """Check if the spawned VM's properties correspond to the instance in
        the db.

        Verifies memory/vcpu sizing, the NIC device type, the power state
        (against both Nova and the fake vSphere API) and the extraConfig
        entries carrying the instance uuid and vif id.
        """
        instances = self.conn.list_instances()
        if uuidutils.is_uuid_like(uuid):
            self.assertEqual(num_instances, len(instances))

        # Get Nova record for VM
        vm_info = self._get_info()
        vm = self._get_vm_record()

        # Check that m1.large above turned into the right thing.
        # NOTE: long() — this file targets Python 2.
        mem_kib = long(self.type_data['memory_mb']) << 10
        vcpus = self.type_data['vcpus']
        self.assertEqual(vm_info.max_mem_kb, mem_kib)
        self.assertEqual(vm_info.mem_kb, mem_kib)
        self.assertEqual(vm.get("summary.config.instanceUuid"), self.uuid)
        self.assertEqual(vm.get("summary.config.numCpu"), vcpus)
        self.assertEqual(vm.get("summary.config.memorySizeMB"),
                         self.type_data['memory_mb'])

        # Device index 2 is assumed to be the NIC — TODO confirm against the
        # fake backend's device layout.
        self.assertEqual("ns0:VirtualE1000",
                         vm.get("config.hardware.device").VirtualDevice[2].obj_name)

        if powered_on:
            # Check that the VM is running according to Nova
            self.assertEqual(power_state.RUNNING, vm_info.state)

            # Check that the VM is running according to vSphere API.
            self.assertEqual('poweredOn', vm.get("runtime.powerState"))
        else:
            # Check that the VM is not running according to Nova
            self.assertEqual(power_state.SHUTDOWN, vm_info.state)

            # Check that the VM is not running according to vSphere API.
            self.assertEqual('poweredOff', vm.get("runtime.powerState"))

        found_vm_uuid = False
        found_iface_id = False
        extras = vm.get("config.extraConfig")
        for c in extras.OptionValue:
            if (c.key == "nvp.vm-uuid" and c.value == self.instance['uuid']):
                found_vm_uuid = True
            if (c.key == "nvp.iface-id.0" and c.value == "vif-xxx-yyy-zzz"):
                found_iface_id = True

        self.assertTrue(found_vm_uuid)
        self.assertTrue(found_iface_id)
    def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
        """Check if the get_info returned values correspond to the instance
        object in the db.
        """
        # NOTE: long() — this file targets Python 2.
        mem_kib = long(self.type_data['memory_mb']) << 10
        self.assertEqual(info.state, pwr_state)
        self.assertEqual(info.max_mem_kb, mem_kib)
        self.assertEqual(info.mem_kb, mem_kib)
        self.assertEqual(info.num_cpu, self.type_data['vcpus'])
    def test_instance_exists(self):
        """instance_exists is True for a spawned VM, False for an unknown one."""
        self._create_vm()
        self.assertTrue(self.conn.instance_exists(self.instance))
        invalid_instance = fake_instance.fake_instance_obj(None, uuid='foo',
                                                           name='bar',
                                                           node=self.node_name)
        self.assertFalse(self.conn.instance_exists(invalid_instance))

    def test_list_instances_1(self):
        """list_instances reports the one spawned VM."""
        self._create_vm()
        instances = self.conn.list_instances()
        self.assertEqual(1, len(instances))

    def test_list_instance_uuids(self):
        """list_instance_uuids reports the one spawned VM."""
        self._create_vm()
        uuids = self.conn.list_instance_uuids()
        self.assertEqual(1, len(uuids))

    def test_list_instance_uuids_invalid_uuid(self):
        """VMs with non-uuid names are filtered out of list_instance_uuids."""
        self._create_vm(uuid='fake_id')
        uuids = self.conn.list_instance_uuids()
        self.assertEqual(0, len(uuids))
    def _cached_files_exist(self, exists=True):
        """Assert the cached base-image vmdk is present (or absent) in the
        vmware_base directory of the fake datastore.
        """
        cache = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                     self.fake_image_uuid,
                                     '%s.vmdk' % self.fake_image_uuid)
        if exists:
            vmwareapi_fake.assertPathExists(self, str(cache))
        else:
            vmwareapi_fake.assertPathNotExists(self, str(cache))
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_instance_dir_disk_created(self, mock_from_image):
        """Test that the image file is cached even when use_linked_clone
        is False.
        """
        img_props = images.VMwareImage(
            image_id=self.fake_image_uuid,
            linked_clone=False)

        mock_from_image.return_value = img_props
        self._create_vm()
        # The instance gets its own full copy of the disk...
        path = ds_obj.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
        vmwareapi_fake.assertPathExists(self, str(path))
        # ...and the base image is still cached.
        self._cached_files_exist()
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_cache_dir_disk_created(self, mock_from_image):
        """Test image disk is cached when use_linked_clone is True."""
        self.flags(use_linked_clone=True, group='vmware')

        img_props = images.VMwareImage(
            image_id=self.fake_image_uuid,
            file_size=1 * units.Ki,
            disk_type=constants.DISK_TYPE_SPARSE)

        mock_from_image.return_value = img_props

        self._create_vm()
        # Both the original cached image and the resized (80 GB root) copy
        # should exist under vmware_base.
        path = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                    self.fake_image_uuid,
                                    '%s.vmdk' % self.fake_image_uuid)
        root = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                    self.fake_image_uuid,
                                    '%s.80.vmdk' % self.fake_image_uuid)
        vmwareapi_fake.assertPathExists(self, str(path))
        vmwareapi_fake.assertPathExists(self, str(root))
    def _iso_disk_type_created(self, instance_type='m1.large'):
        """Spawn from an iso image and assert the iso got cached."""
        self.image['disk_format'] = 'iso'
        self._create_vm(instance_type=instance_type)

        path = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                    self.fake_image_uuid,
                                    '%s.iso' % self.fake_image_uuid)
        vmwareapi_fake.assertPathExists(self, str(path))

    def test_iso_disk_type_created(self):
        """An iso boot with a non-zero root disk also creates a blank vmdk."""
        self._iso_disk_type_created()
        path = ds_obj.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
        vmwareapi_fake.assertPathExists(self, str(path))

    def test_iso_disk_type_created_with_root_gb_0(self):
        """An iso boot with root_gb=0 must not create a root vmdk."""
        self._iso_disk_type_created(instance_type='m1.micro')
        path = ds_obj.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
        vmwareapi_fake.assertPathNotExists(self, str(path))
    def test_iso_disk_cdrom_attach(self):
        """The cached iso is attached to the VM as a cdrom."""
        iso_path = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                        self.fake_image_uuid,
                                        '%s.iso' % self.fake_image_uuid)

        def fake_attach_cdrom(vm_ref, instance, data_store_ref,
                              iso_uploaded_path):
            # Verify the attach happens with the cached iso path.
            self.assertEqual(iso_uploaded_path, str(iso_path))

        self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
                       fake_attach_cdrom)
        self.image['disk_format'] = 'iso'
        self._create_vm()
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_iso_disk_cdrom_attach_with_config_drive(self,
                                                     mock_from_image):
        """With force_config_drive, two cdroms are attached in order:
        first the boot iso, then the config drive.
        """
        img_props = images.VMwareImage(
            image_id=self.fake_image_uuid,
            file_size=80 * units.Gi,
            file_type='iso',
            linked_clone=False)

        mock_from_image.return_value = img_props

        self.flags(force_config_drive=True)
        iso_path = [
            ds_obj.DatastorePath(self.ds, 'vmware_base',
                                 self.fake_image_uuid,
                                 '%s.iso' % self.fake_image_uuid),
            ds_obj.DatastorePath(self.ds, 'fake-config-drive')]
        self.iso_index = 0

        def fake_create_config_drive(instance, injected_files, password,
                                     network_info, data_store_name,
                                     folder, uuid, cookies):
            return 'fake-config-drive'

        def fake_attach_cdrom(vm_ref, instance, data_store_ref,
                              iso_uploaded_path):
            # Each attach call must match the next expected iso path.
            self.assertEqual(iso_uploaded_path, str(iso_path[self.iso_index]))
            self.iso_index += 1

        self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
                       fake_attach_cdrom)
        self.stubs.Set(self.conn._vmops, '_create_config_drive',
                       fake_create_config_drive)

        self.image['disk_format'] = 'iso'
        self._create_vm()
        self.assertEqual(2, self.iso_index)
    def test_ephemeral_disk_attach(self):
        """A flavor-level ephemeral disk is created alongside the VM."""
        self._create_vm(ephemeral=50)
        path = ds_obj.DatastorePath(self.ds, self.uuid,
                                    'ephemeral_0.vmdk')
        vmwareapi_fake.assertPathExists(self, str(path))

    def test_ephemeral_disk_attach_from_bdi(self):
        """Ephemerals listed in block_device_info each get their own vmdk."""
        ephemerals = [{'device_type': 'disk',
                       'disk_bus': constants.DEFAULT_ADAPTER_TYPE,
                       'size': 25},
                      {'device_type': 'disk',
                       'disk_bus': constants.DEFAULT_ADAPTER_TYPE,
                       'size': 25}]
        bdi = {'ephemerals': ephemerals}
        self._create_vm(bdi=bdi, ephemeral=50)
        path = ds_obj.DatastorePath(self.ds, self.uuid,
                                    'ephemeral_0.vmdk')
        vmwareapi_fake.assertPathExists(self, str(path))
        path = ds_obj.DatastorePath(self.ds, self.uuid,
                                    'ephemeral_1.vmdk')
        vmwareapi_fake.assertPathExists(self, str(path))

    def test_ephemeral_disk_attach_from_bdii_with_no_ephs(self):
        """An empty ephemerals list falls back to the flavor's ephemeral_gb."""
        bdi = {'ephemerals': []}
        self._create_vm(bdi=bdi, ephemeral=50)
        path = ds_obj.DatastorePath(self.ds, self.uuid,
                                    'ephemeral_0.vmdk')
        vmwareapi_fake.assertPathExists(self, str(path))
    def test_cdrom_attach_with_config_drive(self):
        """With force_config_drive and a vmdk image, only the config drive
        is attached as a cdrom.
        """
        self.flags(force_config_drive=True)

        iso_path = ds_obj.DatastorePath(self.ds, 'fake-config-drive')
        self.cd_attach_called = False

        def fake_create_config_drive(instance, injected_files, password,
                                     network_info, data_store_name,
                                     folder, uuid, cookies):
            return 'fake-config-drive'

        def fake_attach_cdrom(vm_ref, instance, data_store_ref,
                              iso_uploaded_path):
            self.assertEqual(iso_uploaded_path, str(iso_path))
            self.cd_attach_called = True

        self.stubs.Set(self.conn._vmops, "_attach_cdrom_to_vm",
                       fake_attach_cdrom)
        self.stubs.Set(self.conn._vmops, '_create_config_drive',
                       fake_create_config_drive)

        self._create_vm()
        self.assertTrue(self.cd_attach_called)
    @mock.patch.object(vmops.VMwareVMOps, 'power_off')
    @mock.patch.object(driver.VMwareVCDriver, 'detach_volume')
    @mock.patch.object(vmops.VMwareVMOps, 'destroy')
    def test_destroy_with_attached_volumes(self,
                                           mock_destroy,
                                           mock_detach_volume,
                                           mock_power_off):
        """destroy powers the VM off, detaches volumes, then destroys it."""
        self._create_vm()
        connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'}
        bdm = [{'connection_info': connection_info,
                'disk_bus': 'fake-bus',
                'device_name': 'fake-name',
                'mount_device': '/dev/sdb'}]
        bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'}
        self.assertNotEqual(vm_states.STOPPED, self.instance.vm_state)
        self.conn.destroy(self.context, self.instance, self.network_info,
                          block_device_info=bdi)
        mock_power_off.assert_called_once_with(self.instance)
        # The driver marks the instance stopped before detaching volumes.
        self.assertEqual(vm_states.STOPPED, self.instance.vm_state)
        mock_detach_volume.assert_called_once_with(
            connection_info, self.instance, 'fake-name')
        mock_destroy.assert_called_once_with(self.instance, True)

    @mock.patch.object(vmops.VMwareVMOps, 'power_off',
                       side_effect=vexc.ManagedObjectNotFoundException())
    @mock.patch.object(vmops.VMwareVMOps, 'destroy')
    def test_destroy_with_attached_volumes_missing(self,
                                                   mock_destroy,
                                                   mock_power_off):
        """A missing backing VM during power_off does not abort destroy."""
        self._create_vm()
        connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'}
        bdm = [{'connection_info': connection_info,
                'disk_bus': 'fake-bus',
                'device_name': 'fake-name',
                'mount_device': '/dev/sdb'}]
        bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'}
        self.assertNotEqual(vm_states.STOPPED, self.instance.vm_state)
        self.conn.destroy(self.context, self.instance, self.network_info,
                          block_device_info=bdi)
        mock_power_off.assert_called_once_with(self.instance)
        mock_destroy.assert_called_once_with(self.instance, True)
    @mock.patch.object(driver.VMwareVCDriver, 'detach_volume',
                       side_effect=exception.NovaException())
    @mock.patch.object(vmops.VMwareVMOps, 'destroy')
    def test_destroy_with_attached_volumes_with_exception(
            self, mock_destroy, mock_detach_volume):
        """A generic detach failure propagates and prevents the destroy."""
        self._create_vm()
        connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'}
        bdm = [{'connection_info': connection_info,
                'disk_bus': 'fake-bus',
                'device_name': 'fake-name',
                'mount_device': '/dev/sdb'}]
        bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'}
        self.assertRaises(exception.NovaException,
                          self.conn.destroy, self.context, self.instance,
                          self.network_info, block_device_info=bdi)
        mock_detach_volume.assert_called_once_with(
            connection_info, self.instance, 'fake-name')
        self.assertFalse(mock_destroy.called)

    @mock.patch.object(driver.VMwareVCDriver, 'detach_volume',
                       side_effect=exception.StorageError(reason='oh man'))
    @mock.patch.object(vmops.VMwareVMOps, 'destroy')
    def test_destroy_with_attached_volumes_with_storage_error(
            self, mock_destroy, mock_detach_volume):
        """A StorageError on detach is tolerated and destroy still runs."""
        self._create_vm()
        connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'}
        bdm = [{'connection_info': connection_info,
                'disk_bus': 'fake-bus',
                'device_name': 'fake-name',
                'mount_device': '/dev/sdb'}]
        bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'}
        self.conn.destroy(self.context, self.instance, self.network_info,
                          block_device_info=bdi)
        mock_detach_volume.assert_called_once_with(
            connection_info, self.instance, 'fake-name')
        self.assertTrue(mock_destroy.called)
        mock_destroy.assert_called_once_with(self.instance, True)
    def test_spawn(self):
        """A default spawn produces a RUNNING VM."""
        self._create_vm()
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)

    def test_spawn_vm_ref_cached(self):
        """spawn populates the vm_ref cache for the new uuid."""
        uuid = uuidutils.generate_uuid()
        self.assertIsNone(vm_util.vm_ref_cache_get(uuid))
        self._create_vm(uuid=uuid)
        self.assertIsNotNone(vm_util.vm_ref_cache_get(uuid))

    def test_spawn_power_on(self):
        """spawn leaves the VM powered on."""
        self._create_vm()
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)

    def test_spawn_root_size_0(self):
        """With root_gb=0 the base image is cached but no resized copy made."""
        self._create_vm(instance_type='m1.micro')
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
        cache = ('[%s] vmware_base/%s/%s.vmdk' %
                 (self.ds, self.fake_image_uuid, self.fake_image_uuid))
        gb_cache = ('[%s] vmware_base/%s/%s.0.vmdk' %
                    (self.ds, self.fake_image_uuid, self.fake_image_uuid))
        vmwareapi_fake.assertPathExists(self, cache)
        vmwareapi_fake.assertPathNotExists(self, gb_cache)
    def _spawn_with_delete_exception(self, fault=None):
        """Spawn while forcing DeleteDatastoreFile_Task to fail with *fault*.

        Known file faults are swallowed by the driver (spawn succeeds);
        an unspecified fault must surface as VMwareDriverException.
        """

        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "DeleteDatastoreFile_Task":
                self.exception = True
                task_mdo = vmwareapi_fake.create_task(method, "error",
                                                      error_fault=fault)
                return task_mdo.obj
            return task_ref

        with (
            mock.patch.object(self.conn._session, '_call_method',
                              fake_call_method)
        ):
            if fault:
                self._create_vm()
                info = self._get_info()
                self._check_vm_info(info, power_state.RUNNING)
            else:
                self.assertRaises(vexc.VMwareDriverException, self._create_vm)
            # The injected failure path must actually have been hit.
            self.assertTrue(self.exception)

    def test_spawn_with_delete_exception_not_found(self):
        """FileNotFound on delete is tolerated."""
        self._spawn_with_delete_exception(vmwareapi_fake.FileNotFound())

    def test_spawn_with_delete_exception_file_fault(self):
        """FileFault on delete is tolerated."""
        self._spawn_with_delete_exception(vmwareapi_fake.FileFault())

    def test_spawn_with_delete_exception_cannot_delete_file(self):
        """CannotDeleteFile on delete is tolerated."""
        self._spawn_with_delete_exception(vmwareapi_fake.CannotDeleteFile())

    def test_spawn_with_delete_exception_file_locked(self):
        """FileLocked on delete is tolerated."""
        self._spawn_with_delete_exception(vmwareapi_fake.FileLocked())

    def test_spawn_with_delete_exception_general(self):
        """An unspecified delete fault aborts the spawn."""
        self._spawn_with_delete_exception()
    def test_spawn_disk_extend(self):
        """spawn extends the root disk to the flavor's requested size."""
        self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk')
        # 80 GB root disk expressed in KB (driver units) — TODO confirm unit.
        requested_size = 80 * units.Mi
        self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(),
                requested_size, mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self._create_vm()
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)

    def test_spawn_disk_extend_exists(self):
        """If the resized cached disk already exists, spawn still succeeds."""
        root = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                    self.fake_image_uuid,
                                    '%s.80.vmdk' % self.fake_image_uuid)

        def _fake_extend(instance, requested_size, name, dc_ref):
            # Simulate the extend by materializing the resized file.
            vmwareapi_fake._add_file(str(root))

        self.stubs.Set(self.conn._vmops, '_extend_virtual_disk',
                       _fake_extend)

        self._create_vm()
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
        vmwareapi_fake.assertPathExists(self, str(root))
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_spawn_disk_extend_sparse(self, mock_from_image):
        """A sparse linked-clone image is extended to root_gb on spawn."""
        img_props = images.VMwareImage(
            image_id=self.fake_image_uuid,
            file_size=units.Ki,
            disk_type=constants.DISK_TYPE_SPARSE,
            linked_clone=True)

        mock_from_image.return_value = img_props

        with contextlib.nested(
            mock.patch.object(self.conn._vmops, '_extend_virtual_disk'),
            mock.patch.object(self.conn._vmops, 'get_datacenter_ref_and_name'),
        ) as (mock_extend, mock_get_dc):
            dc_val = mock.Mock()
            dc_val.ref = "fake_dc_ref"
            dc_val.name = "dc1"
            mock_get_dc.return_value = dc_val

            self._create_vm()
            iid = img_props.image_id
            cached_image = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                                iid, '%s.80.vmdk' % iid)
            mock_extend.assert_called_once_with(
                    self.instance, self.instance.root_gb * units.Mi,
                    str(cached_image), "fake_dc_ref")
    def test_spawn_disk_extend_failed_copy(self):
        # Spawn instance
        # copy for extend fails without creating a file
        #
        # Expect the copy error to be raised
        self.flags(use_linked_clone=True, group='vmware')

        CopyError = vexc.FileFaultException

        def fake_wait_for_task(task_ref):
            # Fail only the injected copy task; everything else behaves
            # normally via the saved original.
            if task_ref == 'fake-copy-task':
                raise CopyError('Copy failed!')
            return self.wait_task(task_ref)

        def fake_call_method(module, method, *args, **kwargs):
            if method == "CopyVirtualDisk_Task":
                return 'fake-copy-task'
            return self.call_method(module, method, *args, **kwargs)

        with contextlib.nested(
                mock.patch.object(self.conn._session, '_call_method',
                                  new=fake_call_method),
                mock.patch.object(self.conn._session, '_wait_for_task',
                                  new=fake_wait_for_task)):
            self.assertRaises(CopyError, self._create_vm)
    def test_spawn_disk_extend_failed_partial_copy(self):
        # Spawn instance
        # Copy for extend fails, leaving a file behind
        #
        # Expect the file to be cleaned up
        # Expect the copy error to be raised
        self.flags(use_linked_clone=True, group='vmware')
        self.task_ref = None
        uuid = self.fake_image_uuid
        cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
                                                           uuid, uuid)

        CopyError = vexc.FileFaultException

        def fake_wait_for_task(task_ref):
            if task_ref == self.task_ref:
                # Fire the failure exactly once for the captured copy task.
                self.task_ref = None
                vmwareapi_fake.assertPathExists(self, cached_image)
                # N.B. We don't test for -flat here because real
                # CopyVirtualDisk_Task doesn't actually create it
                raise CopyError('Copy failed!')
            return self.wait_task(task_ref)

        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "CopyVirtualDisk_Task":
                # Remember the real task ref so the wait hook can target it.
                self.task_ref = task_ref
            return task_ref

        with contextlib.nested(
                mock.patch.object(self.conn._session, '_call_method',
                                  new=fake_call_method),
                mock.patch.object(self.conn._session, '_wait_for_task',
                                  new=fake_wait_for_task)):
            self.assertRaises(CopyError, self._create_vm)
        # The partially-copied file must have been cleaned up.
        vmwareapi_fake.assertPathNotExists(self, cached_image)
    def test_spawn_disk_extend_failed_partial_copy_failed_cleanup(self):
        """Cleanup failure after a failed extend-copy raises the delete error."""
        # Spawn instance
        # Copy for extend fails, leaves file behind
        # File cleanup fails
        #
        # Expect file to be left behind
        # Expect file cleanup error to be raised
        self.flags(use_linked_clone=True, group='vmware')
        self.task_ref = None
        uuid = self.fake_image_uuid
        cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
                                                           uuid, uuid)
        CopyError = vexc.FileFaultException
        DeleteError = vexc.CannotDeleteFileException
        def fake_wait_for_task(task_ref):
            if task_ref == self.task_ref:
                self.task_ref = None
                vmwareapi_fake.assertPathExists(self, cached_image)
                # N.B. We don't test for -flat here because real
                # CopyVirtualDisk_Task doesn't actually create it
                raise CopyError('Copy failed!')
            elif task_ref == 'fake-delete-task':
                raise DeleteError('Delete failed!')
            return self.wait_task(task_ref)
        def fake_call_method(module, method, *args, **kwargs):
            # Short-circuit the delete so the fake backend never removes
            # the file; its wait above raises DeleteError.
            if method == "DeleteDatastoreFile_Task":
                return 'fake-delete-task'
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "CopyVirtualDisk_Task":
                self.task_ref = task_ref
            return task_ref
        with contextlib.nested(
            mock.patch.object(self.conn._session, '_wait_for_task',
                              new=fake_wait_for_task),
            mock.patch.object(self.conn._session, '_call_method',
                              new=fake_call_method)):
            self.assertRaises(DeleteError, self._create_vm)
        vmwareapi_fake.assertPathExists(self, cached_image)
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_spawn_disk_invalid_disk_size(self, mock_from_image):
img_props = images.VMwareImage(
image_id=self.fake_image_uuid,
file_size=82 * units.Gi,
disk_type=constants.DISK_TYPE_SPARSE,
linked_clone=True)
mock_from_image.return_value = img_props
self.assertRaises(exception.InstanceUnacceptable,
self._create_vm)
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_spawn_disk_extend_insufficient_disk_space(self, mock_from_image):
img_props = images.VMwareImage(
image_id=self.fake_image_uuid,
file_size=1024,
disk_type=constants.DISK_TYPE_SPARSE,
linked_clone=True)
mock_from_image.return_value = img_props
cached_image = ds_obj.DatastorePath(self.ds, 'vmware_base',
self.fake_image_uuid,
'%s.80.vmdk' %
self.fake_image_uuid)
tmp_file = ds_obj.DatastorePath(self.ds, 'vmware_base',
self.fake_image_uuid,
'%s.80-flat.vmdk' %
self.fake_image_uuid)
NoDiskSpace = vexc.get_fault_class('NoDiskSpace')
def fake_wait_for_task(task_ref):
if task_ref == self.task_ref:
self.task_ref = None
raise NoDiskSpace()
return self.wait_task(task_ref)
def fake_call_method(module, method, *args, **kwargs):
task_ref = self.call_method(module, method, *args, **kwargs)
if method == 'ExtendVirtualDisk_Task':
self.task_ref = task_ref
return task_ref
with contextlib.nested(
mock.patch.object(self.conn._session, '_wait_for_task',
fake_wait_for_task),
mock.patch.object(self.conn._session, '_call_method',
fake_call_method)
) as (mock_wait_for_task, mock_call_method):
self.assertRaises(NoDiskSpace, self._create_vm)
vmwareapi_fake.assertPathNotExists(self, str(cached_image))
vmwareapi_fake.assertPathNotExists(self, str(tmp_file))
    def test_spawn_with_move_file_exists_exception(self):
        """Spawn succeeds when MoveDatastoreFile_Task says the file exists."""
        # The test will validate that the spawn completes
        # successfully. The "MoveDatastoreFile_Task" will
        # raise an file exists exception. The flag
        # self.exception will be checked to see that
        # the exception has indeed been raised.
        def fake_wait_for_task(task_ref):
            if task_ref == self.task_ref:
                self.task_ref = None
                # Record that the fault path was actually exercised.
                self.exception = True
                raise vexc.FileAlreadyExistsException()
            return self.wait_task(task_ref)
        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "MoveDatastoreFile_Task":
                self.task_ref = task_ref
            return task_ref
        with contextlib.nested(
            mock.patch.object(self.conn._session, '_wait_for_task',
                              fake_wait_for_task),
            mock.patch.object(self.conn._session, '_call_method',
                              fake_call_method)
        ) as (_wait_for_task, _call_method):
            self._create_vm()
            info = self._get_info()
            self._check_vm_info(info, power_state.RUNNING)
            self.assertTrue(self.exception)
    def test_spawn_with_move_general_exception(self):
        """Spawn fails when MoveDatastoreFile_Task raises a generic fault."""
        # The test will validate that the spawn completes
        # successfully. The "MoveDatastoreFile_Task" will
        # raise a general exception. The flag self.exception
        # will be checked to see that the exception has
        # indeed been raised.
        def fake_wait_for_task(task_ref):
            if task_ref == self.task_ref:
                self.task_ref = None
                # Record that the fault path was actually exercised.
                self.exception = True
                raise vexc.VMwareDriverException('Exception!')
            return self.wait_task(task_ref)
        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "MoveDatastoreFile_Task":
                self.task_ref = task_ref
            return task_ref
        with contextlib.nested(
            mock.patch.object(self.conn._session, '_wait_for_task',
                              fake_wait_for_task),
            mock.patch.object(self.conn._session, '_call_method',
                              fake_call_method)
        ) as (_wait_for_task, _call_method):
            self.assertRaises(vexc.VMwareDriverException,
                              self._create_vm)
            self.assertTrue(self.exception)
    def test_spawn_with_move_poll_exception(self):
        """A MoveDatastoreFile_Task in 'error' state aborts the spawn."""
        self.call_method = self.conn._session._call_method
        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "MoveDatastoreFile_Task":
                # Return a task already in error state so polling fails.
                task_mdo = vmwareapi_fake.create_task(method, "error")
                return task_mdo.obj
            return task_ref
        with (
            mock.patch.object(self.conn._session, '_call_method',
                              fake_call_method)
        ):
            self.assertRaises(vexc.VMwareDriverException,
                              self._create_vm)
    def test_spawn_with_move_file_exists_poll_exception(self):
        """A FileAlreadyExists fault from task polling does not fail spawn."""
        # The test will validate that the spawn completes
        # successfully. The "MoveDatastoreFile_Task" will
        # raise a file exists exception. The flag self.exception
        # will be checked to see that the exception has
        # indeed been raised.
        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "MoveDatastoreFile_Task":
                self.exception = True
                # Task ends in error state with a FileAlreadyExists
                # fault, which the driver treats as benign.
                task_mdo = vmwareapi_fake.create_task(method, "error",
                        error_fault=vmwareapi_fake.FileAlreadyExists())
                return task_mdo.obj
            return task_ref
        with (
            mock.patch.object(self.conn._session, '_call_method',
                              fake_call_method)
        ):
            self._create_vm()
            info = self._get_info()
            self._check_vm_info(info, power_state.RUNNING)
            self.assertTrue(self.exception)
    def _spawn_attach_volume_vmdk(self, set_image_ref=True):
        """Spawn an instance whose root disk is a vmdk volume.

        Sets up mox expectations for relocate + attach of the volume;
        NOTE: the expectation order must match the driver's call order.
        """
        self._create_instance(set_image_ref=set_image_ref)
        self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
        self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping')
        connection_info = self._test_vmdk_connection_info('vmdk')
        root_disk = [{'connection_info': connection_info,
                      'boot_index': 0}]
        v_driver.block_device_info_get_mapping(
                mox.IgnoreArg()).AndReturn(root_disk)
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_get_res_pool_of_vm')
        volumeops.VMwareVolumeOps._get_res_pool_of_vm(
                 mox.IgnoreArg()).AndReturn('fake_res_pool')
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_relocate_vmdk_volume')
        volumeops.VMwareVolumeOps._relocate_vmdk_volume(mox.IgnoreArg(),
                 'fake_res_pool', mox.IgnoreArg())
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 'attach_volume')
        volumeops.VMwareVolumeOps.attach_volume(connection_info,
                self.instance, constants.DEFAULT_ADAPTER_TYPE)
        self.mox.ReplayAll()
        block_device_info = {'block_device_mapping': root_disk}
        self.conn.spawn(self.context, self.instance, self.image,
                        injected_files=[], admin_password=None,
                        network_info=self.network_info,
                        block_device_info=block_device_info)
    def test_spawn_attach_volume_iscsi(self):
        """Spawn with an iSCSI root volume attaches it via attach_volume."""
        self._create_instance()
        self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
        self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping')
        connection_info = self._test_vmdk_connection_info('iscsi')
        root_disk = [{'connection_info': connection_info,
                      'boot_index': 0}]
        v_driver.block_device_info_get_mapping(
                mox.IgnoreArg()).AndReturn(root_disk)
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 'attach_volume')
        volumeops.VMwareVolumeOps.attach_volume(connection_info,
                self.instance, constants.DEFAULT_ADAPTER_TYPE)
        self.mox.ReplayAll()
        # NOTE(review): unlike _spawn_attach_volume_vmdk this passes
        # 'mount_device' rather than 'block_device_mapping'; the mapping
        # itself comes from the stubbed block_device_info_get_mapping.
        block_device_info = {'mount_device': 'vda'}
        self.conn.spawn(self.context, self.instance, self.image,
                        injected_files=[], admin_password=None,
                        network_info=self.network_info,
                        block_device_info=block_device_info)
def test_spawn_hw_versions(self):
updates = {'extra_specs': {'vmware:hw_version': 'vmx-08'}}
self._create_vm(instance_type_updates=updates)
vm = self._get_vm_record()
version = vm.get("version")
self.assertEqual('vmx-08', version)
    def mock_upload_image(self, context, image, instance, session, **kwargs):
        """Stand-in for images.upload_image_stream_optimized.

        Asserts the snapshot name, instance and vmdk size that the
        snapshot code path passes to the upload helper.
        """
        self.assertEqual('Test-Snapshot', image)
        self.assertEqual(self.instance, instance)
        self.assertEqual(1024, kwargs['vmdk_size'])
    def test_get_vm_ref_using_extra_config(self):
        """VM lookup works via extraConfig and falls back to the name."""
        self._create_vm()
        vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session,
                                                     self.instance['uuid'])
        self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
        # Disrupt the fake Virtual Machine object so that extraConfig
        # cannot be matched.
        fake_vm = self._get_vm_record()
        fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = ""
        # We should not get a Virtual Machine through extraConfig.
        vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session,
                                                     self.instance['uuid'])
        self.assertIsNone(vm_ref, 'VM Reference should be none')
        # Check if we can find the Virtual Machine using the name.
        vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
        self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
    def test_search_vm_ref_by_identifier(self):
        """search_vm_ref_by_identifier matches uuid, extraConfig or name."""
        self._create_vm()
        vm_ref = vm_util.search_vm_ref_by_identifier(self.conn._session,
                                                     self.instance['uuid'])
        self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
        # Rename every identifying attribute to "foo" so the instance
        # uuid no longer matches by any path.
        fake_vm = self._get_vm_record()
        fake_vm.set("summary.config.instanceUuid", "foo")
        fake_vm.set("name", "foo")
        fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = "foo"
        self.assertIsNone(vm_util.search_vm_ref_by_identifier(
                                    self.conn._session,
                                    self.instance['uuid']),
                          "VM Reference should be none")
        self.assertIsNotNone(
                vm_util.search_vm_ref_by_identifier(self.conn._session,
                                                    "foo"),
                "VM Reference should not be none")
    def test_get_object_for_optionvalue(self):
        """_get_object_for_optionvalue finds the VM by its nvp.vm-uuid."""
        self._create_vm()
        vms = self.conn._session._call_method(vim_util, "get_objects",
                "VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]'])
        vm_ref = vm_util._get_object_for_optionvalue(vms,
                                                     self.instance["uuid"])
        self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
    def _test_snapshot(self):
        """Snapshot the current instance and verify state + task updates.

        The task-state callback must be invoked first with
        IMAGE_PENDING_UPLOAD and then with IMAGE_UPLOADING; the upload
        itself is replaced by self.mock_upload_image.
        """
        expected_calls = [
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_UPLOADING,
                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
        with mock.patch.object(images, 'upload_image_stream_optimized',
                               self.mock_upload_image):
            self.conn.snapshot(self.context, self.instance, "Test-Snapshot",
                               func_call_matcher.call)
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
        # match() returns None when all expected calls were observed.
        self.assertIsNone(func_call_matcher.match())
    def test_snapshot(self):
        """Snapshot of a freshly spawned instance succeeds."""
        self._create_vm()
        self._test_snapshot()
    def test_snapshot_no_root_disk(self):
        """Snapshotting an ISO-booted instance without root disk fails."""
        self._iso_disk_type_created(instance_type='m1.micro')
        self.assertRaises(error_util.NoRootDiskDefined, self.conn.snapshot,
                          self.context, self.instance, "Test-Snapshot",
                          lambda *args, **kwargs: None)
    def test_snapshot_non_existent(self):
        """Snapshotting an instance with no backing VM raises NotFound."""
        self._create_instance()
        self.assertRaises(exception.InstanceNotFound, self.conn.snapshot,
                          self.context, self.instance, "Test-Snapshot",
                          lambda *args, **kwargs: None)
    def test_snapshot_delete_vm_snapshot(self):
        """The snapshot flow creates and then deletes the VM snapshot."""
        self._create_vm()
        fake_vm = self._get_vm_record()
        snapshot_ref = vmwareapi_fake.ManagedObjectReference(
                                value="Snapshot-123",
                                name="VirtualMachineSnapshot")
        self.mox.StubOutWithMock(vmops.VMwareVMOps,
                                 '_create_vm_snapshot')
        self.conn._vmops._create_vm_snapshot(
                self.instance, fake_vm.obj).AndReturn(snapshot_ref)
        self.mox.StubOutWithMock(vmops.VMwareVMOps,
                                 '_delete_vm_snapshot')
        self.conn._vmops._delete_vm_snapshot(
                self.instance, fake_vm.obj, snapshot_ref).AndReturn(None)
        self.mox.ReplayAll()
        self._test_snapshot()
    def _snapshot_delete_vm_snapshot_exception(self, exception, call_count=1):
        """Drive _delete_vm_snapshot through a failing _wait_for_task.

        TaskInProgress is retried (call_count attempts, sleeping between
        them); any other exception propagates after a single attempt.
        NOTE: the 'exception' parameter shadows the module-level
        'exception' import within this method.
        """
        self._create_vm()
        fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0].obj
        snapshot_ref = vmwareapi_fake.ManagedObjectReference(
                                value="Snapshot-123",
                                name="VirtualMachineSnapshot")
        with contextlib.nested(
            mock.patch.object(self.conn._session, '_wait_for_task',
                              side_effect=exception),
            mock.patch.object(vmops, '_time_sleep_wrapper')
        ) as (_fake_wait, _fake_sleep):
            if exception != vexc.TaskInProgress:
                self.assertRaises(exception,
                                  self.conn._vmops._delete_vm_snapshot,
                                  self.instance, fake_vm, snapshot_ref)
                self.assertEqual(0, _fake_sleep.call_count)
            else:
                self.conn._vmops._delete_vm_snapshot(self.instance, fake_vm,
                                                     snapshot_ref)
                # One fewer sleep than attempts: no sleep after success.
                self.assertEqual(call_count - 1, _fake_sleep.call_count)
            self.assertEqual(call_count, _fake_wait.call_count)
    def test_snapshot_delete_vm_snapshot_exception(self):
        """A non-retryable error in snapshot deletion propagates."""
        self._snapshot_delete_vm_snapshot_exception(exception.NovaException)
    def test_snapshot_delete_vm_snapshot_exception_retry(self):
        """TaskInProgress is retried up to the configured api_retry_count."""
        self.flags(api_retry_count=5, group='vmware')
        self._snapshot_delete_vm_snapshot_exception(vexc.TaskInProgress,
                                                    5)
def test_reboot(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
reboot_type = "SOFT"
self.conn.reboot(self.context, self.instance, self.network_info,
reboot_type)
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
def test_reboot_hard(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
reboot_type = "HARD"
self.conn.reboot(self.context, self.instance, self.network_info,
reboot_type)
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
def test_reboot_with_uuid(self):
"""Test fall back to use name when can't find by uuid."""
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
reboot_type = "SOFT"
self.conn.reboot(self.context, self.instance, self.network_info,
reboot_type)
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
    def test_reboot_non_existent(self):
        """Rebooting an instance with no backing VM raises NotFound."""
        self._create_instance()
        self.assertRaises(exception.InstanceNotFound, self.conn.reboot,
                          self.context, self.instance, self.network_info,
                          'SOFT')
    def test_poll_rebooting_instances(self):
        """poll_rebooting_instances reboots each instance via compute API."""
        self.mox.StubOutWithMock(compute_api.API, 'reboot')
        compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
                               mox.IgnoreArg())
        self.mox.ReplayAll()
        self._create_vm()
        instances = [self.instance]
        self.conn.poll_rebooting_instances(60, instances)
def test_reboot_not_poweredon(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.context, self.instance)
info = self._get_info()
self._check_vm_info(info, power_state.SUSPENDED)
self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot,
self.context, self.instance, self.network_info,
'SOFT')
def test_suspend(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.context, self.instance)
info = self._get_info()
self._check_vm_info(info, power_state.SUSPENDED)
    def test_suspend_non_existent(self):
        """Suspending an instance with no backing VM raises NotFound."""
        self._create_instance()
        self.assertRaises(exception.InstanceNotFound, self.conn.suspend,
                          self.context, self.instance)
def test_resume(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.context, self.instance)
info = self._get_info()
self._check_vm_info(info, power_state.SUSPENDED)
self.conn.resume(self.context, self.instance, self.network_info)
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
    def test_resume_non_existent(self):
        """Resuming an instance with no backing VM raises NotFound."""
        self._create_instance()
        self.assertRaises(exception.InstanceNotFound, self.conn.resume,
                          self.context, self.instance, self.network_info)
def test_resume_not_suspended(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.assertRaises(exception.InstanceResumeFailure, self.conn.resume,
self.context, self.instance, self.network_info)
def test_power_on(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.conn.power_off(self.instance)
info = self._get_info()
self._check_vm_info(info, power_state.SHUTDOWN)
self.conn.power_on(self.context, self.instance, self.network_info)
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
    def test_power_on_non_existent(self):
        """Powering on an instance with no backing VM raises NotFound."""
        self._create_instance()
        self.assertRaises(exception.InstanceNotFound, self.conn.power_on,
                          self.context, self.instance, self.network_info)
def test_power_off(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.conn.power_off(self.instance)
info = self._get_info()
self._check_vm_info(info, power_state.SHUTDOWN)
    def test_power_off_non_existent(self):
        """Powering off an instance with no backing VM raises NotFound."""
        self._create_instance()
        self.assertRaises(exception.InstanceNotFound, self.conn.power_off,
                          self.instance)
    @mock.patch.object(driver.VMwareVCDriver, 'reboot')
    @mock.patch.object(vm_util, 'get_vm_state',
                       return_value='poweredOff')
    def test_resume_state_on_host_boot(self, mock_get_vm_state,
                                       mock_reboot):
        """A poweredOff instance is hard-rebooted on host boot."""
        self._create_instance()
        self.conn.resume_state_on_host_boot(self.context, self.instance,
                                            'network_info')
        mock_get_vm_state.assert_called_once_with(self.conn._session,
                                                  self.instance)
        mock_reboot.assert_called_once_with(self.context, self.instance,
                                            'network_info', 'hard', None)
    def test_resume_state_on_host_boot_no_reboot(self):
        """poweredOn and suspended instances are not rebooted on host boot."""
        self._create_instance()
        for state in ['poweredOn', 'suspended']:
            with contextlib.nested(
                mock.patch.object(driver.VMwareVCDriver, 'reboot'),
                mock.patch.object(vm_util, 'get_vm_state',
                                  return_value=state)
            ) as (mock_reboot, mock_get_vm_state):
                self.conn.resume_state_on_host_boot(self.context,
                                                    self.instance,
                                                    'network_info')
                mock_get_vm_state.assert_called_once_with(self.conn._session,
                                                          self.instance)
                self.assertFalse(mock_reboot.called)
def test_destroy(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
instances = self.conn.list_instances()
self.assertEqual(1, len(instances))
self.conn.destroy(self.context, self.instance, self.network_info)
instances = self.conn.list_instances()
self.assertEqual(0, len(instances))
self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
    def test_destroy_no_datastore(self):
        """Destroy succeeds even when the VM has no vmPathName set."""
        self._create_vm()
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
        instances = self.conn.list_instances()
        self.assertEqual(1, len(instances))
        # Delete the vmPathName
        vm = self._get_vm_record()
        vm.delete('config.files.vmPathName')
        self.conn.destroy(self.context, self.instance, self.network_info)
        instances = self.conn.list_instances()
        self.assertEqual(0, len(instances))
    def test_destroy_non_existent(self):
        """Destroying a spawn-less instance still delegates to vmops."""
        self.destroy_disks = True
        with mock.patch.object(self.conn._vmops,
                               "destroy") as mock_destroy:
            self._create_instance()
            self.conn.destroy(self.context, self.instance,
                              self.network_info,
                              None, self.destroy_disks)
            mock_destroy.assert_called_once_with(self.instance,
                                                 self.destroy_disks)
    def test_destroy_instance_without_compute(self):
        """Destroy of an instance without a compute node is a no-op."""
        self.destroy_disks = True
        with mock.patch.object(self.conn._vmops,
                               "destroy") as mock_destroy:
            self.conn.destroy(self.context, self.instance_without_compute,
                              self.network_info,
                              None, self.destroy_disks)
            self.assertFalse(mock_destroy.called)
    def _destroy_instance_without_vm_ref(self,
                                         task_state=None):
        """Destroy an instance whose ref is found only by name.

        When the instance is reverting a resize the driver must skip
        _destroy_instance entirely; in either case no session call is
        expected because _destroy_instance is mocked out.
        """
        def fake_vm_ref_from_name(session, vm_name):
            return 'fake-ref'
        self._create_instance()
        with contextlib.nested(
             mock.patch.object(vm_util, 'get_vm_ref_from_name',
                               fake_vm_ref_from_name),
             mock.patch.object(self.conn._session,
                               '_call_method'),
             mock.patch.object(self.conn._vmops,
                               '_destroy_instance')
        ) as (mock_get, mock_call, mock_destroy):
            self.instance.task_state = task_state
            self.conn.destroy(self.context, self.instance,
                              self.network_info,
                              None, True)
            if task_state == task_states.RESIZE_REVERTING:
                expected = 0
            else:
                expected = 1
            self.assertEqual(expected, mock_destroy.call_count)
            self.assertFalse(mock_call.called)
    def test_destroy_instance_without_vm_ref(self):
        """Default destroy path calls _destroy_instance once."""
        self._destroy_instance_without_vm_ref()
    def test_destroy_instance_without_vm_ref_with_resize_revert(self):
        """During resize-revert the destroy of the instance is skipped."""
        self._destroy_instance_without_vm_ref(
            task_state=task_states.RESIZE_REVERTING)
    def _rescue(self, config_drive=False):
        """Rescue the instance and verify the rescue/original VM states.

        The original VM ends up SHUTDOWN (checked from inside the
        attach-disk stub), the rescue VM is RUNNING, and power-on must
        happen exactly once.
        """
        # validate that the power on is only called once
        self._power_on = vm_util.power_on_instance
        self._power_on_called = 0
        def fake_attach_disk_to_vm(vm_ref, instance,
                                   adapter_type, disk_type, vmdk_path=None,
                                   disk_size=None, linked_clone=False,
                                   controller_key=None, unit_number=None,
                                   device_name=None):
            # At attach time the original instance must already be off.
            info = self.conn.get_info(instance)
            self._check_vm_info(info, power_state.SHUTDOWN)
        if config_drive:
            def fake_create_config_drive(instance, injected_files, password,
                                         network_info, data_store_name,
                                         folder, instance_uuid, cookies):
                self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
                return str(ds_obj.DatastorePath(data_store_name,
                                                instance_uuid, 'fake.iso'))
            self.stubs.Set(self.conn._vmops, '_create_config_drive',
                           fake_create_config_drive)
        self._create_vm()
        def fake_power_on_instance(session, instance, vm_ref=None):
            self._power_on_called += 1
            return self._power_on(session, instance, vm_ref=vm_ref)
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
        self.stubs.Set(vm_util, "power_on_instance",
                       fake_power_on_instance)
        self.stubs.Set(self.conn._volumeops, "attach_disk_to_vm",
                       fake_attach_disk_to_vm)
        self.conn.rescue(self.context, self.instance, self.network_info,
                         self.image, 'fake-password')
        info = self.conn.get_info({'name': '1',
                                   'uuid': self.uuid,
                                   'node': self.instance_node})
        self._check_vm_info(info, power_state.RUNNING)
        # The pre-rescue VM is renamed with an '-orig' suffix.
        info = self.conn.get_info({'name': '1-orig',
                                   'uuid': '%s-orig' % self.uuid,
                                   'node': self.instance_node})
        self._check_vm_info(info, power_state.SHUTDOWN)
        self.assertIsNotNone(vm_util.vm_ref_cache_get(self.uuid))
        self.assertEqual(1, self._power_on_called)
    def test_get_diagnostics(self):
        """get_diagnostics returns the fake VM stats, 'vmware:'-prefixed."""
        self._create_vm()
        expected = {'memoryReservation': 0, 'suspendInterval': 0,
                    'maxCpuUsage': 2000, 'toolsInstallerMounted': False,
                    'consumedOverheadMemory': 20, 'numEthernetCards': 1,
                    'numCpu': 1, 'featureRequirement': [{'key': 'cpuid.AES'}],
                    'memoryOverhead': 21417984,
                    'guestMemoryUsage': 0, 'connectionState': 'connected',
                    'memorySizeMB': 512, 'balloonedMemory': 0,
                    'vmPathName': 'fake_path', 'template': False,
                    'overallCpuUsage': 0, 'powerState': 'poweredOn',
                    'cpuReservation': 0, 'overallCpuDemand': 0,
                    'numVirtualDisks': 1, 'hostMemoryUsage': 141}
        expected = {'vmware:' + k: v for k, v in expected.items()}
        instance = fake_instance.fake_instance_obj(None,
                                                   name=1,
                                                   uuid=self.uuid,
                                                   node=self.instance_node)
        self.assertThat(
                self.conn.get_diagnostics(instance),
                matchers.DictMatches(expected))
    def test_get_instance_diagnostics(self):
        """get_instance_diagnostics serializes to the standard schema."""
        self._create_vm()
        expected = {'uptime': 0,
                    'memory_details': {'used': 0, 'maximum': 512},
                    'nic_details': [],
                    'driver': 'vmwareapi',
                    'state': 'running',
                    'version': '1.0',
                    'cpu_details': [],
                    'disk_details': [],
                    'hypervisor_os': 'esxi',
                    'config_drive': 'False'}
        instance = objects.Instance(uuid=self.uuid,
                                    config_drive=False,
                                    system_metadata={},
                                    node=self.instance_node)
        actual = self.conn.get_instance_diagnostics(instance)
        self.assertThat(actual.serialize(), matchers.DictMatches(expected))
    def test_get_console_output(self):
        """Console output is not implemented by the VMware driver."""
        self.assertRaises(NotImplementedError, self.conn.get_console_output,
                          None, None)
    def test_get_vnc_console_non_existent(self):
        """VNC console for an instance with no backing VM raises NotFound."""
        self._create_instance()
        self.assertRaises(exception.InstanceNotFound,
                          self.conn.get_vnc_console,
                          self.context,
                          self.instance)
    def _test_get_vnc_console(self):
        """Set the VNC port extraConfig and verify host/port are returned."""
        self._create_vm()
        fake_vm = self._get_vm_record()
        OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
        opt_val = OptionValue(key='', value=5906)
        fake_vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
        vnc_console = self.conn.get_vnc_console(self.context, self.instance)
        self.assertEqual(self.vnc_host, vnc_console.host)
        self.assertEqual(5906, vnc_console.port)
    def test_get_vnc_console(self):
        """VNC console lookup succeeds when the port is configured."""
        self._test_get_vnc_console()
    def test_get_vnc_console_noport(self):
        """A VM without a configured VNC port yields ConsoleTypeUnavailable."""
        self._create_vm()
        self.assertRaises(exception.ConsoleTypeUnavailable,
                          self.conn.get_vnc_console,
                          self.context,
                          self.instance)
def test_get_volume_connector(self):
self._create_vm()
connector_dict = self.conn.get_volume_connector(self.instance)
fake_vm = self._get_vm_record()
fake_vm_id = fake_vm.obj.value
self.assertEqual('test_url', connector_dict['ip'])
self.assertEqual('iscsi-name', connector_dict['initiator'])
self.assertEqual('test_url', connector_dict['host'])
self.assertEqual(fake_vm_id, connector_dict['instance'])
def _test_vmdk_connection_info(self, type):
return {'driver_volume_type': type,
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
    def test_volume_attach_vmdk(self):
        """attach_volume routes a vmdk volume to _attach_volume_vmdk."""
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('vmdk')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_attach_volume_vmdk')
        volumeops.VMwareVolumeOps._attach_volume_vmdk(connection_info,
                self.instance, None)
        self.mox.ReplayAll()
        self.conn.attach_volume(None, connection_info, self.instance,
                                mount_point)
    def test_volume_detach_vmdk(self):
        """detach_volume routes a vmdk volume to _detach_volume_vmdk."""
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('vmdk')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_detach_volume_vmdk')
        volumeops.VMwareVolumeOps._detach_volume_vmdk(connection_info,
                self.instance)
        self.mox.ReplayAll()
        self.conn.detach_volume(connection_info, self.instance, mount_point,
                                encryption=None)
    def test_attach_vmdk_disk_to_vm(self):
        """Full vmdk attach path: lookup, attach and volume detail update."""
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('vmdk')
        adapter_type = constants.DEFAULT_ADAPTER_TYPE
        disk_type = constants.DEFAULT_DISK_TYPE
        disk_uuid = 'e97f357b-331e-4ad1-b726-89be048fb811'
        backing = mock.Mock(uuid=disk_uuid)
        device = mock.Mock(backing=backing)
        vmdk_info = vm_util.VmdkInfo('fake-path', adapter_type, disk_type, 64,
                                     device)
        with contextlib.nested(
            mock.patch.object(vm_util, 'get_vm_ref',
                              return_value=mock.sentinel.vm_ref),
            mock.patch.object(volumeops.VMwareVolumeOps, '_get_volume_ref'),
            mock.patch.object(vm_util, 'get_vmdk_info',
                              return_value=vmdk_info),
            mock.patch.object(volumeops.VMwareVolumeOps, 'attach_disk_to_vm'),
            mock.patch.object(volumeops.VMwareVolumeOps,
                              '_update_volume_details')
        ) as (get_vm_ref, get_volume_ref, get_vmdk_info,
              attach_disk_to_vm, update_volume_details):
            self.conn.attach_volume(None, connection_info, self.instance,
                                    '/dev/vdc')
            get_vm_ref.assert_called_once_with(self.conn._session,
                                               self.instance)
            get_volume_ref.assert_called_once_with(
                connection_info['data']['volume'])
            self.assertTrue(get_vmdk_info.called)
            attach_disk_to_vm.assert_called_once_with(mock.sentinel.vm_ref,
                self.instance, adapter_type, disk_type, vmdk_path='fake-path')
            # The disk backing uuid is persisted against the volume id.
            update_volume_details.assert_called_once_with(
                mock.sentinel.vm_ref, connection_info['data']['volume_id'],
                disk_uuid)
    def test_detach_vmdk_disk_from_vm(self):
        """Driver detach_volume delegates to volumeops.detach_volume."""
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('vmdk')
        with mock.patch.object(volumeops.VMwareVolumeOps,
                               'detach_volume') as detach_volume:
            self.conn.detach_volume(connection_info, self.instance,
                                    '/dev/vdc', encryption=None)
            detach_volume.assert_called_once_with(connection_info,
                                                  self.instance)
    def test_volume_attach_iscsi(self):
        """attach_volume routes an iscsi volume to _attach_volume_iscsi."""
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('iscsi')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_attach_volume_iscsi')
        volumeops.VMwareVolumeOps._attach_volume_iscsi(connection_info,
                self.instance, None)
        self.mox.ReplayAll()
        self.conn.attach_volume(None, connection_info, self.instance,
                                mount_point)
    def test_volume_detach_iscsi(self):
        """detach_volume routes an iscsi volume to _detach_volume_iscsi."""
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('iscsi')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_detach_volume_iscsi')
        volumeops.VMwareVolumeOps._detach_volume_iscsi(connection_info,
                self.instance)
        self.mox.ReplayAll()
        self.conn.detach_volume(connection_info, self.instance, mount_point,
                                encryption=None)
    def test_attach_iscsi_disk_to_vm(self):
        """iSCSI attach rescans the HBA when the target is not yet visible.

        The first _iscsi_get_target lookup misses, triggering a rescan
        with the target portal; the second lookup succeeds and the disk
        is attached as an 'rdmp' device.
        """
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('iscsi')
        connection_info['data']['target_portal'] = 'fake_target_host:port'
        connection_info['data']['target_iqn'] = 'fake_target_iqn'
        mount_point = '/dev/vdc'
        discover = ('fake_name', 'fake_uuid')
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_iscsi_get_target')
        # simulate target not found
        volumeops.VMwareVolumeOps._iscsi_get_target(
            connection_info['data']).AndReturn((None, None))
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_iscsi_add_send_target_host')
        # rescan gets called with target portal
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_iscsi_rescan_hba')
        volumeops.VMwareVolumeOps._iscsi_rescan_hba(
            connection_info['data']['target_portal'])
        # simulate target found
        volumeops.VMwareVolumeOps._iscsi_get_target(
            connection_info['data']).AndReturn(discover)
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 'attach_disk_to_vm')
        volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(),
                self.instance, mox.IgnoreArg(), 'rdmp',
                device_name=mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conn.attach_volume(None, connection_info, self.instance,
                                mount_point)
    def test_iscsi_rescan_hba(self):
        """Rescan adds the send target once and is idempotent thereafter."""
        fake_target_portal = 'fake_target_host:port'
        host_storage_sys = vmwareapi_fake._get_objects(
            "HostStorageSystem").objects[0]
        iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
                                               '.hostBusAdapter')
        iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
        # Check the host system does not have the send target
        self.assertRaises(AttributeError, getattr, iscsi_hba,
                          'configuredSendTarget')
        # Rescan HBA with the target portal
        vops = volumeops.VMwareVolumeOps(self.conn._session)
        vops._iscsi_rescan_hba(fake_target_portal)
        # Check if HBA has the target portal configured
        self.assertEqual('fake_target_host',
                          iscsi_hba.configuredSendTarget[0].address)
        # Rescan HBA with same portal
        vops._iscsi_rescan_hba(fake_target_portal)
        self.assertEqual(1, len(iscsi_hba.configuredSendTarget))
    def test_iscsi_get_target(self):
        """_iscsi_get_target resolves a registered target to (device, uuid)."""
        data = {'target_portal': 'fake_target_host:port',
                'target_iqn': 'fake_target_iqn'}
        host = vmwareapi_fake._get_objects('HostSystem').objects[0]
        host._add_iscsi_target(data)
        vops = volumeops.VMwareVolumeOps(self.conn._session)
        result = vops._iscsi_get_target(data)
        self.assertEqual(('fake-device', 'fake-uuid'), result)
    def test_detach_iscsi_disk_from_vm(self):
        """iSCSI detach finds the RDM disk and destroys it on detach."""
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('iscsi')
        connection_info['data']['target_portal'] = 'fake_target_portal'
        connection_info['data']['target_iqn'] = 'fake_target_iqn'
        mount_point = '/dev/vdc'
        find = ('fake_name', 'fake_uuid')
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_iscsi_get_target')
        volumeops.VMwareVolumeOps._iscsi_get_target(
            connection_info['data']).AndReturn(find)
        self.mox.StubOutWithMock(vm_util, 'get_rdm_disk')
        device = 'fake_device'
        vm_util.get_rdm_disk(mox.IgnoreArg(), 'fake_uuid').AndReturn(device)
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 'detach_disk_from_vm')
        volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(),
                self.instance, device, destroy_disk=True)
        self.mox.ReplayAll()
        self.conn.detach_volume(connection_info, self.instance, mount_point,
                                encryption=None)
    def test_connection_info_get(self):
        """Volume connector includes instance info while the VM exists."""
        self._create_vm()
        connector = self.conn.get_volume_connector(self.instance)
        self.assertEqual('test_url', connector['ip'])
        self.assertEqual('test_url', connector['host'])
        self.assertEqual('iscsi-name', connector['initiator'])
        self.assertIn('instance', connector)

    def test_connection_info_get_after_destroy(self):
        """Volume connector omits instance info once the VM is destroyed."""
        self._create_vm()
        self.conn.destroy(self.context, self.instance, self.network_info)
        connector = self.conn.get_volume_connector(self.instance)
        self.assertEqual('test_url', connector['ip'])
        self.assertEqual('test_url', connector['host'])
        self.assertEqual('iscsi-name', connector['initiator'])
        self.assertNotIn('instance', connector)

    def test_refresh_instance_security_rules(self):
        """The VC driver does not implement instance security rule refresh."""
        self.assertRaises(NotImplementedError,
                          self.conn.refresh_instance_security_rules,
                          instance=None)
    @mock.patch.object(objects.block_device.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    def test_image_aging_image_used(self, mock_get_by_inst):
        """A cached image still used by an instance survives cache aging."""
        self._create_vm()
        all_instances = [self.instance]
        self.conn.manage_image_cache(self.context, all_instances)
        self._cached_files_exist()

    def _get_timestamp_filename(self):
        """Return the aging timestamp file name for the stubbed old time."""
        return '%s%s' % (imagecache.TIMESTAMP_PREFIX,
                         timeutils.strtime(at=self.old_time,
                                           fmt=imagecache.TIMESTAMP_FORMAT))

    def _override_time(self):
        """Pin the image cache's timestamp file name to a fixed old time."""
        self.old_time = datetime.datetime(2012, 11, 22, 12, 00, 00)

        def _fake_get_timestamp_filename(fake):
            return self._get_timestamp_filename()

        self.stubs.Set(imagecache.ImageCacheManager, '_get_timestamp_filename',
                       _fake_get_timestamp_filename)

    def _timestamp_file_exists(self, exists=True):
        """Assert the presence (or absence) of the cache timestamp file."""
        timestamp = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                         self.fake_image_uuid,
                                         self._get_timestamp_filename() + '/')
        if exists:
            vmwareapi_fake.assertPathExists(self, str(timestamp))
        else:
            vmwareapi_fake.assertPathNotExists(self, str(timestamp))

    def _image_aging_image_marked_for_deletion(self):
        """Age the cache with no instances; the image gets timestamp-marked."""
        self._create_vm(uuid=uuidutils.generate_uuid())
        self._cached_files_exist()
        all_instances = []
        self.conn.manage_image_cache(self.context, all_instances)
        self._cached_files_exist()
        self._timestamp_file_exists()
    def test_image_aging_image_marked_for_deletion(self):
        """An unused cached image is marked for deletion by cache aging."""
        self._override_time()
        self._image_aging_image_marked_for_deletion()

    def _timestamp_file_removed(self):
        """Spawning a new user of the image clears its deletion mark."""
        self._override_time()
        self._image_aging_image_marked_for_deletion()
        self._create_vm(num_instances=2,
                        uuid=uuidutils.generate_uuid())
        self._timestamp_file_exists(exists=False)

    def test_timestamp_file_removed_spawn(self):
        self._timestamp_file_removed()

    @mock.patch.object(objects.block_device.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    def test_timestamp_file_removed_aging(self, mock_get_by_inst):
        """A stale timestamp file is removed when the image is in use."""
        self._timestamp_file_removed()
        ts = self._get_timestamp_filename()
        ts_path = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                       self.fake_image_uuid, ts + '/')
        vmwareapi_fake._add_file(str(ts_path))
        self._timestamp_file_exists()
        all_instances = [self.instance]
        self.conn.manage_image_cache(self.context, all_instances)
        self._timestamp_file_exists(exists=False)

    @mock.patch.object(objects.block_device.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    def test_image_aging_disabled(self, mock_get_by_inst):
        """With aging disabled, cached files stay and no mark is written."""
        self._override_time()
        self.flags(remove_unused_base_images=False)
        self._create_vm()
        self._cached_files_exist()
        all_instances = []
        self.conn.manage_image_cache(self.context, all_instances)
        self._cached_files_exist(exists=True)
        self._timestamp_file_exists(exists=False)

    def _image_aging_aged(self, aging_time=100):
        """Mark the image, then advance the clock and run cache aging again.

        The clock moves 10 seconds past ``old_time``; whether the image is
        deleted depends on ``aging_time`` relative to those 10 seconds.
        """
        self._override_time()
        cur_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        self.flags(remove_unused_original_minimum_age_seconds=aging_time)
        self._image_aging_image_marked_for_deletion()
        all_instances = []
        timeutils.set_time_override(cur_time)
        self.conn.manage_image_cache(self.context, all_instances)

    def test_image_aging_aged(self):
        # 8s threshold < 10s elapsed: image should be removed.
        self._image_aging_aged(aging_time=8)
        self._cached_files_exist(exists=False)

    def test_image_aging_not_aged(self):
        # Default 100s threshold > 10s elapsed: image should remain.
        self._image_aging_aged()
        self._cached_files_exist()
    def test_public_api_signatures(self):
        """Driver's public methods match the ComputeDriver base signatures."""
        self.assertPublicAPISignatures(v_driver.ComputeDriver(None), self.conn)

    def test_register_extension(self):
        """The OpenStack extension is registered when not already present."""
        with mock.patch.object(self.conn._session, '_call_method',
                               return_value=None) as mock_call_method:
            self.conn._register_openstack_extension()
            mock_call_method.assert_has_calls(
                [mock.call(oslo_vim_util, 'find_extension',
                           constants.EXTENSION_KEY),
                 mock.call(oslo_vim_util, 'register_extension',
                           constants.EXTENSION_KEY,
                           constants.EXTENSION_TYPE_INSTANCE)])

    def test_register_extension_already_exists(self):
        """No registration call is made if the extension already exists."""
        with mock.patch.object(self.conn._session, '_call_method',
                               return_value='fake-extension') as mock_find_ext:
            self.conn._register_openstack_extension()
            mock_find_ext.assert_called_once_with(oslo_vim_util,
                                                  'find_extension',
                                                  constants.EXTENSION_KEY)

    def test_list_instances(self):
        """A fresh connection reports no instances."""
        instances = self.conn.list_instances()
        self.assertEqual(0, len(instances))

    def _setup_mocks_for_session(self, mock_init):
        """Build a VC driver whose mocked session gains a vim on creation."""
        mock_init.return_value = None

        vcdriver = driver.VMwareVCDriver(None, False)
        vcdriver._session = mock.Mock()
        vcdriver._session.vim = None

        def side_effect():
            # Simulate a successful session creation populating vim.
            vcdriver._session.vim = mock.Mock()
        vcdriver._session._create_session.side_effect = side_effect
        return vcdriver
    def test_host_power_action(self):
        """Host power actions are not implemented by the VC driver."""
        self.assertRaises(NotImplementedError,
                          self.conn.host_power_action, 'action')

    def test_host_maintenance_mode(self):
        """Host maintenance mode is not implemented by the VC driver."""
        self.assertRaises(NotImplementedError,
                          self.conn.host_maintenance_mode, 'host', 'mode')

    def test_set_host_enabled(self):
        """Enabling/disabling the host is not implemented by the VC driver."""
        self.assertRaises(NotImplementedError,
                          self.conn.set_host_enabled, 'state')

    def test_datastore_regex_configured(self):
        """The configured datastore regex propagates to vmops and vc_state."""
        self.assertEqual(self.conn._datastore_regex,
                         self.conn._vmops._datastore_regex)
        self.assertEqual(self.conn._datastore_regex,
                         self.conn._vc_state._datastore_regex)

    @mock.patch('nova.virt.vmwareapi.ds_util.get_datastore')
    def test_datastore_regex_configured_vcstate(self, mock_get_ds_ref):
        """vc_state passes its datastore regex through to get_datastore."""
        vcstate = self.conn._vc_state
        self.conn.get_available_resource(self.node_name)
        mock_get_ds_ref.assert_called_with(
            vcstate._session, vcstate._cluster, vcstate._datastore_regex)

    def test_get_available_resource(self):
        """Resource stats reflect the fake cluster's capacity and usage."""
        stats = self.conn.get_available_resource(self.node_name)
        self.assertEqual(32, stats['vcpus'])
        self.assertEqual(1024, stats['local_gb'])
        self.assertEqual(1024 - 500, stats['local_gb_used'])
        self.assertEqual(1000, stats['memory_mb'])
        self.assertEqual(500, stats['memory_mb_used'])
        self.assertEqual('VMware vCenter Server', stats['hypervisor_type'])
        self.assertEqual(5001000, stats['hypervisor_version'])
        self.assertEqual(self.node_name, stats['hypervisor_hostname'])
        self.assertIsNone(stats['cpu_info'])
        self.assertEqual(
            '[["i686", "vmware", "hvm"], ["x86_64", "vmware", "hvm"]]',
            stats['supported_instances'])

    def test_invalid_datastore_regex(self):
        # Tests if we raise an exception for Invalid Regular Expression in
        # vmware_datastore_regex
        self.flags(cluster_name='test_cluster', datastore_regex='fake-ds(01',
                   group='vmware')
        self.assertRaises(exception.InvalidInput, driver.VMwareVCDriver, None)

    def test_get_available_nodes(self):
        """Exactly one node — the configured cluster — is reported."""
        nodelist = self.conn.get_available_nodes()
        self.assertEqual(1, len(nodelist))
        self.assertIn(self.node_name, nodelist)
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_spawn_with_sparse_image(self, mock_from_image):
        """Spawn succeeds when the image is a non-linked-clone sparse disk."""
        img_info = images.VMwareImage(
            image_id=self.fake_image_uuid,
            file_size=1024,
            disk_type=constants.DISK_TYPE_SPARSE,
            linked_clone=False)
        mock_from_image.return_value = img_info
        self._create_vm()
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)

    def test_plug_vifs(self):
        # Check to make sure the method raises NotImplementedError.
        self._create_instance()
        self.assertRaises(NotImplementedError,
                          self.conn.plug_vifs,
                          instance=self.instance, network_info=None)

    def test_unplug_vifs(self):
        # Check to make sure the method raises NotImplementedError.
        self._create_instance()
        self.assertRaises(NotImplementedError,
                          self.conn.unplug_vifs,
                          instance=self.instance, network_info=None)
def _create_vif(self):
gw_4 = network_model.IP(address='101.168.1.1', type='gateway')
dns_4 = network_model.IP(address='8.8.8.8', type=None)
subnet_4 = network_model.Subnet(cidr='101.168.1.0/24',
dns=[dns_4],
gateway=gw_4,
routes=None,
dhcp_server='191.168.1.1')
gw_6 = network_model.IP(address='101:1db9::1', type='gateway')
subnet_6 = network_model.Subnet(cidr='101:1db9::/64',
dns=None,
gateway=gw_6,
ips=None,
routes=None)
network_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge=None,
label=None,
subnets=[subnet_4,
subnet_6],
bridge_interface='eth0',
vlan=99)
vif_bridge_neutron = network_model.VIF(id='new-vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_neutron,
type=None,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
return vif_bridge_neutron
def _validate_interfaces(self, id, index, num_iface_ids):
vm = self._get_vm_record()
found_iface_id = False
extras = vm.get("config.extraConfig")
key = "nvp.iface-id.%s" % index
num_found = 0
for c in extras.OptionValue:
if c.key.startswith("nvp.iface-id."):
num_found += 1
if c.key == key and c.value == id:
found_iface_id = True
self.assertTrue(found_iface_id)
self.assertEqual(num_iface_ids, num_found)
    def _attach_interface(self, vif):
        """Attach vif and verify it appears as the second iface-id entry."""
        self.conn.attach_interface(self.instance, self.image, vif)
        self._validate_interfaces(vif['id'], 1, 2)

    def test_attach_interface(self):
        self._create_vm()
        vif = self._create_vif()
        self._attach_interface(vif)

    def test_attach_interface_with_exception(self):
        """A task failure during attach surfaces as InterfaceAttachFailed."""
        self._create_vm()
        vif = self._create_vif()
        with mock.patch.object(self.conn._session, '_wait_for_task',
                               side_effect=Exception):
            self.assertRaises(exception.InterfaceAttachFailed,
                              self.conn.attach_interface,
                              self.instance, self.image, vif)

    @mock.patch.object(vif, 'get_network_device',
                       return_value='fake_device')
    def _detach_interface(self, vif, mock_get_device):
        """Create a VM, attach vif, then detach it and verify the free slot."""
        self._create_vm()
        self._attach_interface(vif)
        self.conn.detach_interface(self.instance, vif)
        self._validate_interfaces('free', 1, 2)

    def test_detach_interface(self):
        vif = self._create_vif()
        self._detach_interface(vif)

    def test_detach_interface_and_attach(self):
        """A detached slot can be re-used by a subsequent attach."""
        vif = self._create_vif()
        self._detach_interface(vif)
        self.conn.attach_interface(self.instance, self.image, vif)
        self._validate_interfaces(vif['id'], 1, 2)

    def test_detach_interface_no_device(self):
        """Detaching when no backing device is found raises NotFound."""
        self._create_vm()
        vif = self._create_vif()
        self._attach_interface(vif)
        self.assertRaises(exception.NotFound, self.conn.detach_interface,
                          self.instance, vif)

    def test_detach_interface_no_vif_match(self):
        """Detaching with a mismatched vif id raises NotFound."""
        self._create_vm()
        vif = self._create_vif()
        self._attach_interface(vif)
        vif['id'] = 'bad-id'
        self.assertRaises(exception.NotFound, self.conn.detach_interface,
                          self.instance, vif)

    @mock.patch.object(vif, 'get_network_device',
                       return_value='fake_device')
    def test_detach_interface_with_exception(self, mock_get_device):
        """A task failure during detach surfaces as InterfaceDetachFailed."""
        self._create_vm()
        vif = self._create_vif()
        self._attach_interface(vif)
        with mock.patch.object(self.conn._session, '_wait_for_task',
                               side_effect=Exception):
            self.assertRaises(exception.InterfaceDetachFailed,
                              self.conn.detach_interface,
                              self.instance, vif)

    def test_resize_to_smaller_disk(self):
        """Resizing to a flavor with a smaller disk must roll back."""
        self._create_vm(instance_type='m1.large')
        flavor = self._get_instance_type_by_name('m1.small')
        self.assertRaises(exception.InstanceFaultRollback,
                          self.conn.migrate_disk_and_power_off, self.context,
                          self.instance, 'fake_dest', flavor, None)
    def test_spawn_attach_volume_vmdk(self):
        self._spawn_attach_volume_vmdk()

    def test_spawn_attach_volume_vmdk_no_image_ref(self):
        self._spawn_attach_volume_vmdk(set_image_ref=False)

    def test_pause(self):
        # Tests that the VMwareVCDriver does not implement the pause method.
        self._create_instance()
        self.assertRaises(NotImplementedError, self.conn.pause, self.instance)

    def test_unpause(self):
        # Tests that the VMwareVCDriver does not implement the unpause method.
        self._create_instance()
        self.assertRaises(NotImplementedError, self.conn.unpause,
                          self.instance)

    def test_datastore_dc_map(self):
        """Spawning populates the datastore->datacenter mapping cache."""
        self.assertEqual({}, ds_util._DS_DC_MAPPING)
        self._create_vm()
        # currently there are 2 data stores
        self.assertEqual(2, len(ds_util._DS_DC_MAPPING))

    def test_rollback_live_migration_at_destination(self):
        """Rollback at destination destroys the instance there."""
        with mock.patch.object(self.conn, "destroy") as mock_destroy:
            self.conn.rollback_live_migration_at_destination(self.context,
                    "instance", [], None)
            mock_destroy.assert_called_once_with(self.context,
                    "instance", [], None)

    def test_get_instance_disk_info_is_implemented(self):
        # Ensure that the method has been implemented in the driver
        instance = objects.Instance()
        try:
            disk_info = self.conn.get_instance_disk_info(instance)
            self.assertIsNone(disk_info)
        except NotImplementedError:
            self.fail("test_get_instance_disk_info() should not raise "
                      "NotImplementedError")

    def test_get_host_uptime(self):
        """Host uptime is not implemented by the VC driver."""
        self.assertRaises(NotImplementedError,
                          self.conn.get_host_uptime)

    def test_pbm_wsdl_location(self):
        """Enabling PBM stores the WSDL location and resets the PBM client."""
        self.flags(pbm_enabled=True,
                   pbm_wsdl_location='fira',
                   group='vmware')
        self.conn._update_pbm_location()
        self.assertEqual('fira', self.conn._session._pbm_wsdl_loc)
        self.assertIsNone(self.conn._session._pbm)

    def test_nodename(self):
        """Node name is '<cluster mor>.<vCenter uuid>'."""
        test_mor = "domain-26"
        self.assertEqual("%s.%s" % (test_mor,
                                    vmwareapi_fake._FAKE_VCENTER_UUID),
                         self.conn._create_nodename(test_mor),
                         "VC driver failed to create the proper node name")

    @mock.patch.object(driver.LOG, 'warning')
    def test_min_version(self, mock_warning):
        """No warning is logged when the vCenter version is supported."""
        self.conn._check_min_version()
        self.assertFalse(mock_warning.called)

    @mock.patch.object(driver.LOG, 'warning')
    @mock.patch.object(oslo_vim_util, 'get_vc_version',
                       return_value='5.0.0')
    def test_invalid_min_version(self, mock_version, mock_warning):
        """An old vCenter version logs a warning naming the minimum."""
        self.conn._check_min_version()
        # assert that the min version is in a warning message
        expected_arg = {'version': constants.MIN_VC_VERSION}
        version_arg_found = False
        for call in mock_warning.call_args_list:
            if call[0][1] == expected_arg:
                version_arg_found = True
                break
        self.assertTrue(version_arg_found)
| |
from __future__ import unicode_literals
from collections import OrderedDict
import keyword
import re
from django.core.management.base import BaseCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
class Command(BaseCommand):
    """Implementation of ``inspectdb``: introspect an existing database and
    print a Django models module mirroring its tables."""

    help = "Introspects the database tables in the given database and outputs a Django model module."

    requires_system_checks = False

    # Module path used in the generated "from ... import models" line.
    db_module = 'django.db'

    def add_arguments(self, parser):
        """Register the --database option on the command's argument parser."""
        parser.add_argument('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a database to '
            'introspect. Defaults to using the "default" database.')

    def handle(self, **options):
        """Write the generated models module to stdout, one line at a time."""
        try:
            for line in self.handle_inspection(options):
                self.stdout.write("%s\n" % line)
        except NotImplementedError:
            raise CommandError("Database inspection isn't supported for the currently selected database backend.")

    def handle_inspection(self, options):
        """Yield the source lines of a models module for the chosen database.

        Generator: header comments first, then one model class per table,
        each followed by its inner Meta class.
        """
        connection = connections[options['database']]
        # 'table_name_filter' is a stealth option
        table_name_filter = options.get('table_name_filter')

        # Turn a table name into a CamelCase model class name.
        table2model = lambda table_name: re.sub(r'[^a-zA-Z0-9]', '', table_name.title())
        # Drop the "u" prefix Python 2 repr() adds to unicode strings.
        strip_prefix = lambda s: s[1:] if s.startswith("u'") else s

        with connection.cursor() as cursor:
            yield "# This is an auto-generated Django model module."
            yield "# You'll have to do the following manually to clean this up:"
            yield "# * Rearrange models' order"
            yield "# * Make sure each model has one field with primary_key=True"
            yield (
                "# * Remove `managed = False` lines if you wish to allow "
                "Django to create, modify, and delete the table"
            )
            yield "# Feel free to rename the models, but don't rename db_table values or field names."
            yield "#"
            yield "# Also note: You'll have to insert the output of 'django-admin sqlcustom [app_label]'"
            yield "# into your database."
            yield "from __future__ import unicode_literals"
            yield ''
            yield 'from %s import models' % self.db_module
            known_models = []
            for table_name in connection.introspection.table_names(cursor):
                # Honor the (test-only) table_name_filter, when provided.
                if table_name_filter is not None and callable(table_name_filter):
                    if not table_name_filter(table_name):
                        continue
                yield ''
                yield ''
                yield 'class %s(models.Model):' % table2model(table_name)
                known_models.append(table2model(table_name))
                # Each of these introspection calls may be unsupported by the
                # backend; fall back to an empty mapping in that case.
                try:
                    relations = connection.introspection.get_relations(cursor, table_name)
                except NotImplementedError:
                    relations = {}
                try:
                    indexes = connection.introspection.get_indexes(cursor, table_name)
                except NotImplementedError:
                    indexes = {}
                try:
                    constraints = connection.introspection.get_constraints(cursor, table_name)
                except NotImplementedError:
                    constraints = {}
                used_column_names = []  # Holds column names used in the table so far
                for row in connection.introspection.get_table_description(cursor, table_name):
                    comment_notes = []  # Holds Field notes, to be displayed in a Python comment.
                    extra_params = OrderedDict()  # Holds Field parameters such as 'db_column'.
                    column_name = row[0]
                    is_relation = column_name in relations

                    att_name, params, notes = self.normalize_col_name(
                        column_name, used_column_names, is_relation)
                    extra_params.update(params)
                    comment_notes.extend(notes)

                    used_column_names.append(att_name)

                    # Add primary_key and unique, if necessary.
                    if column_name in indexes:
                        if indexes[column_name]['primary_key']:
                            extra_params['primary_key'] = True
                        elif indexes[column_name]['unique']:
                            extra_params['unique'] = True

                    if is_relation:
                        # Self-referential FKs use "self"; forward references
                        # to not-yet-emitted models are quoted.
                        rel_to = "self" if relations[column_name][1] == table_name else table2model(relations[column_name][1])
                        if rel_to in known_models:
                            field_type = 'ForeignKey(%s' % rel_to
                        else:
                            field_type = "ForeignKey('%s'" % rel_to
                    else:
                        # Calling `get_field_type` to get the field type string and any
                        # additional parameters and notes.
                        field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
                        extra_params.update(field_params)
                        comment_notes.extend(field_notes)

                        field_type += '('

                    # Don't output 'id = meta.AutoField(primary_key=True)', because
                    # that's assumed if it doesn't exist.
                    if att_name == 'id' and extra_params == {'primary_key': True}:
                        if field_type == 'AutoField(':
                            continue
                        elif field_type == 'IntegerField(' and not connection.features.can_introspect_autofield:
                            comment_notes.append('AutoField?')

                    # Add 'null' and 'blank', if the 'null_ok' flag was present in the
                    # table description.
                    if row[6]:  # If it's NULL...
                        if field_type == 'BooleanField(':
                            field_type = 'NullBooleanField('
                        else:
                            extra_params['blank'] = True
                            extra_params['null'] = True

                    field_desc = '%s = %s%s' % (
                        att_name,
                        # Custom fields will have a dotted path
                        '' if '.' in field_type else 'models.',
                        field_type,
                    )
                    if extra_params:
                        if not field_desc.endswith('('):
                            field_desc += ', '
                        field_desc += ', '.join(
                            '%s=%s' % (k, strip_prefix(repr(v)))
                            for k, v in extra_params.items())
                    field_desc += ')'
                    if comment_notes:
                        field_desc += ' # ' + ' '.join(comment_notes)
                    yield ' %s' % field_desc
                for meta_line in self.get_meta(table_name, constraints):
                    yield meta_line

    def normalize_col_name(self, col_name, used_column_names, is_relation):
        """
        Modify the column name to make it Python-compatible as a field name.

        Returns a (new_name, field_params, field_notes) tuple, where
        field_params may contain a 'db_column' override and field_notes
        documents each renaming step for the generated comment.
        """
        field_params = {}
        field_notes = []

        new_name = col_name.lower()
        if new_name != col_name:
            field_notes.append('Field name made lowercase.')

        if is_relation:
            # ForeignKey attributes drop the conventional "_id" suffix;
            # otherwise the original column must be preserved via db_column.
            if new_name.endswith('_id'):
                new_name = new_name[:-3]
            else:
                field_params['db_column'] = col_name

        new_name, num_repl = re.subn(r'\W', '_', new_name)
        if num_repl > 0:
            field_notes.append('Field renamed to remove unsuitable characters.')

        if new_name.find('__') >= 0:
            while new_name.find('__') >= 0:
                new_name = new_name.replace('__', '_')
            if col_name.lower().find('__') >= 0:
                # Only add the comment if the double underscore was in the original name
                field_notes.append("Field renamed because it contained more than one '_' in a row.")

        if new_name.startswith('_'):
            new_name = 'field%s' % new_name
            field_notes.append("Field renamed because it started with '_'.")

        if new_name.endswith('_'):
            new_name = '%sfield' % new_name
            field_notes.append("Field renamed because it ended with '_'.")

        if keyword.iskeyword(new_name):
            new_name += '_field'
            field_notes.append('Field renamed because it was a Python reserved word.')

        if new_name[0].isdigit():
            new_name = 'number_%s' % new_name
            field_notes.append("Field renamed because it wasn't a valid Python identifier.")

        if new_name in used_column_names:
            num = 0
            while '%s_%d' % (new_name, num) in used_column_names:
                num += 1
            new_name = '%s_%d' % (new_name, num)
            field_notes.append('Field renamed because of name conflict.')

        if col_name != new_name and field_notes:
            # Any rename means the ORM must still target the original column.
            field_params['db_column'] = col_name

        return new_name, field_params, field_notes

    def get_field_type(self, connection, table_name, row):
        """
        Given the database connection, the table name, and the cursor row
        description, this routine will return the given field type name, as
        well as any additional keyword parameters and notes for the field.
        """
        field_params = OrderedDict()
        field_notes = []

        try:
            field_type = connection.introspection.get_field_type(row[1], row)
        except KeyError:
            # Unknown backend type code: fall back to TextField.
            field_type = 'TextField'
            field_notes.append('This field type is a guess.')

        # This is a hook for data_types_reverse to return a tuple of
        # (field_type, field_params_dict).
        if type(field_type) is tuple:
            field_type, new_params = field_type
            field_params.update(new_params)

        # Add max_length for all CharFields.
        if field_type == 'CharField' and row[3]:
            field_params['max_length'] = int(row[3])

        if field_type == 'DecimalField':
            if row[4] is None or row[5] is None:
                field_notes.append(
                    'max_digits and decimal_places have been guessed, as this '
                    'database handles decimal fields as float')
                field_params['max_digits'] = row[4] if row[4] is not None else 10
                field_params['decimal_places'] = row[5] if row[5] is not None else 5
            else:
                field_params['max_digits'] = row[4]
                field_params['decimal_places'] = row[5]

        return field_type, field_params, field_notes

    def get_meta(self, table_name, constraints):
        """
        Return a sequence comprising the lines of code necessary
        to construct the inner Meta class for the model corresponding
        to the given database table name.
        """
        unique_together = []
        for index, params in constraints.items():
            if params['unique']:
                columns = params['columns']
                if len(columns) > 1:
                    # we do not want to include the u"" or u'' prefix
                    # so we build the string rather than interpolate the tuple
                    tup = '(' + ', '.join("'%s'" % c for c in columns) + ')'
                    unique_together.append(tup)
        meta = ["",
                " class Meta:",
                " managed = False",
                " db_table = '%s'" % table_name]
        if unique_together:
            tup = '(' + ', '.join(unique_together) + ',)'
            meta += [" unique_together = %s" % tup]
        return meta
| |
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import datetime
import enum
import hashlib
import logging
import re
import time
from typing import List, Optional, Tuple
from absl import flags
from absl.testing import absltest
from google.protobuf import json_format
import grpc
from framework import xds_flags
from framework import xds_k8s_flags
from framework.helpers import retryers
import framework.helpers.rand
from framework.infrastructure import gcp
from framework.infrastructure import k8s
from framework.infrastructure import traffic_director
from framework.rpc import grpc_channelz
from framework.rpc import grpc_testing
from framework.test_app import client_app
from framework.test_app import server_app
logger = logging.getLogger(__name__)

# Command-line flags shared by all xDS Kubernetes test suites.
_FORCE_CLEANUP = flags.DEFINE_bool(
    "force_cleanup",
    default=False,
    help="Force resource cleanup, even if not created by this test run")
# TODO(yashkt): We will no longer need this flag once Core exposes local certs
# from channelz
_CHECK_LOCAL_CERTS = flags.DEFINE_bool(
    "check_local_certs",
    default=True,
    help="Security Tests also check the value of local certs")
# Re-export key flags from the framework flag modules on this module.
flags.adopt_module_key_flags(xds_flags)
flags.adopt_module_key_flags(xds_k8s_flags)

# Type aliases
TrafficDirectorManager = traffic_director.TrafficDirectorManager
TrafficDirectorAppNetManager = traffic_director.TrafficDirectorAppNetManager
TrafficDirectorSecureManager = traffic_director.TrafficDirectorSecureManager
XdsTestServer = server_app.XdsTestServer
XdsTestClient = client_app.XdsTestClient
KubernetesServerRunner = server_app.KubernetesServerRunner
KubernetesClientRunner = client_app.KubernetesClientRunner
LoadBalancerStatsResponse = grpc_testing.LoadBalancerStatsResponse
_ChannelState = grpc_channelz.ChannelState
_timedelta = datetime.timedelta

# Upper bound (seconds) to wait for Traffic Director config propagation.
_TD_CONFIG_MAX_WAIT_SEC = 600
class XdsKubernetesTestCase(absltest.TestCase, metaclass=abc.ABCMeta):
    """Abstract base class for xDS interop test suites run on Kubernetes."""

    # When True, a random per-run suffix is generated for resource names;
    # set to False in setUpClass when --resource_suffix is passed explicitly.
    _resource_suffix_randomize: bool = True
    client_namespace: str
    client_runner: KubernetesClientRunner
    gcp_api_manager: gcp.api.GcpApiManager
    k8s_api_manager: k8s.KubernetesApiManager
    resource_prefix: str
    resource_suffix: str = ''
    server_namespace: str
    server_runner: KubernetesServerRunner
    server_xds_port: int
    td: TrafficDirectorManager
    config_scope: str
    @classmethod
    def setUpClass(cls):
        """Hook method for setting up class fixture before running tests in
        the class.

        Reads all configuration from the module flags once per class and
        creates the shared Kubernetes/GCP API managers.
        """
        # GCP
        cls.project: str = xds_flags.PROJECT.value
        cls.network: str = xds_flags.NETWORK.value
        cls.gcp_service_account: str = xds_k8s_flags.GCP_SERVICE_ACCOUNT.value
        cls.td_bootstrap_image = xds_k8s_flags.TD_BOOTSTRAP_IMAGE.value
        cls.xds_server_uri = xds_flags.XDS_SERVER_URI.value
        cls.ensure_firewall = xds_flags.ENSURE_FIREWALL.value
        cls.firewall_allowed_ports = xds_flags.FIREWALL_ALLOWED_PORTS.value
        cls.compute_api_version = xds_flags.COMPUTE_API_VERSION.value

        # Resource names.
        cls.resource_prefix = xds_flags.RESOURCE_PREFIX.value
        if xds_flags.RESOURCE_SUFFIX.value is not None:
            # An explicit suffix disables per-run randomization (see setUp).
            cls._resource_suffix_randomize = False
            cls.resource_suffix = xds_flags.RESOURCE_SUFFIX.value

        # Test server
        cls.server_image = xds_k8s_flags.SERVER_IMAGE.value
        cls.server_name = xds_flags.SERVER_NAME.value
        cls.server_port = xds_flags.SERVER_PORT.value
        cls.server_maintenance_port = xds_flags.SERVER_MAINTENANCE_PORT.value
        cls.server_xds_host = xds_flags.SERVER_NAME.value
        cls.server_xds_port = xds_flags.SERVER_XDS_PORT.value

        # Test client
        cls.client_image = xds_k8s_flags.CLIENT_IMAGE.value
        cls.client_name = xds_flags.CLIENT_NAME.value
        cls.client_port = xds_flags.CLIENT_PORT.value

        # Test suite settings
        cls.force_cleanup = _FORCE_CLEANUP.value
        cls.debug_use_port_forwarding = \
            xds_k8s_flags.DEBUG_USE_PORT_FORWARDING.value
        cls.enable_workload_identity = xds_k8s_flags.ENABLE_WORKLOAD_IDENTITY.value
        cls.check_local_certs = _CHECK_LOCAL_CERTS.value

        # Resource managers
        cls.k8s_api_manager = k8s.KubernetesApiManager(
            xds_k8s_flags.KUBE_CONTEXT.value)
        cls.secondary_k8s_api_manager = k8s.KubernetesApiManager(
            xds_k8s_flags.SECONDARY_KUBE_CONTEXT.value)
        cls.gcp_api_manager = gcp.api.GcpApiManager()
    def setUp(self):
        """Hook method for setting up the test fixture before exercising it."""
        super().setUp()
        if self._resource_suffix_randomize:
            self.resource_suffix = framework.helpers.rand.random_resource_suffix(
            )
        logger.info('Test run resource prefix: %s, suffix: %s',
                    self.resource_prefix, self.resource_suffix)
        if xds_flags.CONFIG_SCOPE.value is not None:
            self.config_scope = xds_flags.CONFIG_SCOPE.value + "-" + framework.helpers.rand.random_resource_suffix(
            )
        else:
            self.config_scope = None

        # TD Manager
        self.td = self.initTrafficDirectorManager()

        # Test Server runner
        self.server_namespace = KubernetesServerRunner.make_namespace_name(
            self.resource_prefix, self.resource_suffix)
        self.server_runner = self.initKubernetesServerRunner()

        # Test Client runner
        self.client_namespace = KubernetesClientRunner.make_namespace_name(
            self.resource_prefix, self.resource_suffix)
        self.client_runner = self.initKubernetesClientRunner()

        # Ensure the firewall rule exists when requested by flags.
        if self.ensure_firewall:
            self.td.create_firewall_rule(
                allowed_ports=self.firewall_allowed_ports)

        # Randomize xds port, when it's set to 0
        if self.server_xds_port == 0:
            # TODO(sergiitk): this is prone to race conditions:
            # The port might not be taken now, but there's no guarantee
            # it won't be taken until the tests get to creating
            # forwarding rule. This check is better than nothing,
            # but we should find a better approach.
            self.server_xds_port = self.td.find_unused_forwarding_rule_port()
            logger.info('Found unused xds port: %s', self.server_xds_port)
    @abc.abstractmethod
    def initTrafficDirectorManager(self) -> TrafficDirectorManager:
        """Create the Traffic Director manager; implemented by subclasses."""
        raise NotImplementedError

    @abc.abstractmethod
    def initKubernetesServerRunner(self) -> KubernetesServerRunner:
        """Create the test server runner; implemented by subclasses."""
        raise NotImplementedError

    @abc.abstractmethod
    def initKubernetesClientRunner(self) -> KubernetesClientRunner:
        """Create the test client runner; implemented by subclasses."""
        raise NotImplementedError

    @classmethod
    def tearDownClass(cls):
        # Release the Kubernetes and GCP API clients shared by the class.
        cls.k8s_api_manager.close()
        cls.secondary_k8s_api_manager.close()
        cls.gcp_api_manager.close()

    def tearDown(self):
        """Clean up per-test resources, retrying cleanup a few times."""
        logger.info('----- TestMethod %s teardown -----', self.id())
        retryer = retryers.constant_retryer(wait_fixed=_timedelta(seconds=10),
                                            attempts=3,
                                            log_level=logging.INFO)
        try:
            retryer(self._cleanup)
        except retryers.RetryError:
            logger.exception('Got error during teardown')

    def _cleanup(self):
        """Delete TD, client, and server resources created by the test."""
        self.td.cleanup(force=self.force_cleanup)
        self.client_runner.cleanup(force=self.force_cleanup)
        self.server_runner.cleanup(force=self.force_cleanup,
                                   force_namespace=self.force_cleanup)
    def setupTrafficDirectorGrpc(self):
        """Configure TD load balancing for the test server's xDS host/port."""
        self.td.setup_for_grpc(self.server_xds_host,
                               self.server_xds_port,
                               health_check_port=self.server_maintenance_port)

    def setupServerBackends(self,
                            *,
                            wait_for_healthy_status=True,
                            server_runner=None,
                            max_rate_per_endpoint: Optional[int] = None):
        """Add the server's NEG backends to the TD backend service."""
        if server_runner is None:
            server_runner = self.server_runner
        # Load Backends
        neg_name, neg_zones = server_runner.k8s_namespace.get_service_neg(
            server_runner.service_name, self.server_port)

        # Add backends to the Backend Service
        self.td.backend_service_add_neg_backends(
            neg_name, neg_zones, max_rate_per_endpoint=max_rate_per_endpoint)
        if wait_for_healthy_status:
            self.td.wait_for_backends_healthy_status()

    def removeServerBackends(self, *, server_runner=None):
        """Remove the server's NEG backends from the TD backend service."""
        if server_runner is None:
            server_runner = self.server_runner
        # Load Backends
        neg_name, neg_zones = server_runner.k8s_namespace.get_service_neg(
            server_runner.service_name, self.server_port)

        # Remove backends from the Backend Service
        self.td.backend_service_remove_neg_backends(neg_name, neg_zones)
def assertSuccessfulRpcs(self,
test_client: XdsTestClient,
num_rpcs: int = 100):
lb_stats = self.getClientRpcStats(test_client, num_rpcs)
self.assertAllBackendsReceivedRpcs(lb_stats)
failed = int(lb_stats.num_failures)
self.assertLessEqual(
failed,
0,
msg=f'Expected all RPCs to succeed: {failed} of {num_rpcs} failed')
@staticmethod
def diffAccumulatedStatsPerMethod(
before: grpc_testing.LoadBalancerAccumulatedStatsResponse,
after: grpc_testing.LoadBalancerAccumulatedStatsResponse):
"""Only diffs stats_per_method, as the other fields are deprecated."""
diff = grpc_testing.LoadBalancerAccumulatedStatsResponse()
for method, method_stats in after.stats_per_method.items():
for status, count in method_stats.result.items():
count -= before.stats_per_method[method].result[status]
if count < 0:
raise AssertionError("Diff of count shouldn't be negative")
if count > 0:
diff.stats_per_method[method].result[status] = count
return diff
    def assertRpcStatusCodes(self, test_client: XdsTestClient, *,
                             status_code: grpc.StatusCode, duration: _timedelta,
                             method: str) -> None:
        """Assert all RPCs for a method are completing with a certain status."""
        # Sending with pre-set QPS for a period of time
        before_stats = test_client.get_load_balancer_accumulated_stats()
        logging.info(
            'Received LoadBalancerAccumulatedStatsResponse from test client %s: before:\n%s',
            test_client.ip, before_stats)
        time.sleep(duration.total_seconds())
        after_stats = test_client.get_load_balancer_accumulated_stats()
        logging.info(
            'Received LoadBalancerAccumulatedStatsResponse from test client %s: after:\n%s',
            test_client.ip, after_stats)

        diff_stats = self.diffAccumulatedStatsPerMethod(before_stats,
                                                        after_stats)
        stats = diff_stats.stats_per_method[method]
        status = status_code.value[0]
        # No other status code may have accumulated during the window.
        for found_status, count in stats.result.items():
            if found_status != status and count > 0:
                self.fail(f"Expected only status {status} but found status "
                          f"{found_status} for method {method}:\n{diff_stats}")
        # At least one RPC must have completed with the expected status.
        self.assertGreater(stats.result[status_code.value[0]], 0)
    def assertRpcsEventuallyGoToGivenServers(self,
                                             test_client: XdsTestClient,
                                             servers: List[XdsTestServer],
                                             num_rpcs: int = 100):
        """Retries until RPCs succeed and land only on the given servers.

        Polls once per second for up to _TD_CONFIG_MAX_WAIT_SEC seconds.
        """
        retryer = retryers.constant_retryer(
            wait_fixed=datetime.timedelta(seconds=1),
            timeout=datetime.timedelta(seconds=_TD_CONFIG_MAX_WAIT_SEC),
            log_level=logging.INFO)
        try:
            retryer(self._assertRpcsEventuallyGoToGivenServers, test_client,
                    servers, num_rpcs)
        except retryers.RetryError:
            # NOTE(review): the timeout is only logged, never re-raised, so
            # despite the assert* name this method passes even when RPCs never
            # reached the expected servers — confirm this best-effort behavior
            # is intentional.
            logger.exception(
                'Rpcs did not go to expected servers before timeout %s',
                _TD_CONFIG_MAX_WAIT_SEC)
def _assertRpcsEventuallyGoToGivenServers(self, test_client: XdsTestClient,
servers: List[XdsTestServer],
num_rpcs: int):
server_names = [server.pod_name for server in servers]
logger.info(f'Verifying RPCs go to {server_names}')
lb_stats = self.getClientRpcStats(test_client, num_rpcs)
failed = int(lb_stats.num_failures)
self.assertLessEqual(
failed,
0,
msg=f'Expected all RPCs to succeed: {failed} of {num_rpcs} failed')
for server_name in server_names:
self.assertIn(server_name, lb_stats.rpcs_by_peer,
f'{server_name} did not receive RPCs')
for peer in lb_stats.rpcs_by_peer.keys():
self.assertIn(peer, server_names,
f'Unexpected server {peer} received RPCs')
def assertXdsConfigExists(self, test_client: XdsTestClient):
config = test_client.csds.fetch_client_status(log_level=logging.INFO)
self.assertIsNotNone(config)
seen = set()
want = frozenset([
'listener_config',
'cluster_config',
'route_config',
'endpoint_config',
])
for xds_config in config.xds_config:
seen.add(xds_config.WhichOneof('per_xds_config'))
for generic_xds_config in config.generic_xds_configs:
if re.search(r'\.Listener$', generic_xds_config.type_url):
seen.add('listener_config')
elif re.search(r'\.RouteConfiguration$',
generic_xds_config.type_url):
seen.add('route_config')
elif re.search(r'\.Cluster$', generic_xds_config.type_url):
seen.add('cluster_config')
elif re.search(r'\.ClusterLoadAssignment$',
generic_xds_config.type_url):
seen.add('endpoint_config')
logger.debug('Received xDS config dump: %s',
json_format.MessageToJson(config, indent=2))
self.assertSameElements(want, seen)
def assertFailedRpcs(self,
test_client: XdsTestClient,
num_rpcs: Optional[int] = 100):
lb_stats = self.getClientRpcStats(test_client, num_rpcs)
failed = int(lb_stats.num_failures)
self.assertEqual(
failed,
num_rpcs,
msg=f'Expected all RPCs to fail: {failed} of {num_rpcs} failed')
@staticmethod
def getClientRpcStats(test_client: XdsTestClient,
num_rpcs: int) -> LoadBalancerStatsResponse:
lb_stats = test_client.get_load_balancer_stats(num_rpcs=num_rpcs)
logger.info(
'Received LoadBalancerStatsResponse from test client %s:\n%s',
test_client.ip, lb_stats)
return lb_stats
def assertAllBackendsReceivedRpcs(self, lb_stats):
# TODO(sergiitk): assert backends length
for backend, rpcs_count in lb_stats.rpcs_by_peer.items():
self.assertGreater(
int(rpcs_count),
0,
msg=f'Backend {backend} did not receive a single RPC')
class RegularXdsKubernetesTestCase(XdsKubernetesTestCase):
    """Regular (non-secure) xDS test suite: plaintext client/server traffic."""
    @classmethod
    def setUpClass(cls):
        """Hook method for setting up class fixture before running tests in
        the class.
        """
        super().setUpClass()
        if cls.server_maintenance_port is None:
            # Default to the runner's standard maintenance port so Backend
            # Health Checks are created on a deterministic port.
            cls.server_maintenance_port = KubernetesServerRunner.DEFAULT_MAINTENANCE_PORT
    def initTrafficDirectorManager(self) -> TrafficDirectorManager:
        """Creates the plain Traffic Director manager for this suite."""
        return TrafficDirectorManager(
            self.gcp_api_manager,
            project=self.project,
            resource_prefix=self.resource_prefix,
            resource_suffix=self.resource_suffix,
            network=self.network,
            compute_api_version=self.compute_api_version)
    def initKubernetesServerRunner(self) -> KubernetesServerRunner:
        """Creates the k8s runner that deploys the test server."""
        return KubernetesServerRunner(
            k8s.KubernetesNamespace(self.k8s_api_manager,
                                    self.server_namespace),
            deployment_name=self.server_name,
            image_name=self.server_image,
            td_bootstrap_image=self.td_bootstrap_image,
            gcp_project=self.project,
            gcp_api_manager=self.gcp_api_manager,
            gcp_service_account=self.gcp_service_account,
            xds_server_uri=self.xds_server_uri,
            network=self.network,
            debug_use_port_forwarding=self.debug_use_port_forwarding,
            enable_workload_identity=self.enable_workload_identity)
    def initKubernetesClientRunner(self) -> KubernetesClientRunner:
        """Creates the k8s runner that deploys the test client."""
        return KubernetesClientRunner(
            k8s.KubernetesNamespace(self.k8s_api_manager,
                                    self.client_namespace),
            deployment_name=self.client_name,
            image_name=self.client_image,
            td_bootstrap_image=self.td_bootstrap_image,
            gcp_project=self.project,
            gcp_api_manager=self.gcp_api_manager,
            gcp_service_account=self.gcp_service_account,
            xds_server_uri=self.xds_server_uri,
            network=self.network,
            config_scope=self.config_scope,
            debug_use_port_forwarding=self.debug_use_port_forwarding,
            enable_workload_identity=self.enable_workload_identity,
            stats_port=self.client_port,
            # Share the namespace when client and server are co-located.
            reuse_namespace=self.server_namespace == self.client_namespace)
    def startTestServers(self,
                         replica_count=1,
                         server_runner=None,
                         **kwargs) -> List[XdsTestServer]:
        """Deploys test server replicas and points them at the xDS host/port.

        Uses self.server_runner unless an explicit server_runner is given.
        """
        if server_runner is None:
            server_runner = self.server_runner
        test_servers = server_runner.run(
            replica_count=replica_count,
            test_port=self.server_port,
            maintenance_port=self.server_maintenance_port,
            **kwargs)
        for test_server in test_servers:
            test_server.set_xds_address(self.server_xds_host,
                                        self.server_xds_port)
        return test_servers
    def startTestClient(self, test_server: XdsTestServer,
                        **kwargs) -> XdsTestClient:
        """Deploys the test client targeting test_server's xDS URI and waits
        until its server channel becomes active."""
        test_client = self.client_runner.run(server_target=test_server.xds_uri,
                                             **kwargs)
        test_client.wait_for_active_server_channel()
        return test_client
class AppNetXdsKubernetesTestCase(RegularXdsKubernetesTestCase):
    """Regular test suite variant that uses the AppNet TD manager."""
    # Narrows the base class attribute to the AppNet-capable manager.
    td: TrafficDirectorAppNetManager
    def initTrafficDirectorManager(self) -> TrafficDirectorAppNetManager:
        """Creates the AppNet TD manager; same settings as the regular suite
        plus the config_scope required by the AppNet APIs."""
        return TrafficDirectorAppNetManager(
            self.gcp_api_manager,
            project=self.project,
            resource_prefix=self.resource_prefix,
            resource_suffix=self.resource_suffix,
            network=self.network,
            config_scope=self.config_scope,
            compute_api_version=self.compute_api_version)
class SecurityXdsKubernetesTestCase(XdsKubernetesTestCase):
    """Secure xDS test suite: verifies mTLS/TLS/plaintext client-server traffic."""
    td: TrafficDirectorSecureManager
    class SecurityMode(enum.Enum):
        """Transport security flavor expected on the test channel."""
        MTLS = enum.auto()
        TLS = enum.auto()
        PLAINTEXT = enum.auto()
    @classmethod
    def setUpClass(cls):
        """Hook method for setting up class fixture before running tests in
        the class.
        """
        super().setUpClass()
        if cls.server_maintenance_port is None:
            # In secure mode, the maintenance port is different from
            # the test port to keep it insecure, and make
            # Health Checks and Channelz tests available.
            # When not provided, use explicit numeric port value, so
            # Backend Health Checks are created on a fixed port.
            cls.server_maintenance_port = KubernetesServerRunner.DEFAULT_SECURE_MODE_MAINTENANCE_PORT
    def initTrafficDirectorManager(self) -> TrafficDirectorSecureManager:
        """Creates the security-aware Traffic Director manager."""
        return TrafficDirectorSecureManager(
            self.gcp_api_manager,
            project=self.project,
            resource_prefix=self.resource_prefix,
            resource_suffix=self.resource_suffix,
            network=self.network,
            compute_api_version=self.compute_api_version)
    def initKubernetesServerRunner(self) -> KubernetesServerRunner:
        """Creates the server runner using the secure deployment template."""
        return KubernetesServerRunner(
            k8s.KubernetesNamespace(self.k8s_api_manager,
                                    self.server_namespace),
            deployment_name=self.server_name,
            image_name=self.server_image,
            td_bootstrap_image=self.td_bootstrap_image,
            gcp_project=self.project,
            gcp_api_manager=self.gcp_api_manager,
            gcp_service_account=self.gcp_service_account,
            network=self.network,
            xds_server_uri=self.xds_server_uri,
            deployment_template='server-secure.deployment.yaml',
            debug_use_port_forwarding=self.debug_use_port_forwarding)
    def initKubernetesClientRunner(self) -> KubernetesClientRunner:
        """Creates the client runner using the secure deployment template."""
        return KubernetesClientRunner(
            k8s.KubernetesNamespace(self.k8s_api_manager,
                                    self.client_namespace),
            deployment_name=self.client_name,
            image_name=self.client_image,
            td_bootstrap_image=self.td_bootstrap_image,
            gcp_project=self.project,
            gcp_api_manager=self.gcp_api_manager,
            gcp_service_account=self.gcp_service_account,
            xds_server_uri=self.xds_server_uri,
            network=self.network,
            config_scope=self.config_scope,
            deployment_template='client-secure.deployment.yaml',
            stats_port=self.client_port,
            reuse_namespace=self.server_namespace == self.client_namespace,
            debug_use_port_forwarding=self.debug_use_port_forwarding)
    def startSecureTestServer(self, replica_count=1, **kwargs) -> XdsTestServer:
        """Deploys the test server in secure mode; returns the first replica."""
        test_server = self.server_runner.run(
            replica_count=replica_count,
            test_port=self.server_port,
            maintenance_port=self.server_maintenance_port,
            secure_mode=True,
            **kwargs)[0]
        test_server.set_xds_address(self.server_xds_host, self.server_xds_port)
        return test_server
    def setupSecurityPolicies(self, *, server_tls, server_mtls, client_tls,
                              client_mtls):
        """Configures TD client and server security with the given flags."""
        self.td.setup_client_security(server_namespace=self.server_namespace,
                                      server_name=self.server_name,
                                      tls=client_tls,
                                      mtls=client_mtls)
        self.td.setup_server_security(server_namespace=self.server_namespace,
                                      server_name=self.server_name,
                                      server_port=self.server_port,
                                      tls=server_tls,
                                      mtls=server_mtls)
    def startSecureTestClient(self,
                              test_server: XdsTestServer,
                              *,
                              wait_for_active_server_channel=True,
                              **kwargs) -> XdsTestClient:
        """Deploys the test client in secure mode, optionally waiting for the
        server channel to become active."""
        test_client = self.client_runner.run(server_target=test_server.xds_uri,
                                             secure_mode=True,
                                             **kwargs)
        if wait_for_active_server_channel:
            test_client.wait_for_active_server_channel()
        return test_client
    def assertTestAppSecurity(self, mode: SecurityMode,
                              test_client: XdsTestClient,
                              test_server: XdsTestServer):
        """Asserts the connected sockets match the expected security mode."""
        client_socket, server_socket = self.getConnectedSockets(
            test_client, test_server)
        server_security: grpc_channelz.Security = server_socket.security
        client_security: grpc_channelz.Security = client_socket.security
        logger.info('Server certs: %s', self.debug_sock_certs(server_security))
        logger.info('Client certs: %s', self.debug_sock_certs(client_security))
        if mode is self.SecurityMode.MTLS:
            self.assertSecurityMtls(client_security, server_security)
        elif mode is self.SecurityMode.TLS:
            self.assertSecurityTls(client_security, server_security)
        elif mode is self.SecurityMode.PLAINTEXT:
            self.assertSecurityPlaintext(client_security, server_security)
        else:
            raise TypeError('Incorrect security mode')
    def assertSecurityMtls(self, client_security: grpc_channelz.Security,
                           server_security: grpc_channelz.Security):
        """Asserts both sockets use TLS and both sides presented certificates."""
        self.assertEqual(client_security.WhichOneof('model'),
                         'tls',
                         msg='(mTLS) Client socket security model must be TLS')
        self.assertEqual(server_security.WhichOneof('model'),
                         'tls',
                         msg='(mTLS) Server socket security model must be TLS')
        server_tls, client_tls = server_security.tls, client_security.tls
        # Confirm regular TLS: server local cert == client remote cert
        self.assertNotEmpty(client_tls.remote_certificate,
                            msg="(mTLS) Client remote certificate is missing")
        if self.check_local_certs:
            self.assertNotEmpty(
                server_tls.local_certificate,
                msg="(mTLS) Server local certificate is missing")
            self.assertEqual(
                server_tls.local_certificate,
                client_tls.remote_certificate,
                msg="(mTLS) Server local certificate must match client's "
                "remote certificate")
        # mTLS: server remote cert == client local cert
        self.assertNotEmpty(server_tls.remote_certificate,
                            msg="(mTLS) Server remote certificate is missing")
        if self.check_local_certs:
            self.assertNotEmpty(
                client_tls.local_certificate,
                msg="(mTLS) Client local certificate is missing")
            self.assertEqual(
                server_tls.remote_certificate,
                client_tls.local_certificate,
                msg="(mTLS) Server remote certificate must match client's "
                "local certificate")
    def assertSecurityTls(self, client_security: grpc_channelz.Security,
                          server_security: grpc_channelz.Security):
        """Asserts one-way TLS: server cert only, no client certificate."""
        self.assertEqual(client_security.WhichOneof('model'),
                         'tls',
                         msg='(TLS) Client socket security model must be TLS')
        self.assertEqual(server_security.WhichOneof('model'),
                         'tls',
                         msg='(TLS) Server socket security model must be TLS')
        server_tls, client_tls = server_security.tls, client_security.tls
        # Regular TLS: server local cert == client remote cert
        self.assertNotEmpty(client_tls.remote_certificate,
                            msg="(TLS) Client remote certificate is missing")
        if self.check_local_certs:
            self.assertNotEmpty(server_tls.local_certificate,
                                msg="(TLS) Server local certificate is missing")
            self.assertEqual(
                server_tls.local_certificate,
                client_tls.remote_certificate,
                msg="(TLS) Server local certificate must match client "
                "remote certificate")
        # mTLS must not be used
        self.assertEmpty(
            server_tls.remote_certificate,
            msg="(TLS) Server remote certificate must be empty in TLS mode. "
            "Is server security incorrectly configured for mTLS?")
        self.assertEmpty(
            client_tls.local_certificate,
            msg="(TLS) Client local certificate must be empty in TLS mode. "
            "Is client security incorrectly configured for mTLS?")
    def assertSecurityPlaintext(self, client_security, server_security):
        """Asserts neither side of the connection presented any certificate."""
        server_tls, client_tls = server_security.tls, client_security.tls
        # Not TLS: regular TLS would set the server's local and the client's
        # remote certificate (see assertSecurityTls).
        self.assertEmpty(
            server_tls.local_certificate,
            msg="(Plaintext) Server local certificate must be empty.")
        # Fixed: this previously re-checked client_tls.local_certificate
        # (duplicating the "Not mTLS" check below), leaving the client's
        # remote certificate unverified.
        self.assertEmpty(
            client_tls.remote_certificate,
            msg="(Plaintext) Client remote certificate must be empty.")
        # Not mTLS: mTLS would additionally set the server's remote and the
        # client's local certificate (see assertSecurityMtls).
        self.assertEmpty(
            server_tls.remote_certificate,
            msg="(Plaintext) Server remote certificate must be empty.")
        self.assertEmpty(
            client_tls.local_certificate,
            msg="(Plaintext) Client local certificate must be empty.")
    def assertClientCannotReachServerRepeatedly(
            self,
            test_client: XdsTestClient,
            *,
            times: Optional[int] = None,
            delay: Optional[_timedelta] = None):
        """
        Asserts that the client repeatedly cannot reach the server.
        With negative tests we can't be absolutely certain expected failure
        state is not caused by something else.
        To mitigate for this, we repeat the checks several times, and expect
        all of them to succeed.
        This is useful in case the channel eventually stabilizes, and RPCs pass.
        Args:
            test_client: An instance of XdsTestClient
            times: Optional; A positive number of times to confirm that
                the server is unreachable. Defaults to `3` attempts.
            delay: Optional; Specifies how long to wait before the next check.
                Defaults to `10` seconds.
        """
        if times is None or times < 1:
            times = 3
        if delay is None:
            delay = _timedelta(seconds=10)
        for i in range(1, times + 1):
            self.assertClientCannotReachServer(test_client)
            if i < times:
                logger.info('Check %s passed, waiting %s before the next check',
                            i, delay)
                time.sleep(delay.total_seconds())
    def assertClientCannotReachServer(self, test_client: XdsTestClient):
        """Single unreachability check: channel failed and all RPCs failed."""
        self.assertClientChannelFailed(test_client)
        self.assertFailedRpcs(test_client)
    def assertClientChannelFailed(self, test_client: XdsTestClient):
        """Waits for TRANSIENT_FAILURE and asserts a single failed subchannel."""
        channel = test_client.wait_for_server_channel_state(
            state=_ChannelState.TRANSIENT_FAILURE)
        subchannels = list(
            test_client.channelz.list_channel_subchannels(channel))
        self.assertLen(subchannels,
                       1,
                       msg="Client channel must have exactly one subchannel "
                       "in state TRANSIENT_FAILURE.")
    @staticmethod
    def getConnectedSockets(
            test_client: XdsTestClient, test_server: XdsTestServer
    ) -> Tuple[grpc_channelz.Socket, grpc_channelz.Socket]:
        """Returns the (client, server) channelz sockets of the live connection."""
        client_sock = test_client.get_active_server_channel_socket()
        server_sock = test_server.get_server_socket_matching_client(client_sock)
        return client_sock, server_sock
    @classmethod
    def debug_sock_certs(cls, security: grpc_channelz.Security):
        """Formats a socket's certificates (or 'other' model) for logging."""
        if security.WhichOneof('model') == 'other':
            return f'other: <{security.other.name}={security.other.value}>'
        return (f'local: <{cls.debug_cert(security.tls.local_certificate)}>, '
                f'remote: <{cls.debug_cert(security.tls.remote_certificate)}>')
    @staticmethod
    def debug_cert(cert):
        """Returns a short sha1/length fingerprint of a certificate blob."""
        if not cert:
            return 'missing'
        # sha1 is fine here: used for log fingerprinting only, not security.
        sha1 = hashlib.sha1(cert)
        return f'sha1={sha1.hexdigest()}, len={len(cert)}'
| |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright 2016 Lightcopy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.session import SparkSession
__all__ = ['Const', 'QueryContext', 'DataFrameIndexManager']
# Constants used through the package
class Const(object):
    """Package-wide constants."""
    # Default datasource format used for index tables.
    PARQUET_SOURCE = 'parquet'
    # Spark conf key pointing to the filesystem metastore location.
    METASTORE_LOCATION = 'spark.sql.index.metastore'
class CreateIndexCommand(object):
    """
    Builder for creating an index over a table. Collects index columns and a
    save mode, then issues the create call against a valid table path. Mode
    values mirror 'org.apache.spark.sql.SaveMode' (or their string
    representations), similar to 'DataFrame.write'.
    """
    def __init__(self, manager):
        # Parent DataFrameIndexManager that owns the Java-side manager.
        self._manager = manager
        self._mode = None
        self._columns = None
    def mode(self, value):
        """
        Set save mode for index creation. Available modes:
        - 'append' add data to an existing index, or create one if missing
        - 'overwrite' drop an existing index for the table and recreate it
        - 'error' create index, fail if it already exists
        - 'ignore' create index, no-op if it already exists
        The default ('error') is applied on the Scala side.
        :param value: mode value (see options above)
        :return: self
        """
        self._mode = '%s' % (value)
        return self
    def indexBy(self, *columns):
        """
        Choose the columns to index. Statistics (and optional filter
        statistics) will be collected for these columns only.
        :param *columns: args of column names
        :return: self
        """
        self._columns = ['%s' % (col) for col in columns]
        return self
    def indexByAll(self):
        """
        Reset columns to None so that all indexable columns are inferred from
        the table when the index is created.
        :return: self
        """
        self._columns = None
        return self
    def _init_create_command(self):
        """
        Build the Java create command from the collected options.
        :return: java create command
        """
        jcreate = self._manager._jdim.create()
        if self._mode:
            # Only forward the mode when set; Scala supplies the default.
            jcreate.mode(self._mode)
        if self._columns is None:
            # No explicit columns: let the backend infer all of them.
            jcreate.indexByAll()
        else:
            jcreate.indexBy(self._columns)
        return jcreate
    def _createIndex(self, path):
        """
        Create index for the table located at 'path'.
        :param path: path to the table
        """
        self._init_create_command().createIndex(path)
    def table(self, tableName):
        """
        Create index for a Spark persistent table.
        :param tableName: table name that exists in catalog
        """
        self._init_create_command().table(tableName)
    def parquet(self, path):
        """
        Create index for a Parquet table. Forces the 'parquet' format,
        overwriting any format defined previously.
        :param path: path to the Parquet table
        """
        self._manager.format(Const.PARQUET_SOURCE)
        self._createIndex(path)
class ExistsIndexCommand(object):
    """
    Reports whether or not an index exists for a given table.
    """
    def __init__(self, manager):
        # Parent DataFrameIndexManager that owns the Java-side manager.
        self._manager = manager
    def _existsIndex(self, path):
        """
        Check index existence in the metastore for the table path, using the
        source configured on 'DataFrameIndexManager'.
        :param path: path to the table
        :return: True if index exists, False otherwise
        """
        return self._manager._jdim.exists().existsIndex(path)
    def table(self, tableName):
        """
        Check index existence for a Spark persistent table.
        :param tableName: table name in catalog
        :return: True if index exists, False otherwise
        """
        return self._manager._jdim.exists().table(tableName)
    def parquet(self, path):
        """
        Check index existence for a Parquet table; forces the 'parquet'
        source, overwriting any source set before.
        :param path: path to the Parquet table
        :return: True if index exists, False otherwise
        """
        self._manager.format(Const.PARQUET_SOURCE)
        return self._existsIndex(path)
class DeleteIndexCommand(object):
    """
    Deletes an existing index. Deleting a non-existent index is a no-op.
    """
    def __init__(self, manager):
        # Parent DataFrameIndexManager that owns the Java-side manager.
        self._manager = manager
    def _deleteIndex(self, path):
        """
        Delete index in the metastore for the table path (no-op when the
        index does not exist), using the source configured on
        'DataFrameIndexManager'.
        :param path: path to the table
        """
        self._manager._jdim.delete().deleteIndex(path)
    def table(self, tableName):
        """
        Delete index for a Spark persistent table; behaves like the
        datasource delete command.
        :param tableName: table name in catalog
        """
        self._manager._jdim.delete().table(tableName)
    def parquet(self, path):
        """
        Delete index for a Parquet table (no-op when missing); forces the
        'parquet' source, overwriting any source set before.
        :param path: path to the Parquet table
        """
        self._manager.format(Const.PARQUET_SOURCE)
        self._deleteIndex(path)
class DataFrameIndexManager(object):
    """
    Entrypoint to the index functionality: read an indexed table, create an
    index for a file path, or delete a table's index.
    Usage examples:
    >>> df = context.index.format("parquet").option("key", "value").load("path")
    >>> df = context.index.parquet("path")
    >>> context.index.exists.parquet("path")
    True
    >>> context.index.delete.parquet("path")
    """
    def __init__(self, context):
        if not isinstance(context, QueryContext):
            raise AttributeError(
                'Expected <QueryContext> instance, found %s' % (context))
        # SQLContext used to wrap returned Java DataFrames
        self._sqlctx = context.spark_session._wrapped
        # Java-side index manager for this context
        self._jdim = context._jcontext.index()
        # datasource matching the table type; 'parquet' by default
        self._source = Const.PARQUET_SOURCE
        # extra datasource options
        self._options = {}
    def _init_manager(self):
        """
        Push the configured format and all accumulated options down to the
        Java index manager.
        """
        self._jdim.format(self._source)
        for key, value in self._options.items():
            self._jdim.option(key, value)
    def format(self, source):
        """
        Set file format of the table, e.g. 'parquet'.
        :param source: source to set
        :return: self
        """
        self._source = '%s' % (source)
        return self
    def option(self, key, value):
        """
        Set a single option, overwriting any previous value for the same key.
        All options are stored as strings, mirroring 'DataFrameReader'.
        :param key: option key
        :param value: option value
        :return: self
        """
        self._options['%s' % (key)] = '%s' % (value)
        return self
    def options(self, opts):
        """
        Set multiple options from a dictionary; raises error when the passed
        instance is not a <dict>. Already-set options are overwritten.
        :param opts: options dictionary
        :return: self
        """
        if not isinstance(opts, dict):
            raise AttributeError('Expected <dict>, found %s' % (opts))
        for key, value in opts.items():
            self.option(key, value)
        return self
    def table(self, tableName):
        """
        Load index for a Spark persistent table from the catalog; behaves
        like datasource index loading.
        :param tableName: table name in catalog
        :return: DataFrame instance
        """
        self._init_manager()
        return DataFrame(self._jdim.table(tableName), self._sqlctx)
    def parquet(self, path):
        """
        Shortcut for format('parquet') followed by load(path). Recommended
        over the standard interface (format -> load).
        :param path: path to the Parquet table
        :return: DataFrame instance
        """
        return self.format(Const.PARQUET_SOURCE).load(path)
    def load(self, path):
        """
        Load the table path as a DataFrame using the configured source and
        options.
        :param path: path to the table
        :return: DataFrame instance
        """
        self._init_manager()
        return DataFrame(self._jdim.load(path), self._sqlctx)
    @property
    def create(self):
        """
        Access to 'create index' functionality.
        :return: CreateIndexCommand instance
        """
        self._init_manager()
        return CreateIndexCommand(self)
    @property
    def exists(self):
        """
        Access to 'exists index' functionality.
        :return: ExistsIndexCommand instance
        """
        self._init_manager()
        return ExistsIndexCommand(self)
    @property
    def delete(self):
        """
        Access to 'delete index' functionality.
        :return: DeleteIndexCommand instance
        """
        self._init_manager()
        return DeleteIndexCommand(self)
class QueryContext(object):
    """
    The entrypoint to programming Spark with index functionality.
    A QueryContext creates 'DataFrame's with index support by keeping a
    filesystem metastore with index information: min/max/nulls statistics and
    optional filter statistics (e.g. bloom filters) for the indexed columns.
    To create a QueryContext:
    >>> from pyspark.sql.session import SparkSession
    >>> spark = SparkSession.builder...
    >>> from lightcopy.index import QueryContext
    >>> context = QueryContext(spark)
    """
    def __init__(self, session):
        if not isinstance(session, SparkSession):
            raise AttributeError(
                'Expected <SparkSession> to initialize query context, found %s' % (session))
        self._spark_session = session
        # Instantiate the JVM-side context backing this Python wrapper.
        jvm = session.sparkContext._jvm
        self._jcontext = jvm.com.github.lightcopy.QueryContext(session._jsparkSession)
    @property
    def spark_session(self):
        """
        Return reference to the SparkSession used to create this QueryContext.
        :return: SparkSession instance
        """
        return self._spark_session
    @property
    def index(self):
        """
        Return a DataFrameIndexManager giving access to index functionality;
        the main entrypoint to query DataFrames or create indexes.
        :return: DataFrameIndexManager instance
        """
        return DataFrameIndexManager(self)
| |
#!/usr/bin/env python
"""The EE Python library."""
__version__ = '0.1.191'
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
# pylint: disable=g-bad-import-order
import builtins
import collections
import datetime
import inspect
import numbers
import os
import six
import sys
import webbrowser
# Optional imports.
# pylint: disable=g-import-not-at-top
try:
import IPython
except ImportError:
pass
from . import batch
from . import data
from . import deserializer
from . import ee_types as types
from ._helpers import _GetPersistentCredentials
# Public re-exports.
from ._helpers import ServiceAccountCredentials
from ._helpers import apply # pylint: disable=redefined-builtin
from ._helpers import call
from ._helpers import profilePrinting
from .apifunction import ApiFunction
from .collection import Collection
from .computedobject import ComputedObject
from .customfunction import CustomFunction
from .dictionary import Dictionary
from .ee_date import Date
from .ee_exception import EEException
from .ee_list import List
from .ee_number import Number
from .ee_string import String
from .element import Element
from .encodable import Encodable
from .feature import Feature
from .featurecollection import FeatureCollection
from .filter import Filter
from .function import Function
from .geometry import Geometry
from .image import Image
from .imagecollection import ImageCollection
from .oauth import request_token
from .oauth import write_token
from .oauth import get_authorization_url
from .serializer import Serializer
from .terrain import Terrain
# A list of autogenerated class names added by _InitializeGeneratedClasses
# and cleared again by _ResetGeneratedClasses.
_generatedClasses = []
class _AlgorithmsContainer(dict):
"""A lightweight class that is used as a dictionary with dot notation.
"""
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError
def __setattr__(self, name, value):
self[name] = value
def __delattr__(self, name):
del self[name]
def in_colab_shell():
  """Tests if the code is being executed within Google Colab."""
  try:
    # Imported lazily: google.colab only exists inside the Colab runtime.
    import google.colab
    # NOTE(review): if the module-level `import IPython` failed, this raises
    # NameError instead of returning False — confirm Colab always ships IPython.
    return isinstance(IPython.get_ipython(), google.colab._shell.Shell)
  except ImportError:
    return False
  except AttributeError:  # If google.colab._shell is not defined.
    return False
def in_jupyter_shell():
  """Tests if the code is being executed within Jupyter."""
  try:
    # Imported lazily: ipykernel is only present in notebook environments.
    import ipykernel.zmqshell
    return isinstance(IPython.get_ipython(), ipykernel.zmqshell.ZMQInteractiveShell)
  except ImportError:
    return False
  except NameError:
    # The module-level `import IPython` failed, so IPython is undefined.
    return False
def obtain_and_write_token(auth_code=None):
  """Exchanges an authorization code for a token and saves it.

  Prompts the user for the code when one is not supplied.
  """
  if not auth_code:
    auth_code = builtins.input('Enter verification code: ')
  assert isinstance(auth_code, str)
  write_token(request_token(auth_code))
  print('\nSuccessfully saved authorization token.')  # pylint: disable=superfluous-parens
def display_auth_instructions_for_noninteractive(auth_url):
  """Displays instructions for authenticating without blocking for user input."""
  # Printed rather than displayed as HTML so the text can be copied from a
  # terminal or log (e.g. on a headless machine).
  print('Paste the following address into a web browser:\n'
        '\n'
        '    %s\n'
        '\n'
        'On the web page, please authorize access to your '
        'Earth Engine account and copy the authentication code. '
        'Next authenticate with the following command:\n'
        '\n'
        '    earthengine authenticate '
        '--authorization-code=PLACE_AUTH_CODE_HERE\n'
        % auth_url)
def display_auth_instructions_with_print(auth_url):
  """Displays instructions for authenticating using a print statement."""
  # Plain-text fallback used outside notebook environments.
  print('To authorize access needed by Earth Engine, open the following '
        'URL in a web browser and follow the instructions. If the web '
        'browser does not start automatically, please manually browse the '
        'URL below.\n'
        '\n'
        '    {0}\n'
        '\n'
        'The authorization workflow will generate a code, which you '
        'should paste in the box below. '.format(auth_url)
       )
def display_auth_instructions_with_html(auth_url):
  """Displays instructions for authenticating using HTML code.

  Args:
    auth_url: The authorization URL to present to the user.

  Raises:
    ImportError: re-raised after printing a hint, if IPython is unavailable.
  """
  try:
    # NOTE(review): `display` is not imported in this module; it is the
    # notebook-injected global. Outside a notebook this raises NameError,
    # which the ImportError handler below does not catch — confirm intended.
    display(IPython.display.HTML(
        """<p>To authorize access needed by Earth Engine, open the following
        URL in a web browser and follow the instructions:</p>
        <p><a href={0}>{0}</a></p>
        <p>The authorization workflow will generate a code, which you
        should paste in the box below</p>
        """.format(auth_url)))
  except ImportError:
    # Fixed typo in the user-facing message: 'instaslled' -> 'installed'.
    print('The IPython module must be installed to use HTML.')
    raise
def Authenticate(authorization_code=None, quiet=None):
  """Prompts the user to authorize access to Earth Engine via OAuth2.

  Args:
    authorization_code: An optional authorization code. When supplied, the
      token is obtained and saved directly with no user interaction.
    quiet: If truthy, print non-interactive instructions and return without
      blocking for user input.
  """
  # Removed leftover debug statement: print('DEBUG starting Authenticate v17')
  if authorization_code:
    obtain_and_write_token(authorization_code)
    return
  auth_url = get_authorization_url()
  if quiet:
    display_auth_instructions_for_noninteractive(auth_url)
    return
  if in_colab_shell():
    if sys.version_info[0] == 2:  # Python 2
      # Colab on Python 2 cannot collect input inline; fall back to the
      # non-interactive flow and return immediately.
      display_auth_instructions_for_noninteractive(auth_url)
      return
    else:  # Python 3
      display_auth_instructions_with_print(auth_url)
  elif in_jupyter_shell():
    display_auth_instructions_with_html(auth_url)
  else:
    display_auth_instructions_with_print(auth_url)
    webbrowser.open_new(auth_url)
  auth_code = builtins.input('Enter verification code: ')
  assert isinstance(auth_code, str)
  obtain_and_write_token(auth_code.strip())
# A dictionary of algorithms that are not bound to a specific class.
# Presumably populated during Initialize() (via _InitializeUnboundMethods)
# and replaced with a fresh container by Reset().
Algorithms = _AlgorithmsContainer()
def Initialize(
    credentials='persistent',
    opt_url=None,
    use_cloud_api=False,
    cloud_api_key=None,
    http_transport=None,
    project=None):
  """Initialize the EE library.
  If this hasn't been called by the time any object constructor is used,
  it will be called then. If this is called a second time with a different
  URL, this doesn't do an un-initialization of e.g.: the previously loaded
  Algorithms, but will overwrite them and let point at alternate servers.
  Args:
    credentials: OAuth2 credentials. 'persistent' (default) means use
      credentials already stored in the filesystem, or raise an explanatory
      exception guiding the user to create those credentials.
    opt_url: The base url for the EarthEngine REST API to connect to.
    use_cloud_api: Whether the Cloud API should be used.
    cloud_api_key: An optional API key to use the Cloud API.
    http_transport: The http transport method to use when making requests.
    project: The project-id or number to use when making api calls.
  """
  if credentials == 'persistent':
    credentials = _GetPersistentCredentials()
  # The data module must be configured before any class initializers run,
  # since they fetch the algorithm definitions from the server.
  data.initialize(
      credentials=credentials,
      api_base_url=(opt_url + '/api' if opt_url else None),
      tile_base_url=opt_url,
      use_cloud_api=use_cloud_api,
      cloud_api_base_url=opt_url,
      cloud_api_key=cloud_api_key,
      project=project,
      http_transport=http_transport)
  # Initialize the dynamically loaded functions on the objects that want them.
  ApiFunction.initialize()
  Element.initialize()
  Image.initialize()
  Feature.initialize()
  Collection.initialize()
  ImageCollection.initialize()
  FeatureCollection.initialize()
  Filter.initialize()
  Geometry.initialize()
  List.initialize()
  Number.initialize()
  String.initialize()
  Date.initialize()
  Dictionary.initialize()
  Terrain.initialize()
  # Finally register the autogenerated classes and unbound algorithms.
  _InitializeGeneratedClasses()
  _InitializeUnboundMethods()
def Reset():
  """Reset the library. Useful for re-initializing to a different server."""
  data.reset()
  ApiFunction.reset()
  Element.reset()
  Image.reset()
  Feature.reset()
  Collection.reset()
  ImageCollection.reset()
  FeatureCollection.reset()
  Filter.reset()
  Geometry.reset()
  List.reset()
  Number.reset()
  String.reset()
  Date.reset()
  Dictionary.reset()
  Terrain.reset()
  _ResetGeneratedClasses()
  # Replace the container so stale algorithm references are dropped.
  global Algorithms
  Algorithms = _AlgorithmsContainer()
def _ResetGeneratedClasses():
  """Remove the dynamically generated API classes from the module namespace."""
  global _generatedClasses
  module_globals = globals()
  for class_name in _generatedClasses:
    ApiFunction.clearApi(module_globals[class_name])
    del module_globals[class_name]
  _generatedClasses = []
  # Warning: the whole module namespace is handed over by reference here;
  # the registry sees far more than just the generated classes.
  types._registerClasses(module_globals)  # pylint: disable=protected-access
def _Promote(arg, klass):
  """Wrap an argument in an object of the specified class.

  This is used to e.g.: promote numbers or strings to Images and arrays
  to Collections.

  Args:
    arg: The object to promote.
    klass: The expected type, given as the server-side type name.

  Returns:
    The argument promoted if the class is recognized, otherwise the
    original argument.
  """
  if arg is None:
    return arg
  if klass == 'Image':
    return Image(arg)
  elif klass == 'Feature':
    if isinstance(arg, Collection):
      # TODO(user): Decide whether we want to leave this in. It can be
      # quite dangerous on large collections.
      return ApiFunction.call_(
          'Feature', ApiFunction.call_('Collection.geometry', arg))
    else:
      return Feature(arg)
  elif klass == 'Element':
    if isinstance(arg, Element):
      # Already an Element.
      return arg
    elif isinstance(arg, Geometry):
      # Geometries get promoted to Features.
      return Feature(arg)
    elif isinstance(arg, ComputedObject):
      # Try a cast.
      return Element(arg.func, arg.args, arg.varName)
    else:
      # No way to convert.
      raise EEException('Cannot convert %s to Element.' % arg)
  elif klass == 'Geometry':
    if isinstance(arg, Collection):
      return ApiFunction.call_('Collection.geometry', arg)
    else:
      return Geometry(arg)
  elif klass in ('FeatureCollection', 'Collection'):
    # For now Collection is synonymous with FeatureCollection.
    if isinstance(arg, Collection):
      return arg
    else:
      return FeatureCollection(arg)
  elif klass == 'ImageCollection':
    return ImageCollection(arg)
  elif klass == 'Filter':
    return Filter(arg)
  elif klass == 'Algorithm':
    if isinstance(arg, six.string_types):
      # An API function name.
      return ApiFunction.lookup(arg)
    elif callable(arg):
      # A native function that needs to be wrapped.
      # NOTE(review): inspect.getargspec is deprecated and removed in
      # Python 3.11; kept here for Python 2 compatibility (six is in use).
      args_count = len(inspect.getargspec(arg).args)
      return CustomFunction.create(arg, 'Object', ['Object'] * args_count)
    elif isinstance(arg, Encodable):
      # An ee.Function or a computed function like the return value of
      # Image.parseExpression().
      return arg
    else:
      raise EEException('Argument is not a function: %s' % arg)
  elif klass == 'Dictionary':
    if isinstance(arg, dict):
      # Plain dicts are passed through untouched.
      return arg
    else:
      return Dictionary(arg)
  elif klass == 'String':
    if (types.isString(arg) or
        isinstance(arg, ComputedObject) or
        isinstance(arg, String)):
      return String(arg)
    else:
      return arg
  elif klass == 'List':
    return List(arg)
  elif klass in ('Number', 'Float', 'Long', 'Integer', 'Short', 'Byte'):
    return Number(arg)
  elif klass in globals():
    # A module-level class, including the dynamically generated ones.
    cls = globals()[klass]
    ctor = ApiFunction.lookupInternal(klass)
    # Handle dynamically created classes.
    if isinstance(arg, cls):
      # Return unchanged.
      return arg
    elif ctor:
      # The client-side constructor will call the server-side constructor.
      return cls(arg)
    elif isinstance(arg, six.string_types):
      if hasattr(cls, arg):
        # arg is the name of a method in klass.
        return getattr(cls, arg)()
      else:
        raise EEException('Unknown algorithm: %s.%s' % (klass, arg))
    else:
      # Client-side cast.
      return cls(arg)
  else:
    # Unrecognized type name: hand the argument back unchanged.
    return arg
def _InitializeUnboundMethods():
  """Attach the unbound API functions (e.g. 'A.B.c') under `Algorithms`."""
  # Sort the items by length, so parents get created before children.
  items = sorted(
      ApiFunction.unboundFunctions().items(), key=lambda x: len(x[0]))
  for name, func in items:
    signature = func.getSignature()
    if signature.get('hidden', False):
      continue
    # Create nested objects as needed.
    name_parts = name.split('.')
    target = Algorithms
    while len(name_parts) > 1:
      first = name_parts[0]
      # Set the attribute if it doesn't already exist. The try/except block
      # works in both Python 2 & 3.
      try:
        getattr(target, first)
      except AttributeError:
        setattr(target, first, _AlgorithmsContainer())
      target = getattr(target, first)
      name_parts = name_parts[1:]
    # Attach the function.
    # We need a copy of the function to attach properties; the extra
    # function scope also pins `func` per iteration, avoiding the classic
    # late-binding closure bug.
    def GenerateFunction(f):
      return lambda *args, **kwargs: f.call(*args, **kwargs)  # pylint: disable=unnecessary-lambda
    bound = GenerateFunction(func)
    bound.signature = signature
    # Add docs. If there are non-ASCII characters in the docs, and we're in
    # Python 2, use a hammer to force them into a str.
    try:
      bound.__doc__ = str(func)
    except UnicodeEncodeError:
      bound.__doc__ = func.__str__().encode('utf8')
    setattr(target, name_parts[0], bound)
def _InitializeGeneratedClasses():
  """Generate classes for extra types that appear in the web API."""
  signatures = ApiFunction.allSignatures()
  # Leading component of every function name, e.g. 'Array' from 'Array.cat'.
  name_prefixes = set(name.split('.')[0] for name in signatures)
  # Every return type mentioned by any signature.
  return_types = set(signatures[sig]['returns'] for sig in signatures)
  # A class is wanted when its name is both a function-name prefix and a
  # return type, and it is not already defined in this module.
  for class_name in name_prefixes.intersection(return_types):
    if class_name in globals():
      continue
    globals()[class_name] = _MakeClass(class_name)
    _generatedClasses.append(class_name)
    ApiFunction._bound_signatures.add(class_name)  # pylint: disable=protected-access
  # Warning: the whole module namespace is passed by reference into the
  # registry; it sees a lot more than just the generated classes.
  types._registerClasses(globals())  # pylint: disable=protected-access
def _MakeClass(name):
  """Generates a dynamic API class for a given name."""

  def init(self, *args):
    """Initializer for dynamically created classes.

    Args:
      self: The instance of this class. Listed to make the linter hush.
      *args: Either a ComputedObject to be promoted to this type, or
        arguments to an algorithm with the same name as this class.

    Returns:
      The new class.
    """
    klass = globals()[name]
    onlyOneArg = (len(args) == 1)
    # Are we trying to cast something that's already of the right class?
    if onlyOneArg and isinstance(args[0], klass):
      result = args[0]
    else:
      # Decide whether to call a server-side constructor or just do a
      # client-side cast.
      ctor = ApiFunction.lookupInternal(name)
      # NOTE(review): args[0] raises IndexError when called with zero
      # arguments -- confirm zero-arg construction is never expected.
      firstArgIsPrimitive = not isinstance(args[0], ComputedObject)
      shouldUseConstructor = False
      if ctor:
        if not onlyOneArg:
          # Can't client-cast multiple arguments.
          shouldUseConstructor = True
        elif firstArgIsPrimitive:
          # Can't cast a primitive.
          shouldUseConstructor = True
        elif args[0].func != ctor:
          # We haven't already called the constructor on this object.
          shouldUseConstructor = True
      # Apply our decision.
      if shouldUseConstructor:
        # Call ctor manually to avoid having promote() called on the output.
        ComputedObject.__init__(
            self, ctor, ctor.promoteArgs(ctor.nameArgs(args)))
      else:
        # Just cast and hope for the best.
        if not onlyOneArg:
          # We don't know what to do with multiple args.
          raise EEException(
              'Too many arguments for ee.%s(): %s' % (name, args))
        elif firstArgIsPrimitive:
          # Can't cast a primitive.
          raise EEException(
              'Invalid argument for ee.%s(): %s. Must be a ComputedObject.' %
              (name, args))
        else:
          result = args[0]
        ComputedObject.__init__(self, result.func, result.args, result.varName)

  # 'name' is exposed as a method returning the server-side type name.
  properties = {'__init__': init, 'name': lambda self: name}
  new_class = type(str(name), (ComputedObject,), properties)
  ApiFunction.importApi(new_class, name, name)
  return new_class
# Set up type promotion rules as soon as the package is loaded.
Function._registerPromoter(_Promote)  # pylint: disable=protected-access
| |
import datetime
from django import forms
from django.core import urlresolvers
from django.db import models
from django.template.defaultfilters import slugify
from django.utils.translation import ugettext_lazy as _
from purr import managers
#--------------------------------------------------------------------------
#
# Abstract:
#
#--------------------------------------------------------------------------
class Base(models.Model):
    """Abstract base model: auto-managed timestamps plus internal notes."""

    #----------------------------------
    # All database fields:
    #----------------------------------
    # Hidden:
    created = models.DateTimeField(auto_now_add=True, editable=False,)
    modified = models.DateTimeField(auto_now=True, editable=False,)
    # Base:
    notes = models.TextField(_(u'notes'), blank=True, help_text=_(u'Not published.'),)

    #----------------------------------
    # Class Meta:
    #----------------------------------
    class Meta:
        abstract = True
        get_latest_by = 'modified'

    #----------------------------------
    # Custom methods:
    #----------------------------------
    @property
    def is_modified(self):
        # True once the row has been saved more than once: auto_now bumps
        # `modified` on every save, while `created` is set only on insert.
        return self.modified > self.created
#--------------------------------------------------------------------------
#
# Models:
#
#--------------------------------------------------------------------------
class Category(Base):
    """A hierarchical category; categories may nest via the `parent` FK."""

    #----------------------------------
    # All database fields:
    #----------------------------------
    # Meta:
    slug = models.SlugField(max_length=255, help_text=_(u'Short descriptive unique name for use in urls.'),)
    # Base:
    name = models.CharField(_(u'name'), max_length=200, help_text=_(u'Short descriptive name for this category.'),)
    # Foreign keys:
    # NOTE: related_name='child' makes the reverse accessor `obj.child`.
    parent = models.ForeignKey('self', null=True, blank=True, related_name='child',)

    #----------------------------------
    # Custom manager attributes:
    #----------------------------------
    objects = managers.CategoryManager()

    #----------------------------------
    # Class Meta:
    #----------------------------------
    class Meta:
        ordering = ['parent__name', 'name',]
        unique_together = ('slug', 'parent',)
        verbose_name = _('category',)
        verbose_name_plural = _('categories',)

    #----------------------------------
    # def __XXX__()
    #----------------------------------
    def __unicode__(self):
        # Ancestors are upper-cased to stand out from the leaf name.
        name_list = [category.name.upper() for category in self._recurse_for_parents(self)]
        name_list.append(self.name)
        return _(u'%s') % self.get_separator().join(name_list)

    #----------------------------------
    # def save()
    #----------------------------------
    def save(self, *args, **kwargs):
        """Reject self-referencing hierarchies and default the slug from name."""
        if self.id:
            if self.parent and self.parent_id == self.id:
                raise forms.ValidationError(_(u'You may not save a category in itself!'))
            for p in self._recurse_for_parents(self):
                if self.id == p.id:
                    raise forms.ValidationError(_(u'You may not save a category in itself!'))
        if not self.slug:
            self.slug = slugify(self.name)
        # Accept positional args too, matching Django's Model.save() signature.
        super(Category, self).save(*args, **kwargs)  # Call the "real" save()

    #----------------------------------
    # def get_absolute_url()
    #----------------------------------
    def get_absolute_url(self):
        """URL built from the slugs of all ancestors plus this category."""
        parents = self._recurse_for_parents(self)
        slug_list = [category.slug for category in parents]
        if slug_list:
            slug_list = '/'.join(slug_list) + '/'
        else:
            slug_list = ''
        return urlresolvers.reverse('purr_category_purr', kwargs={'hierarchy' : slug_list,},)

    #----------------------------------
    # Custom methods:
    #----------------------------------
    def parents(self):
        "All ancestor categories, top-most first."
        return self._recurse_for_parents(self)

    #----------------------------------
    def children(self):
        "Immediate child categories, ordered by name."
        # BUG FIX: the reverse accessor is `child` (see related_name on
        # `parent`); the default `category_set` used before does not exist
        # and raised AttributeError.
        return self.child.all().order_by('name')

    #----------------------------------
    def get_separator(self):
        return ' | '

    #----------------------------------
    def _recurse_for_parents(self, category_obj):
        "Collect ancestors of `category_obj`; root-first for the original caller."
        p_list = []
        if category_obj.parent_id:
            p = category_obj.parent
            p_list.append(p)
            if p != self:
                more = self._recurse_for_parents(p)
                p_list.extend(more)
        if category_obj == self and p_list:
            p_list.reverse()
        return p_list

    #----------------------------------
    def _parents_repr(self):
        "Representation of categories."
        name_list = [category.name for category in self._recurse_for_parents(self)]
        return self.get_separator().join(name_list)
    _parents_repr.short_description = _(u'Category parents')

    #----------------------------------
    def get_url_name(self):
        "Get all the absolute URLs and names for use in the site navigation."
        name_list = []
        url_list = []
        for category in self._recurse_for_parents(self):
            name_list.append(category.name)
            url_list.append(category.get_absolute_url())
        name_list.append(self.name)
        url_list.append(self.get_absolute_url())
        return zip(name_list, url_list)

    #----------------------------------
    def _flatten(self, L):
        "Flatten an arbitrarily nested list (from a python newsgroup post)."
        if not isinstance(L, list):
            return [L]
        if L == []:
            return L
        return self._flatten(L[0]) + self._flatten(L[1:])

    #----------------------------------
    def _recurse_for_children(self, node):
        "Build a nested [node, [child, ...], ...] tree below `node`."
        children = []
        children.append(node)
        for child in node.child.all():
            if child != self:
                children_list = self._recurse_for_children(child)
                children.append(children_list)
        return children

    #----------------------------------
    def get_all_children(self, include_self=False):
        "Gets a flat list of all descendant categories."
        children_list = self._recurse_for_children(self)
        ix = 0 if include_self else 1
        flat_list = self._flatten(children_list[ix:])
        return flat_list
| |
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# Global plot styling: larger base font for readability.
mpl.rcParams.update({'font.size': 16})
from csv_dictionary import *
# Benchmark results; presumably a dict of numpy arrays keyed by CSV column
# name (see makeCsvDictOfArrays in csv_dictionary).
boxes = makeCsvDictOfArrays('test_results/BENCHMARK_boxes_dt.csv')
# Shared colors and line styles used by the plotting helpers below.
color1 = [0, 0, 0.5]
color2 = [0.5, 0.5, 0.5]
linestyle1 = '-'
linestyle2 = '--'
def plotTimePosition3(time, position3):
    """Plot the x/y/z columns of `position3` against `time` on the current figure."""
    plt.gcf()
    # (column index, line style, color, line width) per component.
    component_styles = [
        (0, linestyle1, color1, 4.0),
        (1, linestyle2, color1, 4.0),
        (2, linestyle1, color2, 2.0),
    ]
    for column, style, color, width in component_styles:
        plt.plot(time, position3[:, column],
                 linewidth=width, linestyle=style, color=color)
    plt.xlabel('Time (s)')
    plt.ylabel('Position (m)')
    plt.grid()
    plt.legend(['x', 'y', 'z'], loc='best')
# helper function for resizing axes
def vector_scale(x, scale):
    """Stretch `x` about its mean: the mean stays fixed, spread grows by `scale`."""
    center = np.mean(x)
    return center + (np.asarray(x) - center) * scale
def vector_log10_scale(x, scale):
    """Apply vector_scale in log10 space (for padding log-scaled axes)."""
    stretched_logs = vector_scale(np.log10(x), scale)
    return [10 ** v for v in stretched_logs]
# Create a plot with time step Dt on horizontal axis
# Value of `yname` plotted on vertical axis
def plotEnginesDt(params, yname
                  , axscale=1.1
                  , ayscale=1.1
                  , csvDict=boxes
                  , legend='best'
                  , xname='dt'
                  , xlabel='Time step (s)'
                  , ylabel='Error'
                  , xlim=[]
                  , ylim=[]
                  , xscale='linear'
                  , yscale='linear'
                  , title='title'
                  , skipDart=False
                  ):
    """Plot csvDict[yname] vs csvDict[xname] for each physics engine.

    NOTE: `params` is mutated in place (params['engine'] is overwritten for
    each engine).  `axscale`/`ayscale` pad the automatic axis limits when
    explicit xlim/ylim are not given.  `query` comes from the csv_dictionary
    star-import and presumably yields row indices matching `params` --
    verify against that module.
    """
    # Marker symbol and matplotlib format string per engine.
    engines = {}
    engines['bullet'] = ['$B$', 'b--']
    if not skipDart:
        engines['dart'] = ['$d$', 'g--']
    engines['ode'] = ['$O$', 'r--']
    engines['simbody'] = ['$S$', 'k--']
    fig = plt.figure()
    xdata = {}
    ydata = {}
    for e in sorted(engines.keys()):
        params['engine'] = e
        ii = np.array(list(query(csvDict, params)))
        xdata[e] = csvDict[xname][ii]
        ydata[e] = csvDict[yname][ii]
        # First character of the format string is the color code.
        color = engines[e][1][0]
        # eps keeps zero-valued data visible when the y axis is log-scaled.
        plt.plot(xdata[e]
                 , ydata[e]+np.finfo(float).eps
                 , engines[e][1]
                 , mfc=color
                 , marker=engines[e][0]
                 , markersize=20.0
                 , markeredgecolor=color
                 , linewidth=2.0
                 )
    plt.grid()
    plt.xlabel(xlabel, fontsize=18)
    plt.ylabel(ylabel, fontsize=18)
    plt.gca().set_xscale(xscale)
    plt.gca().set_yscale(yscale)
    plt.title(title)
    plt.gcf().set_size_inches(10, 6)
    # Explicit limits win; otherwise pad the auto limits (in log10 space
    # for log-scaled axes).
    if len(xlim) == 2:
        plt.xlim(xlim)
    elif xscale == 'log':
        plt.xlim(vector_log10_scale(plt.xlim(), axscale))
    else:
        plt.xlim(vector_scale(plt.xlim(), axscale))
    if len(ylim) == 2:
        plt.ylim(ylim)
    elif yscale == 'log':
        plt.ylim(vector_log10_scale(plt.ylim(), ayscale))
    else:
        plt.ylim(vector_scale(plt.ylim(), ayscale))
    plt.legend(sorted(engines.keys()), loc=legend)
    plt.show();
    # some extra info about each plot
    # NOTE(review): these min/max dicts are local and discarded on return --
    # dead code unless inspected in a debugger.
    xdata_minmax = {}
    ydata_minmax = {}
    for e in sorted(engines.keys()):
        xdata_minmax[e] = [min(xdata[e]), max(xdata[e])]
        ydata_minmax[e] = [min(ydata[e]), max(ydata[e])]
def plotEnginesTime(params, yname
                    , csvDict=boxes
                    , legend='best'
                    , skipDart=False
                    , xname='timeRatio'
                    , xlabel='Time ratio (real / sim)'
                    , ylabel='Error'
                    , xlim=[]
                    , ylim=[]
                    , xscale='linear'
                    , yscale='linear'
                    , title='title'
                    ):
    """Wrapper around plotEnginesDt with the real/sim time ratio on the x axis."""
    forwarded = dict(csvDict=csvDict, legend=legend, skipDart=skipDart,
                     xname=xname, xlabel=xlabel, ylabel=ylabel,
                     xlim=xlim, ylim=ylim, xscale=xscale, yscale=yscale,
                     title=title)
    plotEnginesDt(params, yname, **forwarded)
def plotEnginesModelCount(params, yname
                          , csvDict=boxes
                          , legend='best'
                          , skipDart=False
                          , xname='modelCount'
                          , xlabel='Model count'
                          , ylabel='Time ratio (real / sim)'
                          , xlim=[]
                          , ylim=[]
                          , xscale='linear'
                          , yscale='linear'
                          , title='title'
                          ):
    """Wrapper around plotEnginesDt with the model count on the x axis."""
    forwarded = dict(csvDict=csvDict, legend=legend, skipDart=skipDart,
                     xname=xname, xlabel=xlabel, ylabel=ylabel,
                     xlim=xlim, ylim=ylim, xscale=xscale, yscale=yscale,
                     title=title)
    plotEnginesDt(params, yname, **forwarded)
def plot3TimeDt(params
                , csvDict=boxes
                , yname='linPositionErr_maxAbs'
                , title=''
                , skipDart=False
                , xscale='linear'
                , yscale='linear'
                ):
    """Produce three plots: `yname` vs dt, time ratio vs dt, `yname` vs time ratio."""
    # 1) requested metric against time step
    plotEnginesDt(params
                  , csvDict=csvDict
                  , yname=yname
                  , title=title
                  , skipDart=skipDart
                  , xscale=xscale
                  , yscale=yscale
                  )
    # 2) computational cost against time step
    plotEnginesDt(params
                  , csvDict=csvDict
                  , yname='timeRatio'
                  , ylabel='Computational time / sim time'
                  , title='Computational time'
                  , skipDart=skipDart
                  , xscale=xscale
                  , yscale=yscale
                  )
    # 3) requested metric against computational cost
    plotEnginesTime(params
                    , csvDict=csvDict
                    , yname=yname
                    , title=title
                    , skipDart=skipDart
                    , xscale=xscale
                    , yscale=yscale
                    )
def plotErrorDt(classname, title_prefix
                , csvDict=boxes
                , legend='best'
                , xscale='linear'
                , yscale='linear'):
    """Plot the five standard error metrics against time step for one test class.

    Args:
        classname: value matched against the 'classname' CSV column.
        title_prefix: string prepended to each plot title.
        csvDict: benchmark data dictionary (column name -> array).
        legend: legend location forwarded to plotEnginesDt.
        xscale, yscale: axis scales ('linear' or 'log') applied to every plot.
    """
    p = {'classname': classname}
    # BUG FIX: xscale was previously forwarded only for the position plot;
    # all five metrics now honor the requested x-axis scale.  (Also removed
    # a dead `title_prefix = title_prefix` self-assignment.)
    metrics = (
        ('linPositionErr_maxAbs', 'position'),
        ('angPositionErr_mag_maxAbs', 'angle'),
        ('linVelocityErr_maxAbs', 'velocity'),
        ('angMomentumErr_maxAbs', 'angular momentum'),
        ('energyError_maxAbs', 'energy'),
    )
    for yname, title_suffix in metrics:
        plotEnginesDt(p, yname=yname, title=title_prefix + title_suffix,
                      csvDict=csvDict, legend=legend,
                      xscale=xscale, yscale=yscale)
def plotTimeDt(classname, title_prefix
               , csvDict=boxes
               , legend='best'
               , yscale='linear'):
    """Plot the computational-time ratio against time step for one test class.

    (Removed a dead `title_prefix = title_prefix` self-assignment.)
    """
    p = {'classname': classname}
    plotEnginesDt(p, yname='timeRatio', title=title_prefix + 'time ratio',
                  ylabel='Time ratio (real / sim)',
                  csvDict=csvDict, legend=legend, yscale=yscale)
def plotErrorTime(classname, title_prefix
                  , csvDict=boxes
                  , legend='best'
                  , yscale='linear'):
    """Plot the five standard error metrics against real-time ratio for one class.

    (Removed a dead `title_prefix = title_prefix` self-assignment.)
    """
    p = {'classname': classname}
    metrics = (
        ('linPositionErr_maxAbs', 'position'),
        ('angPositionErr_mag_maxAbs', 'angle'),
        ('linVelocityErr_maxAbs', 'velocity'),
        ('angMomentumErr_maxAbs', 'angular momentum'),
        ('energyError_maxAbs', 'energy'),
    )
    for yname, title_suffix in metrics:
        plotEnginesTime(p, yname=yname, title=title_prefix + title_suffix,
                        csvDict=csvDict, legend=legend, yscale=yscale)
| |
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2018 (ita)
"""
Java support
Javac is one of the few compilers that behaves very badly:
#. it outputs files where it wants to (-d is only for the package root)
#. it recompiles files silently behind your back
#. it outputs an undefined amount of files (inner classes)
Remember that the compilation can be performed using Jython[1] rather than regular Python. Instead of
running one of the following commands::
./waf configure
python waf configure
You would have to run::
java -jar /path/to/jython.jar waf configure
[1] http://www.jython.org/
Usage
=====
Load the "java" tool.
def configure(conf):
conf.load('java')
Java tools will be autodetected; if present, the standard JAVA_HOME
environment variable will be used. The standard CLASSPATH variable is
used for library searching.
In configuration phase checks can be done on the system environment, for
example to check if a class is known in the classpath::
conf.check_java_class('java.io.FileOutputStream')
or if the system supports JNI applications building::
conf.check_jni_headers()
The java tool supports compiling java code, creating jar files and
creating javadoc documentation. This can be either done separately or
together in a single definition. For example to manage them separately::
bld(features = 'javac',
srcdir = 'src',
compat = '1.7',
use = 'animals',
name = 'cats-src',
)
bld(features = 'jar',
basedir = '.',
destfile = '../cats.jar',
name = 'cats',
use = 'cats-src'
)
Or together by defining all the needed attributes::
bld(features = 'javac jar javadoc',
srcdir = 'src/', # folder containing the sources to compile
outdir = 'src', # folder where to output the classes (in the build directory)
compat = '1.6', # java compatibility version number
classpath = ['.', '..'],
# jar
basedir = 'src', # folder containing the classes and other files to package (must match outdir)
destfile = 'foo.jar', # do not put the destfile in the folder of the java classes!
use = 'NNN',
jaropts = ['-C', 'default/src/', '.'], # can be used to give files
manifest = 'src/Manifest.mf', # Manifest file to include
# javadoc
javadoc_package = ['com.meow' , 'com.meow.truc.bar', 'com.meow.truc.foo'],
javadoc_output = 'javadoc',
)
External jar dependencies can be mapped to a standard waf "use" dependency by
setting an environment variable with a CLASSPATH prefix in the configuration,
for example::
conf.env.CLASSPATH_NNN = ['aaaa.jar', 'bbbb.jar']
and then NNN can be freely used in rules as::
use = 'NNN',
In the java tool the dependencies via use are not transitive by default, as
this necessity depends on the code. To enable recursive dependency scanning
use on a specific rule:
recurse_use = True
Or build-wise by setting RECURSE_JAVA:
bld.env.RECURSE_JAVA = True
Unit tests can be integrated in the waf unit test environment using the javatest extra.
"""
import os, shutil
from waflib import Task, Utils, Errors, Node
from waflib.Configure import conf
from waflib.TaskGen import feature, before_method, after_method, taskgen_method
from waflib.Tools import ccroot
ccroot.USELIB_VARS['javac'] = set(['CLASSPATH', 'JAVACFLAGS'])
SOURCE_RE = '**/*.java'
JAR_RE = '**/*'
class_check_source = '''
public class Test {
public static void main(String[] argv) {
Class lib;
if (argv.length < 1) {
System.err.println("Missing argument");
System.exit(77);
}
try {
lib = Class.forName(argv[0]);
} catch (ClassNotFoundException e) {
System.err.println("ClassNotFoundException");
System.exit(1);
}
lib = null;
System.exit(0);
}
}
'''
@feature('javac')
@before_method('process_source')
def apply_java(self):
    """
    Create a javac task for compiling *.java files*. There can be
    only one javac task by task generator.
    """
    Utils.def_attrs(self, jarname='', classpath='',
        sourcepath='.', srcdir='.',
        jar_mf_attributes={}, jar_mf_classpath=[])

    # Resolve (and create) the class output directory in the build tree.
    outdir = getattr(self, 'outdir', None)
    if outdir:
        if not isinstance(outdir, Node.Node):
            outdir = self.path.get_bld().make_node(self.outdir)
    else:
        outdir = self.path.get_bld()
    outdir.mkdir()
    self.outdir = outdir
    self.env.OUTDIR = outdir.abspath()

    self.javac_task = tsk = self.create_task('javac')
    tmp = []

    # Normalize srcdir (string, Node, or list thereof) to a list of Nodes.
    srcdir = getattr(self, 'srcdir', '')
    if isinstance(srcdir, Node.Node):
        srcdir = [srcdir]
    for x in Utils.to_list(srcdir):
        if isinstance(x, Node.Node):
            y = x
        else:
            y = self.path.find_dir(x)
            if not y:
                self.bld.fatal('Could not find the folder %s from %s' % (x, self.path))
        tmp.append(y)
    tsk.srcdir = tmp

    if getattr(self, 'compat', None):
        tsk.env.append_value('JAVACFLAGS', ['-source', str(self.compat)])

    if hasattr(self, 'sourcepath'):
        fold = [isinstance(x, Node.Node) and x or self.path.find_dir(x) for x in self.to_list(self.sourcepath)]
        names = os.pathsep.join([x.srcpath() for x in fold])
    else:
        # NOTE(review): `names` is a joined string in the branch above but a
        # list here -- confirm append_value handles both as intended.
        names = [x.srcpath() for x in tsk.srcdir]

    if names:
        tsk.env.append_value('JAVACFLAGS', ['-sourcepath', names])
@taskgen_method
def java_use_rec(self, name, **kw):
    """
    Processes recursively the *use* attribute for each referred java compilation
    """
    # `tmp_use_seen` guards against cycles and duplicate visits.
    if name in self.tmp_use_seen:
        return

    self.tmp_use_seen.append(name)

    try:
        y = self.bld.get_tgen_by_name(name)
    except Errors.WafError:
        # Not a task generator: treat the name as a system uselib entry.
        self.uselib.append(name)
        return
    else:
        y.post()
        # Add generated JAR name for CLASSPATH. Task ordering (set_run_after)
        # is already guaranteed by ordering done between the single tasks
        if hasattr(y, 'jar_task'):
            self.use_lst.append(y.jar_task.outputs[0].abspath())

    # NOTE(review): **kw is accepted but not forwarded to the recursive call.
    for x in self.to_list(getattr(y, 'use', [])):
        self.java_use_rec(x)
@feature('javac')
@before_method('propagate_uselib_vars')
@after_method('apply_java')
def use_javac_files(self):
    """
    Processes the *use* attribute referring to other java compilations
    """
    self.use_lst = []
    self.tmp_use_seen = []
    self.uselib = self.to_list(getattr(self, 'uselib', []))
    names = self.to_list(getattr(self, 'use', []))
    get = self.bld.get_tgen_by_name
    for x in names:
        try:
            y = get(x)
        except Errors.WafError:
            # Unknown task generator: keep the name as a uselib entry.
            self.uselib.append(x)
        else:
            y.post()
            if hasattr(y, 'jar_task'):
                # Depend on the produced jar and add it to the classpath.
                self.use_lst.append(y.jar_task.outputs[0].abspath())
                self.javac_task.set_run_after(y.jar_task)
            else:
                # No jar produced: just order this compilation after y's tasks.
                for tsk in y.tasks:
                    self.javac_task.set_run_after(tsk)
        # If recurse use scan is enabled recursively add use attribute for each used one
        if getattr(self, 'recurse_use', False) or self.bld.env.RECURSE_JAVA:
            self.java_use_rec(x)
    self.env.append_value('CLASSPATH', self.use_lst)
@feature('javac')
@after_method('apply_java', 'propagate_uselib_vars', 'use_javac_files')
def set_classpath(self):
    """
    Sets the CLASSPATH value on the *javac* task previously created.
    """
    extra = getattr(self, 'classpath', None)
    if extra:
        self.env.append_unique('CLASSPATH', extra)
    # The environment does not change inside the loop, so join once.
    joined = os.pathsep.join(self.env.CLASSPATH) + os.pathsep
    for task in self.tasks:
        task.env.CLASSPATH = joined
@feature('jar')
@after_method('apply_java', 'use_javac_files')
@before_method('process_source')
def jar_files(self):
    """
    Creates a jar task (one maximum per task generator)
    """
    destfile = getattr(self, 'destfile', 'test.jar')
    jaropts = getattr(self, 'jaropts', [])
    manifest = getattr(self, 'manifest', None)

    # Resolve the folder whose contents will be archived.
    basedir = getattr(self, 'basedir', None)
    if basedir:
        if not isinstance(self.basedir, Node.Node):
            basedir = self.path.get_bld().make_node(basedir)
    else:
        basedir = self.path.get_bld()
    if not basedir:
        self.bld.fatal('Could not find the basedir %r for %r' % (self.basedir, self))

    self.jar_task = tsk = self.create_task('jar_create')
    if manifest:
        # 'm' flag: include the given manifest file in the archive.
        jarcreate = getattr(self, 'jarcreate', 'cfm')
        if not isinstance(manifest,Node.Node):
            node = self.path.find_resource(manifest)
        else:
            node = manifest
        if not node:
            self.bld.fatal('invalid manifest file %r for %r' % (manifest, self))
        # Rebuild when the manifest changes; jar expects its path first.
        tsk.dep_nodes.append(node)
        jaropts.insert(0, node.abspath())
    else:
        jarcreate = getattr(self, 'jarcreate', 'cf')
    if not isinstance(destfile, Node.Node):
        destfile = self.path.find_or_declare(destfile)
    if not destfile:
        self.bld.fatal('invalid destfile %r for %r' % (destfile, self))
    tsk.set_outputs(destfile)
    tsk.basedir = basedir

    # jar -C <basedir> . : archive everything below basedir.
    jaropts.append('-C')
    jaropts.append(basedir.bldpath())
    jaropts.append('.')

    tsk.env.JAROPTS = jaropts
    tsk.env.JARCREATE = jarcreate

    if getattr(self, 'javac_task', None):
        tsk.set_run_after(self.javac_task)
@feature('jar')
@after_method('jar_files')
def use_jar_files(self):
    """
    Processes the *use* attribute to set the build order on the
    tasks created by another task generator.
    """
    self.uselib = self.to_list(getattr(self, 'uselib', []))
    names = self.to_list(getattr(self, 'use', []))
    get = self.bld.get_tgen_by_name
    for x in names:
        try:
            y = get(x)
        except Errors.WafError:
            # Unknown task generator: keep the name as a uselib entry.
            self.uselib.append(x)
        else:
            y.post()
            # Only ordering is needed here; the jar does not link anything.
            self.jar_task.run_after.update(y.tasks)
class JTask(Task.Task):
    """
    Base class for java and jar tasks; provides functionality to run long commands
    """
    def split_argfile(self, cmd):
        """Split a long command line into (inline flags, @argfile contents)."""
        inline, infile = [cmd[0]], []
        for arg in cmd[1:]:
            # jar and javac do not want -J flags in @file
            if arg.startswith('-J'):
                inline.append(arg)
            else:
                infile.append(self.quote_flag(arg))
        return (inline, infile)
class jar_create(JTask):
    """
    Creates a jar file
    """
    color = 'GREEN'
    run_str = '${JAR} ${JARCREATE} ${TGT} ${JAROPTS}'

    def runnable_status(self):
        """
        Wait for dependent tasks to be executed, then read the
        files to update the list of inputs.
        """
        for t in self.run_after:
            if not t.hasrun:
                return Task.ASK_LATER
        if not self.inputs:
            # Inputs are discovered late: the files to archive only exist
            # after the compilation tasks have run.  The output jar itself
            # is excluded from the glob.
            try:
                self.inputs = [x for x in self.basedir.ant_glob(JAR_RE, remove=False, quiet=True) if id(x) != id(self.outputs[0])]
            except Exception:
                raise Errors.WafError('Could not find the basedir %r for %r' % (self.basedir, self))
        return super(jar_create, self).runnable_status()
class javac(JTask):
    """
    Compiles java files
    """
    color = 'BLUE'
    run_str = '${JAVAC} -classpath ${CLASSPATH} -d ${OUTDIR} ${JAVACFLAGS} ${SRC}'
    vars = ['CLASSPATH', 'JAVACFLAGS', 'JAVAC', 'OUTDIR']
    """
    The javac task will be executed again if the variables CLASSPATH, JAVACFLAGS, JAVAC or OUTDIR change.
    """

    def uid(self):
        """Identify java tasks by input&output folder"""
        lst = [self.__class__.__name__, self.generator.outdir.abspath()]
        for x in self.srcdir:
            lst.append(x.abspath())
        return Utils.h_list(lst)

    def runnable_status(self):
        """
        Waits for dependent tasks to be complete, then read the file system to find the input nodes.
        """
        for t in self.run_after:
            if not t.hasrun:
                return Task.ASK_LATER

        if not self.inputs:
            # Sources are globbed only after dependencies have run.
            self.inputs = []
            for x in self.srcdir:
                if x.exists():
                    self.inputs.extend(x.ant_glob(SOURCE_RE, remove=False, quiet=True))
        return super(javac, self).runnable_status()

    def post_run(self):
        """
        List class files created
        """
        # Register every produced .class file (inner classes included) as
        # an output of this task by signature.
        for node in self.generator.outdir.ant_glob('**/*.class', quiet=True):
            self.generator.bld.node_sigs[node] = self.uid()
        self.generator.bld.task_sigs[self.uid()] = self.cache_sig
@feature('javadoc')
@after_method('process_rule')
def create_javadoc(self):
    """
    Creates a javadoc task (feature 'javadoc')

    Requires the task generator attributes `javadoc_package` and
    `javadoc_output` (accessed directly below; AttributeError otherwise).
    """
    tsk = self.create_task('javadoc')
    tsk.classpath = getattr(self, 'classpath', [])
    self.javadoc_package = Utils.to_list(self.javadoc_package)
    if not isinstance(self.javadoc_output, Node.Node):
        self.javadoc_output = self.bld.path.find_or_declare(self.javadoc_output)
class javadoc(Task.Task):
    """
    Builds java documentation
    """
    color = 'BLUE'

    def __str__(self):
        return '%s: %s -> %s\n' % (self.__class__.__name__, self.generator.srcdir, self.generator.javadoc_output)

    def run(self):
        """Invoke the javadoc program on the configured packages."""
        env = self.env
        bld = self.generator.bld
        wd = bld.bldnode

        #add src node + bld node (for generated java code)
        srcpath = self.generator.path.abspath() + os.sep + self.generator.srcdir
        srcpath += os.pathsep
        srcpath += self.generator.path.get_bld().abspath() + os.sep + self.generator.srcdir

        classpath = env.CLASSPATH
        classpath += os.pathsep
        classpath += os.pathsep.join(self.classpath)
        # NOTE(review): if env.CLASSPATH is a list (waf env values often
        # are), the += above extends it element/char-wise -- confirm it is
        # a string by the time this task runs.
        classpath = "".join(classpath)

        self.last_cmd = lst = []
        lst.extend(Utils.to_list(env.JAVADOC))
        lst.extend(['-d', self.generator.javadoc_output.abspath()])
        lst.extend(['-sourcepath', srcpath])
        lst.extend(['-classpath', classpath])
        lst.extend(['-subpackages'])
        lst.extend(self.generator.javadoc_package)
        # Drop empty arguments before execution.
        lst = [x for x in lst if x]

        self.generator.bld.cmd_and_log(lst, cwd=wd, env=env.env or None, quiet=0)

    def post_run(self):
        # Register all generated documentation files by this task's signature.
        nodes = self.generator.javadoc_output.ant_glob('**', quiet=True)
        for node in nodes:
            self.generator.bld.node_sigs[node] = self.uid()
        self.generator.bld.task_sigs[self.uid()] = self.cache_sig
def configure(self):
    """
    Detects the javac, java and jar programs
    """
    # If JAVA_HOME is set, its bin folder is prepended to the search path.
    java_path = self.environ['PATH'].split(os.pathsep)
    v = self.env

    if 'JAVA_HOME' in self.environ:
        java_path = [os.path.join(self.environ['JAVA_HOME'], 'bin')] + java_path
        self.env.JAVA_HOME = [self.environ['JAVA_HOME']]

    for x in 'javac java jar javadoc'.split():
        self.find_program(x, var=x.upper(), path_list=java_path)

    if 'CLASSPATH' in self.environ:
        v.CLASSPATH = self.environ['CLASSPATH']

    # jar and javac are hard requirements; java/javadoc are probed above
    # but not enforced here.
    if not v.JAR:
        self.fatal('jar is required for making java packages')
    if not v.JAVAC:
        self.fatal('javac is required for compiling java classes')

    v.JARCREATE = 'cf' # can use cvf
    v.JAVACFLAGS = []
@conf
def check_java_class(self, classname, with_classpath=None):
    """
    Checks if the specified java class exists

    :param classname: class to check, like java.util.HashMap
    :type classname: string
    :param with_classpath: additional classpath to give
    :type with_classpath: string
    """
    javatestdir = '.waf-javatest'

    classpath = javatestdir
    if self.env.CLASSPATH:
        classpath += os.pathsep + self.env.CLASSPATH
    if isinstance(with_classpath, str):
        classpath += os.pathsep + with_classpath

    # Recreate a scratch folder with the probe program.
    shutil.rmtree(javatestdir, True)
    os.mkdir(javatestdir)

    Utils.writef(os.path.join(javatestdir, 'Test.java'), class_check_source)

    # Compile the source
    self.exec_command(self.env.JAVAC + [os.path.join(javatestdir, 'Test.java')], shell=False)

    # Try to run the app
    cmd = self.env.JAVA + ['-cp', classpath, 'Test', classname]
    self.to_log("%s\n" % str(cmd))
    # NOTE(review): `found` holds the process exit status, so it is 0
    # (falsy) when the class *is* found -- the name reads inverted;
    # confirm how callers interpret the return value.
    found = self.exec_command(cmd, shell=False)

    self.msg('Checking for java class %s' % classname, not found)

    shutil.rmtree(javatestdir, True)

    return found
@conf
def check_jni_headers(conf):
	"""
	Checks for jni headers and libraries. On success the conf.env variables xxx_JAVA are added for use in C/C++ targets::
		def options(opt):
			opt.load('compiler_c')
		def configure(conf):
			conf.load('compiler_c java')
			conf.check_jni_headers()
		def build(bld):
			bld.shlib(source='a.c', target='app', use='JAVA')
	"""
	# A C or C++ compiler must be loaded before we can run link checks
	if not conf.env.CC_NAME and not conf.env.CXX_NAME:
		conf.fatal('load a compiler first (gcc, g++, ..)')
	if not conf.env.JAVA_HOME:
		conf.fatal('set JAVA_HOME in the system environment')
	# jni requires the jvm
	javaHome = conf.env.JAVA_HOME[0]
	dir = conf.root.find_dir(conf.env.JAVA_HOME[0] + '/include')
	if dir is None:
		dir = conf.root.find_dir(conf.env.JAVA_HOME[0] + '/../Headers') # think different?!
	if dir is None:
		conf.fatal('JAVA_HOME does not seem to be set properly')
	# Collect every directory containing jni.h / jni_md.h as include paths
	f = dir.ant_glob('**/(jni|jni_md).h')
	incDirs = [x.parent.abspath() for x in f]
	dir = conf.root.find_dir(conf.env.JAVA_HOME[0])
	f = dir.ant_glob('**/*jvm.(so|dll|dylib)')
	libDirs = [x.parent.abspath() for x in f] or [javaHome]
	# On windows, we need both the .dll and .lib to link. On my JDK, they are
	# in different directories...
	f = dir.ant_glob('**/*jvm.(lib)')
	if f:
		libDirs = [[x, y.parent.abspath()] for x in libDirs for y in f]
	if conf.env.DEST_OS == 'freebsd':
		conf.env.append_unique('LINKFLAGS_JAVA', '-pthread')
	# Try each candidate library directory until one links; the for/else
	# falls through to a fatal error when none of them works.
	for d in libDirs:
		try:
			conf.check(header_name='jni.h', define_name='HAVE_JNI_H', lib='jvm',
				libpath=d, includes=incDirs, uselib_store='JAVA', uselib='JAVA')
		except Exception:
			pass
		else:
			break
	else:
		conf.fatal('could not find lib jvm in %r (see config.log)' % libDirs)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for remote procedure calls using queue
"""
import mox
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import rpc
from nova import test
from nova import service
from nova import manager
from nova.compute import manager as compute_manager
FLAGS = flags.FLAGS
flags.DEFINE_string("fake_manager", "nova.tests.test_service.FakeManager",
"Manager for testing")
class FakeManager(manager.Manager):
    """Stub manager used to verify dispatch from a Service."""
    def test_method(self):
        # Returning a distinct marker lets tests tell which layer answered.
        return 'manager'
class ExtendedService(service.Service):
    """Service subclass whose test_method shadows the manager's version."""
    def test_method(self):
        """Return 'service' so tests can see the override won."""
        return 'service'
class ServiceManagerTestCase(test.TestCase):
    """Test cases for Services"""

    _FAKE_MANAGER = 'nova.tests.test_service.FakeManager'

    def test_message_gets_to_manager(self):
        # A plain Service proxies unknown methods to its manager.
        serv = service.Service('test', 'test', 'test', self._FAKE_MANAGER)
        serv.start()
        self.assertEqual(serv.test_method(), 'manager')

    def test_override_manager_method(self):
        # A method defined on the service subclass shadows the manager's.
        serv = ExtendedService('test', 'test', 'test', self._FAKE_MANAGER)
        serv.start()
        self.assertEqual(serv.test_method(), 'service')
class ServiceFlagsTestCase(test.TestCase):
    """Checks that enable_new_services controls the created record."""

    def _make_service_record(self, enable_new_services):
        # Create, start and stop a fake service, then fetch and purge its
        # db record so each test leaves the database clean.
        self.flags(enable_new_services=enable_new_services)
        app = service.Service.create(host='foo', binary='nova-fake')
        app.start()
        app.stop()
        ctxt = context.get_admin_context()
        ref = db.service_get(ctxt, app.service_id)
        db.service_destroy(ctxt, app.service_id)
        return ref

    def test_service_enabled_on_create_based_on_flag(self):
        ref = self._make_service_record(True)
        self.assert_(not ref['disabled'])

    def test_service_disabled_on_create_based_on_flag(self):
        ref = self._make_service_record(False)
        self.assert_(ref['disabled'])
class ServiceTestCase(test.TestCase):
    """Test cases for Services"""
    def setUp(self):
        super(ServiceTestCase, self).setUp()
        # Stub the db layer once; each test records its own expectations
        # on service.db before calling self.mox.ReplayAll().
        self.mox.StubOutWithMock(service, 'db')
    def test_create(self):
        """Starting a new service attaches three consumers and creates a
        db record when none exists yet."""
        host = 'foo'
        binary = 'nova-fake'
        topic = 'fake'
        # NOTE(vish): Create was moved out of mox replay to make sure that
        # the looping calls are created in StartService.
        app = service.Service.create(host=host, binary=binary)
        self.mox.StubOutWithMock(rpc,
                                 'TopicAdapterConsumer',
                                 use_mock_anything=True)
        self.mox.StubOutWithMock(rpc,
                                 'FanoutAdapterConsumer',
                                 use_mock_anything=True)
        # Expect one consumer on the bare topic, one on "<topic>.<host>",
        # and one fanout consumer; each is then attached to eventlet.
        rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(),
                                 topic=topic,
                                 proxy=mox.IsA(service.Service)).AndReturn(
                rpc.TopicAdapterConsumer)
        rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(),
                                 topic='%s.%s' % (topic, host),
                                 proxy=mox.IsA(service.Service)).AndReturn(
                rpc.TopicAdapterConsumer)
        rpc.FanoutAdapterConsumer(connection=mox.IgnoreArg(),
                                  topic=topic,
                                  proxy=mox.IsA(service.Service)).AndReturn(
                rpc.FanoutAdapterConsumer)
        rpc.TopicAdapterConsumer.attach_to_eventlet()
        rpc.TopicAdapterConsumer.attach_to_eventlet()
        rpc.FanoutAdapterConsumer.attach_to_eventlet()
        service_create = {'host': host,
                          'binary': binary,
                          'topic': topic,
                          'report_count': 0,
                          'availability_zone': 'nova'}
        service_ref = {'host': host,
                       'binary': binary,
                       'report_count': 0,
                       'id': 1}
        # The lookup fails (NotFound), so the service registers itself.
        service.db.service_get_by_args(mox.IgnoreArg(),
                                       host,
                                       binary).AndRaise(exception.NotFound())
        service.db.service_create(mox.IgnoreArg(),
                                  service_create).AndReturn(service_ref)
        self.mox.ReplayAll()
        app.start()
        app.stop()
        self.assert_(app)
    # We're testing sort of weird behavior in how report_state decides
    # whether it is disconnected, it looks for a variable on itself called
    # 'model_disconnected' and report_state doesn't really do much so this
    # these are mostly just for coverage
    def test_report_state_no_service(self):
        """report_state registers the service record when it is missing."""
        host = 'foo'
        binary = 'bar'
        topic = 'test'
        service_create = {'host': host,
                          'binary': binary,
                          'topic': topic,
                          'report_count': 0,
                          'availability_zone': 'nova'}
        service_ref = {'host': host,
                       'binary': binary,
                       'topic': topic,
                       'report_count': 0,
                       'availability_zone': 'nova',
                       'id': 1}
        service.db.service_get_by_args(mox.IgnoreArg(),
                                       host,
                                       binary).AndRaise(exception.NotFound())
        service.db.service_create(mox.IgnoreArg(),
                                  service_create).AndReturn(service_ref)
        service.db.service_get(mox.IgnoreArg(),
                               service_ref['id']).AndReturn(service_ref)
        # report_state bumps report_count on the existing record.
        service.db.service_update(mox.IgnoreArg(), service_ref['id'],
                                  mox.ContainsKeyValue('report_count', 1))
        self.mox.ReplayAll()
        serv = service.Service(host,
                               binary,
                               topic,
                               'nova.tests.test_service.FakeManager')
        serv.start()
        serv.report_state()
    def test_report_state_newly_disconnected(self):
        """A db failure during report_state flags the model as disconnected."""
        host = 'foo'
        binary = 'bar'
        topic = 'test'
        service_create = {'host': host,
                          'binary': binary,
                          'topic': topic,
                          'report_count': 0,
                          'availability_zone': 'nova'}
        service_ref = {'host': host,
                       'binary': binary,
                       'topic': topic,
                       'report_count': 0,
                       'availability_zone': 'nova',
                       'id': 1}
        service.db.service_get_by_args(mox.IgnoreArg(),
                                       host,
                                       binary).AndRaise(exception.NotFound())
        service.db.service_create(mox.IgnoreArg(),
                                  service_create).AndReturn(service_ref)
        # Simulate the db going away mid report.
        service.db.service_get(mox.IgnoreArg(),
                               mox.IgnoreArg()).AndRaise(Exception())
        self.mox.ReplayAll()
        serv = service.Service(host,
                               binary,
                               topic,
                               'nova.tests.test_service.FakeManager')
        serv.start()
        serv.report_state()
        self.assert_(serv.model_disconnected)
    def test_report_state_newly_connected(self):
        """A successful report_state clears a prior disconnected flag."""
        host = 'foo'
        binary = 'bar'
        topic = 'test'
        service_create = {'host': host,
                          'binary': binary,
                          'topic': topic,
                          'report_count': 0,
                          'availability_zone': 'nova'}
        service_ref = {'host': host,
                       'binary': binary,
                       'topic': topic,
                       'report_count': 0,
                       'availability_zone': 'nova',
                       'id': 1}
        service.db.service_get_by_args(mox.IgnoreArg(),
                                       host,
                                       binary).AndRaise(exception.NotFound())
        service.db.service_create(mox.IgnoreArg(),
                                  service_create).AndReturn(service_ref)
        service.db.service_get(mox.IgnoreArg(),
                               service_ref['id']).AndReturn(service_ref)
        service.db.service_update(mox.IgnoreArg(), service_ref['id'],
                                  mox.ContainsKeyValue('report_count', 1))
        self.mox.ReplayAll()
        serv = service.Service(host,
                               binary,
                               topic,
                               'nova.tests.test_service.FakeManager')
        serv.start()
        # Pretend a previous report failed, then confirm recovery.
        serv.model_disconnected = True
        serv.report_state()
        self.assert_(not serv.model_disconnected)
    def test_compute_can_update_available_resource(self):
        """Confirm compute updates their record of compute-service table."""
        host = 'foo'
        binary = 'nova-compute'
        topic = 'compute'
        # Any mocks are not working without UnsetStubs() here.
        self.mox.UnsetStubs()
        ctxt = context.get_admin_context()
        service_ref = db.service_create(ctxt, {'host': host,
                                               'binary': binary,
                                               'topic': topic})
        serv = service.Service(host,
                               binary,
                               topic,
                               'nova.compute.manager.ComputeManager')
        # This testcase want to test calling update_available_resource.
        # No need to call periodic call, then below variable must be set 0.
        serv.report_interval = 0
        serv.periodic_interval = 0
        # Creating mocks
        self.mox.StubOutWithMock(service.rpc.Connection, 'instance')
        service.rpc.Connection.instance(new=mox.IgnoreArg())
        service.rpc.Connection.instance(new=mox.IgnoreArg())
        service.rpc.Connection.instance(new=mox.IgnoreArg())
        self.mox.StubOutWithMock(serv.manager.driver,
                                 'update_available_resource')
        serv.manager.driver.update_available_resource(mox.IgnoreArg(), host)
        # Just doing start()-stop(), not confirm new db record is created,
        # because update_available_resource() works only in
        # libvirt environment. This testcase confirms
        # update_available_resource() is called. Otherwise, mox complains.
        self.mox.ReplayAll()
        serv.start()
        serv.stop()
        db.service_destroy(ctxt, service_ref['id'])
| |
##
# Copyright (c) 2012-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from __future__ import print_function
from StringIO import StringIO
from caldavclientlibrary.client.clientsession import CalDAVSession
from caldavclientlibrary.protocol.url import URL
from caldavclientlibrary.protocol.webdav.definitions import davxml
from calendarserver.tools import tables
from contrib.performance.sqlusage.requests.invite import InviteTest
from contrib.performance.sqlusage.requests.multiget import MultigetTest
from contrib.performance.sqlusage.requests.propfind import PropfindTest
from contrib.performance.sqlusage.requests.propfind_invite import PropfindInviteTest
from contrib.performance.sqlusage.requests.put import PutTest
from contrib.performance.sqlusage.requests.query import QueryTest
from contrib.performance.sqlusage.requests.sync import SyncTest
from pycalendar.datetime import DateTime
from txweb2.dav.util import joinURL
import getopt
import itertools
import sys
from caldavclientlibrary.client.principal import principalCache
"""
This tool is designed to analyze how SQL is being used for various HTTP requests.
It will execute a series of HTTP requests against a test server configuration and
count the total number of SQL statements per request, the total number of rows
returned per request and the total SQL execution time per request. Each series
will be repeated against a varying calendar size so the variation in SQL use
with calendar size can be plotted.
"""
# Calendar sizes (number of events) exercised by the event scaling run.
EVENT_COUNTS = (0, 1, 5, 10, 50, 100, 500, 1000,)
# Sharee counts exercised by the sharing scaling run.
SHAREE_COUNTS = (0, 1, 5, 10, 50, 100,)
# Template event; the two %d slots are the start year and a unique UID
# index. iCalendar data requires CRLF line endings, hence the replace().
ICAL = """BEGIN:VCALENDAR
CALSCALE:GREGORIAN
PRODID:-//Example Inc.//Example Calendar//EN
VERSION:2.0
BEGIN:VTIMEZONE
LAST-MODIFIED:20040110T032845Z
TZID:US/Eastern
BEGIN:DAYLIGHT
DTSTART:20000404T020000
RRULE:FREQ=YEARLY;BYDAY=1SU;BYMONTH=4
TZNAME:EDT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
END:DAYLIGHT
BEGIN:STANDARD
DTSTART:20001026T020000
RRULE:FREQ=YEARLY;BYDAY=-1SU;BYMONTH=10
TZNAME:EST
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
DTSTAMP:20051222T205953Z
CREATED:20060101T150000Z
DTSTART;TZID=US/Eastern:%d0101T100000
DURATION:PT1H
SUMMARY:event 1
UID:%d-ics
END:VEVENT
END:VCALENDAR
""".replace("\n", "\r\n")
class SQLUsageSession(CalDAVSession):
    """CalDAV session pre-wired with the hrefs used by the usage tests."""
    def __init__(self, server, port=None, ssl=False, afunix=None, user="", pswd="", principal=None, root=None, calendar="calendar", logging=False):
        # `calendar` names the collection under the user's calendar home;
        # all other arguments pass straight through to CalDAVSession.
        super(SQLUsageSession, self).__init__(server, port, ssl, afunix, user, pswd, principal, root, logging)
        self.homeHref = "/calendars/users/%s/" % (self.user,)
        self.calendarHref = "/calendars/users/%s/%s/" % (self.user, calendar,)
        self.inboxHref = "/calendars/users/%s/inbox/" % (self.user,)
        self.notificationHref = "/calendars/users/%s/notification/" % (self.user,)
class EventSQLUsage(object):
    """Runs a fixed set of CalDAV requests at increasing calendar sizes
    and reports per-request SQL statement counts, rows and timing."""
    def __init__(self, server, port, users, pswds, logFilePath, compact):
        self.server = server
        self.port = port
        self.users = users
        self.pswds = pswds
        self.logFilePath = logFilePath
        self.compact = compact
        self.requestLabels = []
        # results maps event count -> {request label -> measurement}
        self.results = {}
        # number of events currently present in the test calendar
        self.currentCount = 0
    def runLoop(self, event_counts):
        """Execute every request type once per event count in event_counts."""
        # Make the sessions
        sessions = [
            SQLUsageSession(self.server, self.port, user=user, pswd=pswd, root="/")
            for user, pswd in itertools.izip(self.users, self.pswds)
        ]
        # Set of requests to execute
        requests = [
            MultigetTest("mget-1" if self.compact else "multiget-1", sessions, self.logFilePath, "event", 1),
            MultigetTest("mget-50" if self.compact else "multiget-50", sessions, self.logFilePath, "event", 50),
            PropfindTest("prop-cal" if self.compact else "propfind-cal", sessions, self.logFilePath, "event", 1),
            SyncTest("s-full" if self.compact else "sync-full", sessions, self.logFilePath, "event", True, 0),
            SyncTest("s-1" if self.compact else "sync-1", sessions, self.logFilePath, "event", False, 1),
            QueryTest("q-1" if self.compact else "query-1", sessions, self.logFilePath, "event", 1),
            QueryTest("q-10" if self.compact else "query-10", sessions, self.logFilePath, "event", 10),
            PutTest("put", sessions, self.logFilePath, "event"),
            InviteTest("invite-1", sessions, self.logFilePath, "event", 1),
            InviteTest("invite-5", sessions, self.logFilePath, "event", 5),
        ]
        self.requestLabels = [request.label for request in requests]
        def _warmUp():
            # Warm-up server by doing calendar home and child collection propfinds.
            # Do this twice because the very first time might provision DB objects and
            # blow any DB cache - the second time will warm the DB cache.
            props = (davxml.resourcetype,)
            for _ignore in range(2):
                for session in sessions:
                    session.getPropertiesOnHierarchy(URL(path=session.homeHref), props)
                    session.getPropertiesOnHierarchy(URL(path=session.calendarHref), props)
                    session.getPropertiesOnHierarchy(URL(path=session.inboxHref), props)
                    session.getPropertiesOnHierarchy(URL(path=session.notificationHref), props)
        # Now loop over sets of events
        for count in event_counts:
            print("Testing count = %d" % (count,))
            self.ensureEvents(sessions[0], sessions[0].calendarHref, count)
            result = {}
            for request in requests:
                print(" Test = %s" % (request.label,))
                _warmUp()
                result[request.label] = request.execute(count)
            self.results[count] = result
    def report(self):
        """Print the three summary tables collected by runLoop."""
        self._printReport("SQL Statement Count", "count", "%d")
        self._printReport("SQL Rows Returned", "rows", "%d")
        self._printReport("SQL Time", "timing", "%.1f")
    def _printReport(self, title, attr, colFormat):
        # One row per tested event count; one column per request label.
        table = tables.Table()
        print(title)
        headers = ["Events"] + self.requestLabels
        table.addHeader(headers)
        formats = [tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY)] + \
            [tables.Table.ColumnFormat(colFormat, tables.Table.ColumnFormat.RIGHT_JUSTIFY)] * len(self.requestLabels)
        table.setDefaultColumnFormats(formats)
        for k in sorted(self.results.keys()):
            row = [k] + [getattr(self.results[k][item], attr) for item in self.requestLabels]
            table.addRow(row)
        os = StringIO()
        table.printTable(os=os)
        print(os.getvalue())
        print("")
    def ensureEvents(self, session, calendarhref, n):
        """
        Make sure the required number of events are present in the calendar.
        Only the delta from the previous count is PUT to the server.
        @param n: number of events
        @type n: C{int}
        """
        now = DateTime.getNowUTC()
        for i in range(n - self.currentCount):
            index = self.currentCount + i + 1
            href = joinURL(calendarhref, "%d.ics" % (index,))
            session.writeData(URL(path=href), ICAL % (now.getYear() + 1, index,), "text/calendar")
        self.currentCount = n
class SharerSQLUsage(object):
    """Runs a fixed set of CalDAV requests against a shared calendar at
    increasing sharee counts and reports per-request SQL usage."""
    def __init__(self, server, port, users, pswds, logFilePath, compact):
        self.server = server
        self.port = port
        self.users = users
        self.pswds = pswds
        self.logFilePath = logFilePath
        self.compact = compact
        self.requestLabels = []
        # results maps sharee count -> {request label -> measurement}
        self.results = {}
        # number of sharees currently attached to the test calendar
        self.currentCount = 0
    def runLoop(self, sharee_counts):
        """Execute every request type once per sharee count in sharee_counts."""
        # Make the sessions
        sessions = [
            SQLUsageSession(self.server, self.port, user=user, pswd=pswd, root="/", calendar="shared")
            for user, pswd in itertools.izip(self.users, self.pswds)
        ]
        # Only the first (owner) session is exercised for sharing tests.
        sessions = sessions[0:1]
        # Create the calendar first
        sessions[0].makeCalendar(URL(path=sessions[0].calendarHref))
        # Set of requests to execute
        requests = [
            MultigetTest("mget-1" if self.compact else "multiget-1", sessions, self.logFilePath, "share", 1),
            MultigetTest("mget-50" if self.compact else "multiget-50", sessions, self.logFilePath, "share", 50),
            PropfindInviteTest("propfind", sessions, self.logFilePath, "share", 1),
            SyncTest("s-full" if self.compact else "sync-full", sessions, self.logFilePath, "share", True, 0),
            SyncTest("s-1" if self.compact else "sync-1", sessions, self.logFilePath, "share", False, 1),
            QueryTest("q-1" if self.compact else "query-1", sessions, self.logFilePath, "share", 1),
            QueryTest("q-10" if self.compact else "query-10", sessions, self.logFilePath, "share", 10),
            PutTest("put", sessions, self.logFilePath, "share"),
        ]
        self.requestLabels = [request.label for request in requests]
        # Warm-up server by doing shared calendar propfinds
        props = (davxml.resourcetype,)
        for session in sessions:
            session.getPropertiesOnHierarchy(URL(path=session.calendarHref), props)
        # Now loop over sets of events
        for count in sharee_counts:
            print("Testing count = %d" % (count,))
            self.ensureSharees(sessions[0], sessions[0].calendarHref, count)
            result = {}
            for request in requests:
                print(" Test = %s" % (request.label,))
                result[request.label] = request.execute(count)
            self.results[count] = result
    def report(self):
        """Print the three summary tables collected by runLoop."""
        self._printReport("SQL Statement Count", "count", "%d")
        self._printReport("SQL Rows Returned", "rows", "%d")
        self._printReport("SQL Time", "timing", "%.1f")
    def _printReport(self, title, attr, colFormat):
        # One row per tested sharee count; one column per request label.
        table = tables.Table()
        print(title)
        headers = ["Sharees"] + self.requestLabels
        table.addHeader(headers)
        formats = [tables.Table.ColumnFormat("%d", tables.Table.ColumnFormat.RIGHT_JUSTIFY)] + \
            [tables.Table.ColumnFormat(colFormat, tables.Table.ColumnFormat.RIGHT_JUSTIFY)] * len(self.requestLabels)
        table.setDefaultColumnFormats(formats)
        for k in sorted(self.results.keys()):
            row = [k] + [getattr(self.results[k][item], attr) for item in self.requestLabels]
            table.addRow(row)
        os = StringIO()
        table.printTable(os=os)
        print(os.getvalue())
        print("")
    def ensureSharees(self, session, calendarhref, n):
        """
        Make sure the required number of sharees are present in the calendar.
        Invites only the delta from the previous count, then accepts each
        invitation as the invited user.
        @param n: number of sharees
        @type n: C{int}
        """
        users = []
        uids = []
        for i in range(n - self.currentCount):
            # +2 because user01 is the owner; sharees start at user02.
            index = self.currentCount + i + 2
            users.append("user%02d" % (index,))
            uids.append("urn:x-uid:10000000-0000-0000-0000-000000000%03d" % (index,))
        session.addInvitees(URL(path=calendarhref), uids, True)
        # Now accept each one
        for user in users:
            acceptor = SQLUsageSession(self.server, self.port, user=user, pswd=user, root="/", calendar="shared")
            notifications = acceptor.getNotifications(URL(path=acceptor.notificationHref))
            principal = principalCache.getPrincipal(acceptor, acceptor.principalPath)
            acceptor.processNotification(principal, notifications[0], True)
        self.currentCount = n
def usage(error_msg=None):
    """Print the command line help text.

    If error_msg is given it is printed before the help and a ValueError
    carrying it is raised afterwards; otherwise the process exits with
    status 0.
    """
    if error_msg:
        print(error_msg)

    # Fixed typos in the help text: "sclaing" -> "scaling" and
    # "of s pg_stat_statement" -> "of a pg_stat_statement".
    print("""Usage: sqlusage.py [options] [FILE]
Options:
    -h Print this help and exit
    --server Server hostname
    --port Server port
    --user User name
    --pswd Password
    --event Do event scaling
    --share Do sharee scaling
    --event-counts Comma-separated list of event counts to test
    --sharee-counts Comma-separated list of sharee counts to test
    --compact Make printed tables as thin as possible
Arguments:
    FILE File name for sqlstats.log to analyze.
Description:
    This utility will analyze the output of a pg_stat_statement table.
""")

    if error_msg:
        raise ValueError(error_msg)
    else:
        sys.exit(0)
if __name__ == '__main__':

    # Defaults; all of these can be overridden from the command line.
    server = "localhost"
    port = 8008
    users = ("user01", "user02",)
    pswds = ("user01", "user02",)
    # Renamed from `file` to avoid shadowing the builtin.
    logfile = "sqlstats.logs"
    event_counts = EVENT_COUNTS
    sharee_counts = SHAREE_COUNTS
    compact = False

    do_all = True
    do_event = False
    do_share = False

    try:
        options, args = getopt.getopt(
            sys.argv[1:],
            "h",
            [
                "server=", "port=",
                "user=", "pswd=",
                "compact",
                "event", "share",
                "event-counts=", "sharee-counts=",
            ]
        )
    except getopt.GetoptError as e:
        # Show help text instead of a raw traceback on a bad option.
        usage(str(e))

    for option, value in options:
        if option == "-h":
            usage()
        elif option == "--server":
            server = value
        elif option == "--port":
            port = int(value)
        elif option == "--user":
            users = value.split(",")
        elif option == "--pswd":
            pswds = value.split(",")
        elif option == "--compact":
            compact = True
        elif option == "--event":
            do_all = False
            do_event = True
        elif option == "--share":
            do_all = False
            do_share = True
        elif option == "--event-counts":
            event_counts = [int(i) for i in value.split(",")]
        elif option == "--sharee-counts":
            sharee_counts = [int(i) for i in value.split(",")]
        else:
            usage("Unrecognized option: %s" % (option,))

    # Process arguments
    if len(args) == 1:
        logfile = args[0]
    elif len(args) != 0:
        # Fixed grammar of the error message ("Must zero or one ...").
        usage("Must have zero or one file arguments")

    if do_all or do_event:
        sql = EventSQLUsage(server, port, users, pswds, logfile, compact)
        sql.runLoop(event_counts)
        sql.report()

    if do_all or do_share:
        sql = SharerSQLUsage(server, port, users, pswds, logfile, compact)
        sql.runLoop(sharee_counts)
        sql.report()
| |
"""
The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the
parameters of an estimator.
"""
from __future__ import print_function
from __future__ import division
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized, defaultdict, Sequence
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from ..base import BaseEstimator, is_classifier, clone
from ..base import MetaEstimatorMixin
from ._split import check_cv
from ._validation import _fit_and_score
from ..exceptions import NotFittedError
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..utils import check_random_state
from ..utils.fixes import sp_version
from ..utils.fixes import rankdata
from ..utils.fixes import MaskedArray
from ..utils.random import sample_without_replacement
from ..utils.validation import indexable, check_is_fitted
from ..utils.metaestimators import if_delegate_has_method
from ..metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
    """Grid of parameters with a discrete number of values for each.

    Can be used to iterate over parameter value combinations with the
    Python built-in function iter.

    Read more in the :ref:`User Guide <search>`.

    Parameters
    ----------
    param_grid : dict of string to sequence, or sequence of such
        The parameter grid to explore, as a dictionary mapping estimator
        parameters to sequences of allowed values.

        An empty dict signifies default parameters.

        A sequence of dicts signifies a sequence of grids to search, and is
        useful to avoid exploring parameter combinations that make no sense
        or have no effect. See the examples below.

    Examples
    --------
    >>> from sklearn.model_selection import ParameterGrid
    >>> param_grid = {'a': [1, 2], 'b': [True, False]}
    >>> list(ParameterGrid(param_grid)) == (
    ...    [{'a': 1, 'b': True}, {'a': 1, 'b': False},
    ...     {'a': 2, 'b': True}, {'a': 2, 'b': False}])
    True

    >>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
    >>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
    ...                               {'kernel': 'rbf', 'gamma': 1},
    ...                               {'kernel': 'rbf', 'gamma': 10}]
    True
    >>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
    True

    See also
    --------
    :class:`GridSearchCV`:
        Uses :class:`ParameterGrid` to perform a full parallelized parameter
        search.
    """

    def __init__(self, param_grid):
        if isinstance(param_grid, Mapping):
            # wrap dictionary in a singleton list to support either dict
            # or list of dicts
            param_grid = [param_grid]
        self.param_grid = param_grid

    def __iter__(self):
        """Iterate over the points in the grid.

        Returns
        -------
        params : iterator over dict of string to any
            Yields dictionaries mapping each estimator parameter to one of its
            allowed values.
        """
        for p in self.param_grid:
            # Always sort the keys of a dictionary, for reproducibility
            items = sorted(p.items())
            if not items:
                yield {}
            else:
                keys, values = zip(*items)
                for v in product(*values):
                    params = dict(zip(keys, v))
                    yield params

    def __len__(self):
        """Number of points on the grid."""
        # Product function that can handle iterables (np.prod can't).
        product = partial(reduce, operator.mul)
        return sum(product(len(v) for v in p.values()) if p else 1
                   for p in self.param_grid)

    def __getitem__(self, ind):
        """Get the parameters that would be ``ind``th in iteration

        Parameters
        ----------
        ind : int
            The iteration index

        Returns
        -------
        params : dict of string to any
            Equal to list(self)[ind]
        """
        # This is used to make discrete sampling without replacement memory
        # efficient.
        for sub_grid in self.param_grid:
            # XXX: could memoize information used here
            if not sub_grid:
                if ind == 0:
                    return {}
                else:
                    ind -= 1
                    continue

            # Reverse so most frequent cycling parameter comes first
            keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
            sizes = [len(v_list) for v_list in values_lists]
            # np.prod instead of np.product: the `product` alias is
            # deprecated and removed in NumPy 2.0.
            total = np.prod(sizes)

            if ind >= total:
                # Try the next grid
                ind -= total
            else:
                out = {}
                for key, v_list, n in zip(keys, values_lists, sizes):
                    ind, offset = divmod(ind, n)
                    out[key] = v_list[offset]
                return out

        raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
    """Generator on parameters sampled from given distributions.
    Non-deterministic iterable over random candidate combinations for hyper-
    parameter search. If all parameters are presented as a list,
    sampling without replacement is performed. If at least one parameter
    is given as a distribution, sampling with replacement is used.
    It is highly recommended to use continuous distributions for continuous
    parameters.
    Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not
    accept a custom RNG instance and always use the singleton RNG from
    ``numpy.random``. Hence setting ``random_state`` will not guarantee a
    deterministic iteration whenever ``scipy.stats`` distributions are used to
    define the parameter search space. Deterministic behavior is however
    guaranteed from SciPy 0.16 onwards.
    Read more in the :ref:`User Guide <search>`.
    Parameters
    ----------
    param_distributions : dict
        Dictionary where the keys are parameters and values
        are distributions from which a parameter is to be sampled.
        Distributions either have to provide a ``rvs`` function
        to sample from them, or can be given as a list of values,
        where a uniform distribution is assumed.
    n_iter : integer
        Number of parameter settings that are produced.
    random_state : int or RandomState
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.
    Returns
    -------
    params : dict of string to any
        **Yields** dictionaries mapping each estimator parameter to
        as sampled value.
    Examples
    --------
    >>> from sklearn.model_selection import ParameterSampler
    >>> from scipy.stats.distributions import expon
    >>> import numpy as np
    >>> np.random.seed(0)
    >>> param_grid = {'a':[1, 2], 'b': expon()}
    >>> param_list = list(ParameterSampler(param_grid, n_iter=4))
    >>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
    ...                 for d in param_list]
    >>> rounded_list == [{'b': 0.89856, 'a': 1},
    ...                  {'b': 0.923223, 'a': 1},
    ...                  {'b': 1.878964, 'a': 2},
    ...                  {'b': 1.038159, 'a': 2}]
    True
    """
    def __init__(self, param_distributions, n_iter, random_state=None):
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state
    def __iter__(self):
        # check if all distributions are given as lists
        # in this case we want to sample without replacement
        all_lists = np.all([not hasattr(v, "rvs")
                            for v in self.param_distributions.values()])
        rnd = check_random_state(self.random_state)
        if all_lists:
            # look up sampled parameter settings in parameter grid
            param_grid = ParameterGrid(self.param_distributions)
            grid_size = len(param_grid)
            # Without-replacement sampling needs at least n_iter points.
            if grid_size < self.n_iter:
                raise ValueError(
                    "The total space of parameters %d is smaller "
                    "than n_iter=%d. For exhaustive searches, use "
                    "GridSearchCV." % (grid_size, self.n_iter))
            for i in sample_without_replacement(grid_size, self.n_iter,
                                                random_state=rnd):
                yield param_grid[i]
        else:
            # Always sort the keys of a dictionary, for reproducibility
            # (the RNG is consumed in key order, so ordering matters).
            items = sorted(self.param_distributions.items())
            for _ in six.moves.range(self.n_iter):
                params = dict()
                for k, v in items:
                    if hasattr(v, "rvs"):
                        # Old scipy cannot accept a custom RNG instance.
                        if sp_version < (0, 16):
                            params[k] = v.rvs()
                        else:
                            params[k] = v.rvs(random_state=rnd)
                    else:
                        params[k] = v[rnd.randint(len(v))]
                yield params
    def __len__(self):
        """Number of points that will be sampled."""
        return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
                   verbose, error_score='raise', **fit_params):
    """Fit and score one parameter setting on one train/test split.

    Parameters
    ----------
    X : array-like, sparse matrix or list
        Input data.
    y : array-like or None
        Targets for input data.
    estimator : estimator object
        A object of that type is instantiated for each grid point.
        This is assumed to implement the scikit-learn estimator interface.
        Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed.
    parameters : dict
        Parameters to be set on estimator for this grid point.
    train : ndarray, dtype int or bool
        Boolean mask or indices for training set.
    test : ndarray, dtype int or bool
        Boolean mask or indices for test set.
    scorer : callable or None.
        If provided must be a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
    verbose : int
        Verbosity level.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    **fit_params : kwargs
        Additional parameter passed to the fit function of the estimator.

    Returns
    -------
    score : float
        Score of this parameter setting on given training / test split.
    parameters : dict
        The parameters that have been evaluated.
    n_samples_test : int
        Number of test samples in this split.
    """
    # Delegate the actual fitting/scoring; keep only the pieces reported.
    fit_result = _fit_and_score(estimator, X, y, scorer, train, test,
                                verbose, parameters, fit_params=fit_params,
                                return_n_test_samples=True,
                                error_score=error_score)
    score, n_samples_test, _ = fit_result
    return score, parameters, n_samples_test
def _check_param_grid(param_grid):
    """Validate the values of a parameter grid.

    Every value must be a non-empty, one-dimensional, non-string sequence
    (list, tuple or ndarray); a ``ValueError`` is raised otherwise.
    """
    # Wrap a single dict so one loop handles both dicts and lists of dicts.
    grids = [param_grid] if hasattr(param_grid, 'items') else param_grid
    for grid in grids:
        for name, values in grid.items():
            if isinstance(values, np.ndarray) and values.ndim > 1:
                raise ValueError("Parameter array should be one-dimensional.")
            # Strings are iterable but are never a valid candidate list.
            is_string = isinstance(values, six.string_types)
            if is_string or not isinstance(values, (np.ndarray, Sequence)):
                raise ValueError("Parameter values for parameter ({0}) need "
                                 "to be a sequence(but not a string) or"
                                 " np.ndarray.".format(name))
            if len(values) == 0:
                raise ValueError("Parameter values for parameter ({0}) need "
                                 "to be a non-empty sequence.".format(name))
# XXX Remove in 0.20
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
# in a struct to get rid of the __dict__ of attributes in particular it
# does not copy the string for the keys on each instance.
# By deriving a namedtuple class just to introduce the __repr__ method we
# would also reintroduce the __dict__ on the instance. By telling the
# Python interpreter that this subclass uses static __slots__ instead of
# dynamic attributes. Furthermore we don't need any additional slot in the
# subclass so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
                                      MetaEstimatorMixin)):
    """Base class for hyper parameter search with cross-validation."""
    @abstractmethod
    def __init__(self, estimator, scoring=None,
                 fit_params=None, n_jobs=1, iid=True,
                 refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
                 error_score='raise', return_train_score=True):
        # Store constructor arguments untouched (scikit-learn convention:
        # validation happens at fit time, not at __init__ time).
        self.scoring = scoring
        self.estimator = estimator
        self.n_jobs = n_jobs
        self.fit_params = fit_params if fit_params is not None else {}
        self.iid = iid
        self.refit = refit
        self.cv = cv
        self.verbose = verbose
        self.pre_dispatch = pre_dispatch
        self.error_score = error_score
        self.return_train_score = return_train_score
    @property
    def _estimator_type(self):
        # Delegate classifier/regressor tagging to the wrapped estimator.
        return self.estimator._estimator_type
    def score(self, X, y=None):
        """Returns the score on the given data, if the estimator has been refit.
        This uses the score defined by ``scoring`` where provided, and the
        ``best_estimator_.score`` method otherwise.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Input data, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        Returns
        -------
        score : float
        """
        if self.scorer_ is None:
            raise ValueError("No score function explicitly defined, "
                             "and the estimator doesn't provide one %s"
                             % self.best_estimator_)
        return self.scorer_(self.best_estimator_, X, y)
    def _check_is_fitted(self, method_name):
        # With refit=False there is no best_estimator_ to delegate to, so
        # give a targeted error instead of a generic NotFittedError.
        if not self.refit:
            raise NotFittedError(('This GridSearchCV instance was initialized '
                                  'with refit=False. %s is '
                                  'available only after refitting on the best '
                                  'parameters. ') % method_name)
        else:
            check_is_fitted(self, 'best_estimator_')
    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def predict(self, X):
        """Call predict on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        self._check_is_fitted('predict')
        return self.best_estimator_.predict(X)
    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def predict_proba(self, X):
        """Call predict_proba on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_proba``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        self._check_is_fitted('predict_proba')
        return self.best_estimator_.predict_proba(X)
    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def predict_log_proba(self, X):
        """Call predict_log_proba on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``predict_log_proba``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        self._check_is_fitted('predict_log_proba')
        return self.best_estimator_.predict_log_proba(X)
    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def decision_function(self, X):
        """Call decision_function on the estimator with the best found parameters.
        Only available if ``refit=True`` and the underlying estimator supports
        ``decision_function``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        self._check_is_fitted('decision_function')
        return self.best_estimator_.decision_function(X)
    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def transform(self, X):
        """Call transform on the estimator with the best found parameters.
        Only available if the underlying estimator supports ``transform`` and
        ``refit=True``.
        Parameters
        -----------
        X : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        self._check_is_fitted('transform')
        return self.best_estimator_.transform(X)
    @if_delegate_has_method(delegate=('best_estimator_', 'estimator'))
    def inverse_transform(self, Xt):
        """Call inverse_transform on the estimator with the best found params.
        Only available if the underlying estimator implements
        ``inverse_transform`` and ``refit=True``.
        Parameters
        -----------
        Xt : indexable, length n_samples
            Must fulfill the input assumptions of the
            underlying estimator.
        """
        self._check_is_fitted('inverse_transform')
        # BUG FIX: this previously delegated to ``transform`` by mistake;
        # it must call ``inverse_transform`` on the refitted estimator.
        return self.best_estimator_.inverse_transform(Xt)
    def _fit(self, X, y, groups, parameter_iterable):
        """Actual fitting,  performing the search over parameters."""
        estimator = self.estimator
        cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
        self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
        X, y, groups = indexable(X, y, groups)
        n_splits = cv.get_n_splits(X, y, groups)
        if self.verbose > 0 and isinstance(parameter_iterable, Sized):
            n_candidates = len(parameter_iterable)
            print("Fitting {0} folds for each of {1} candidates, totalling"
                  " {2} fits".format(n_splits, n_candidates,
                                     n_candidates * n_splits))
        base_estimator = clone(self.estimator)
        pre_dispatch = self.pre_dispatch
        # Materialize the splits once so every candidate is evaluated on the
        # identical train/test partitions.
        cv_iter = list(cv.split(X, y, groups))
        out = Parallel(
            n_jobs=self.n_jobs, verbose=self.verbose,
            pre_dispatch=pre_dispatch
        )(delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
                                  train, test, self.verbose, parameters,
                                  fit_params=self.fit_params,
                                  return_train_score=self.return_train_score,
                                  return_n_test_samples=True,
                                  return_times=True, return_parameters=True,
                                  error_score=self.error_score)
          for parameters in parameter_iterable
          for train, test in cv_iter)
        # if one choose to see train score, "out" will contain train score info
        if self.return_train_score:
            (train_scores, test_scores, test_sample_counts,
             fit_time, score_time, parameters) = zip(*out)
        else:
            (test_scores, test_sample_counts,
             fit_time, score_time, parameters) = zip(*out)
        # ``out`` is candidate-major (all splits of candidate 0, then all of
        # candidate 1, ...), so stride by n_splits to recover each candidate.
        candidate_params = parameters[::n_splits]
        n_candidates = len(candidate_params)
        results = dict()
        def _store(key_name, array, weights=None, splits=False, rank=False):
            """A small helper to store the scores/times to the cv_results_"""
            array = np.array(array, dtype=np.float64).reshape(n_candidates,
                                                              n_splits)
            if splits:
                for split_i in range(n_splits):
                    results["split%d_%s"
                            % (split_i, key_name)] = array[:, split_i]
            array_means = np.average(array, axis=1, weights=weights)
            results['mean_%s' % key_name] = array_means
            # Weighted std is not directly available in numpy
            array_stds = np.sqrt(np.average((array -
                                             array_means[:, np.newaxis]) ** 2,
                                            axis=1, weights=weights))
            results['std_%s' % key_name] = array_stds
            if rank:
                # 'min' ranking ensures ties all get the best (smallest) rank,
                # so rank 1 always exists and marks the best candidate(s).
                results["rank_%s" % key_name] = np.asarray(
                    rankdata(-array_means, method='min'), dtype=np.int32)
        # Computed the (weighted) mean and std for test scores alone
        # NOTE test_sample counts (weights) remain the same for all candidates
        test_sample_counts = np.array(test_sample_counts[:n_splits],
                                      dtype=np.int)
        _store('test_score', test_scores, splits=True, rank=True,
               weights=test_sample_counts if self.iid else None)
        if self.return_train_score:
            _store('train_score', train_scores, splits=True)
        _store('fit_time', fit_time)
        _store('score_time', score_time)
        best_index = np.flatnonzero(results["rank_test_score"] == 1)[0]
        best_parameters = candidate_params[best_index]
        # Use one MaskedArray and mask all the places where the param is not
        # applicable for that candidate. Use defaultdict as each candidate may
        # not contain all the params
        param_results = defaultdict(partial(MaskedArray,
                                            np.empty(n_candidates,),
                                            mask=True,
                                            dtype=object))
        for cand_i, params in enumerate(candidate_params):
            for name, value in params.items():
                # An all masked empty array gets created for the key
                # `"param_%s" % name` at the first occurence of `name`.
                # Setting the value at an index also unmasks that index
                param_results["param_%s" % name][cand_i] = value
        results.update(param_results)
        # Store a list of param dicts at the key 'params'
        results['params'] = candidate_params
        self.cv_results_ = results
        self.best_index_ = best_index
        self.n_splits_ = n_splits
        if self.refit:
            # fit the best estimator using the entire dataset
            # clone first to work around broken estimators
            best_estimator = clone(base_estimator).set_params(
                **best_parameters)
            if y is not None:
                best_estimator.fit(X, y, **self.fit_params)
            else:
                best_estimator.fit(X, **self.fit_params)
            self.best_estimator_ = best_estimator
        return self
    @property
    def best_params_(self):
        # Parameters of the winning candidate, looked up in cv_results_.
        check_is_fitted(self, 'cv_results_')
        return self.cv_results_['params'][self.best_index_]
    @property
    def best_score_(self):
        # Mean cross-validated test score of the winning candidate.
        check_is_fitted(self, 'cv_results_')
        return self.cv_results_['mean_test_score'][self.best_index_]
    @property
    def grid_scores_(self):
        warnings.warn(
            "The grid_scores_ attribute was deprecated in version 0.18"
            " in favor of the more elaborate cv_results_ attribute."
            " The grid_scores_ attribute will not be available from 0.20",
            DeprecationWarning)
        check_is_fitted(self, 'cv_results_')
        # Rebuild the legacy per-candidate tuples from cv_results_ columns.
        grid_scores = list()
        for i, (params, mean, std) in enumerate(zip(
                self.cv_results_['params'],
                self.cv_results_['mean_test_score'],
                self.cv_results_['std_test_score'])):
            scores = np.array(list(self.cv_results_['split%d_test_score'
                                                    % s][i]
                                   for s in range(self.n_splits_)),
                              dtype=np.float64)
            grid_scores.append(_CVScoreTuple(params, mean, scores))
        return grid_scores
class GridSearchCV(BaseSearchCV):
    """Exhaustive search over specified parameter values for an estimator.
    Important members are fit, predict.
    GridSearchCV implements a "fit" and a "score" method.
    It also implements "predict", "predict_proba", "decision_function",
    "transform" and "inverse_transform" if they are implemented in the
    estimator used.
    The parameters of the estimator used to apply these methods are optimized
    by cross-validated grid-search over a parameter grid.
    Read more in the :ref:`User Guide <grid_search>`.
    Parameters
    ----------
    estimator : estimator object.
        This is assumed to implement the scikit-learn estimator interface.
        Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed.
    param_grid : dict or list of dictionaries
        Dictionary with parameters names (string) as keys and lists of
        parameter settings to try as values, or a list of such
        dictionaries, in which case the grids spanned by each dictionary
        in the list are explored. This enables searching over any sequence
        of parameter settings.
    scoring : string, callable or None, default=None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
        If ``None``, the ``score`` method of the estimator is used.
    fit_params : dict, optional
        Parameters to pass to the fit method.
    n_jobs : int, default=1
        Number of jobs to run in parallel.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs
            - An int, giving the exact number of total jobs that are
              spawned
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'
    iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample,
        and not the mean loss across the folds.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross validation,
          - integer, to specify the number of folds in a `(Stratified)KFold`,
          - An object to be used as a cross-validation generator.
          - An iterable yielding train, test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this GridSearchCV instance after fitting.
    verbose : integer
        Controls the verbosity: the higher, the more messages.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    return_train_score : boolean, default=True
        If ``'False'``, the ``cv_results_`` attribute will not include training
        scores.
    Examples
    --------
    >>> from sklearn import svm, datasets
    >>> from sklearn.model_selection import GridSearchCV
    >>> iris = datasets.load_iris()
    >>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
    >>> svr = svm.SVC()
    >>> clf = GridSearchCV(svr, parameters)
    >>> clf.fit(iris.data, iris.target)
    ...                             # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    GridSearchCV(cv=None, error_score=...,
           estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
                         decision_function_shape=None, degree=..., gamma=...,
                         kernel='rbf', max_iter=-1, probability=False,
                         random_state=None, shrinking=True, tol=...,
                         verbose=False),
           fit_params={}, iid=..., n_jobs=1,
           param_grid=..., pre_dispatch=..., refit=..., return_train_score=...,
           scoring=..., verbose=...)
    >>> sorted(clf.cv_results_.keys())
    ...                             # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    ['mean_fit_time', 'mean_score_time', 'mean_test_score',...
     'mean_train_score', 'param_C', 'param_kernel', 'params',...
     'rank_test_score', 'split0_test_score',...
     'split0_train_score', 'split1_test_score', 'split1_train_score',...
     'split2_test_score', 'split2_train_score',...
     'std_fit_time', 'std_score_time', 'std_test_score', 'std_train_score'...]
    Attributes
    ----------
    cv_results_ : dict of numpy (masked) ndarrays
        A dict with keys as column headers and values as columns, that can be
        imported into a pandas ``DataFrame``.
        For instance the below given table
        +------------+-----------+------------+-----------------+---+---------+
        |param_kernel|param_gamma|param_degree|split0_test_score|...|rank_....|
        +============+===========+============+=================+===+=========+
        |  'poly'    |    --     |     2      |       0.8       |...|    2    |
        +------------+-----------+------------+-----------------+---+---------+
        |  'poly'    |    --     |     3      |       0.7       |...|    4    |
        +------------+-----------+------------+-----------------+---+---------+
        |  'rbf'     |    0.1    |    --      |       0.8       |...|    3    |
        +------------+-----------+------------+-----------------+---+---------+
        |  'rbf'     |    0.2    |    --      |       0.9       |...|    1    |
        +------------+-----------+------------+-----------------+---+---------+
        will be represented by a ``cv_results_`` dict of::
            {
            'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
                                         mask = [False False False False]...)
            'param_gamma': masked_array(data = [-- -- 0.1 0.2],
                                        mask = [ True  True False False]...),
            'param_degree': masked_array(data = [2.0 3.0 -- --],
                                         mask = [False False  True  True]...),
            'split0_test_score'  : [0.8, 0.7, 0.8, 0.9],
            'split1_test_score'  : [0.82, 0.5, 0.7, 0.78],
            'mean_test_score'    : [0.81, 0.60, 0.75, 0.82],
            'std_test_score'     : [0.02, 0.01, 0.03, 0.03],
            'rank_test_score'    : [2, 4, 3, 1],
            'split0_train_score' : [0.8, 0.9, 0.7, 0.8],
            'split1_train_score' : [0.82, 0.5, 0.7, 0.78],
            'mean_train_score'   : [0.81, 0.7, 0.7, 0.79],
            'std_train_score'    : [0.03, 0.03, 0.04, 0.01],
            'mean_fit_time'      : [0.73, 0.63, 0.43, 0.49],
            'std_fit_time'       : [0.01, 0.02, 0.01, 0.01],
            'mean_score_time'    : [0.007, 0.06, 0.04, 0.04],
            'std_score_time'     : [0.001, 0.002, 0.003, 0.005],
            'params'             : [{'kernel': 'poly', 'degree': 2}, ...],
            }
        NOTE that the key ``'params'`` is used to store a list of parameter
        settings dict for all the parameter candidates.
        The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
        ``std_score_time`` are all in seconds.
    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.
    best_score_ : float
        Score of best_estimator on the left out data.
    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.
    best_index_ : int
        The index (of the ``cv_results_`` arrays) which corresponds to the best
        candidate parameter setting.
        The dict at ``search.cv_results_['params'][search.best_index_]`` gives
        the parameter setting for the best model, that gives the highest
        mean score (``search.best_score_``).
    scorer_ : function
        Scorer function used on the held out data to choose the best
        parameters for the model.
    n_splits_ : int
        The number of cross-validation splits (folds/iterations).
    Notes
    ------
    The parameters selected are those that maximize the score of the left out
    data, unless an explicit score is passed in which case it is used instead.
    If `n_jobs` was set to a value higher than one, the data is copied for each
    point in the grid (and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.
    See Also
    ---------
    :class:`ParameterGrid`:
        generates all the combinations of a hyperparameter grid.
    :func:`sklearn.model_selection.train_test_split`:
        utility function to split the data into a development set usable
        for fitting a GridSearchCV instance and an evaluation set for
        its final evaluation.
    :func:`sklearn.metrics.make_scorer`:
        Make a scorer from a performance metric or loss function.
    """
    def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
                 n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
                 pre_dispatch='2*n_jobs', error_score='raise',
                 return_train_score=True):
        super(GridSearchCV, self).__init__(
            estimator=estimator, scoring=scoring, fit_params=fit_params,
            n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
            pre_dispatch=pre_dispatch, error_score=error_score,
            return_train_score=return_train_score)
        self.param_grid = param_grid
        # Fail fast on malformed grids (string, non-sequence or empty values).
        _check_param_grid(param_grid)
    def fit(self, X, y=None, groups=None):
        """Run fit with all sets of parameters.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.
        """
        # Exhaustively enumerate every combination described by param_grid.
        return self._fit(X, y, groups, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
    """Randomized search on hyper parameters.
    RandomizedSearchCV implements a "fit" and a "score" method.
    It also implements "predict", "predict_proba", "decision_function",
    "transform" and "inverse_transform" if they are implemented in the
    estimator used.
    The parameters of the estimator used to apply these methods are optimized
    by cross-validated search over parameter settings.
    In contrast to GridSearchCV, not all parameter values are tried out, but
    rather a fixed number of parameter settings is sampled from the specified
    distributions. The number of parameter settings that are tried is
    given by n_iter.
    If all parameters are presented as a list,
    sampling without replacement is performed. If at least one parameter
    is given as a distribution, sampling with replacement is used.
    It is highly recommended to use continuous distributions for continuous
    parameters.
    Read more in the :ref:`User Guide <randomized_parameter_search>`.
    Parameters
    ----------
    estimator : estimator object.
        A object of that type is instantiated for each grid point.
        This is assumed to implement the scikit-learn estimator interface.
        Either estimator needs to provide a ``score`` function,
        or ``scoring`` must be passed.
    param_distributions : dict
        Dictionary with parameters names (string) as keys and distributions
        or lists of parameters to try. Distributions must provide a ``rvs``
        method for sampling (such as those from scipy.stats.distributions).
        If a list is given, it is sampled uniformly.
    n_iter : int, default=10
        Number of parameter settings that are sampled. n_iter trades
        off runtime vs quality of the solution.
    scoring : string, callable or None, default=None
        A string (see model evaluation documentation) or
        a scorer callable object / function with signature
        ``scorer(estimator, X, y)``.
        If ``None``, the ``score`` method of the estimator is used.
    fit_params : dict, optional
        Parameters to pass to the fit method.
    n_jobs : int, default=1
        Number of jobs to run in parallel.
    pre_dispatch : int, or string, optional
        Controls the number of jobs that get dispatched during parallel
        execution. Reducing this number can be useful to avoid an
        explosion of memory consumption when more jobs get dispatched
        than CPUs can process. This parameter can be:
            - None, in which case all the jobs are immediately
              created and spawned. Use this for lightweight and
              fast-running jobs, to avoid delays due to on-demand
              spawning of the jobs
            - An int, giving the exact number of total jobs that are
              spawned
            - A string, giving an expression as a function of n_jobs,
              as in '2*n_jobs'
    iid : boolean, default=True
        If True, the data is assumed to be identically distributed across
        the folds, and the loss minimized is the total loss per sample,
        and not the mean loss across the folds.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross validation,
          - integer, to specify the number of folds in a `(Stratified)KFold`,
          - An object to be used as a cross-validation generator.
          - An iterable yielding train, test splits.
        For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
        other cases, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validation strategies that can be used here.
    refit : boolean, default=True
        Refit the best estimator with the entire dataset.
        If "False", it is impossible to make predictions using
        this RandomizedSearchCV instance after fitting.
    verbose : integer
        Controls the verbosity: the higher, the more messages.
    random_state : int or RandomState
        Pseudo random number generator state used for random uniform sampling
        from lists of possible values instead of scipy.stats distributions.
    error_score : 'raise' (default) or numeric
        Value to assign to the score if an error occurs in estimator fitting.
        If set to 'raise', the error is raised. If a numeric value is given,
        FitFailedWarning is raised. This parameter does not affect the refit
        step, which will always raise the error.
    return_train_score : boolean, default=True
        If ``'False'``, the ``cv_results_`` attribute will not include training
        scores.
    Attributes
    ----------
    cv_results_ : dict of numpy (masked) ndarrays
        A dict with keys as column headers and values as columns, that can be
        imported into a pandas ``DataFrame``.
        For instance the below given table
        +--------------+-------------+-------------------+---+---------------+
        | param_kernel | param_gamma | split0_test_score |...|rank_test_score|
        +==============+=============+===================+===+===============+
        |    'rbf'     |     0.1     |        0.8        |...|       2       |
        +--------------+-------------+-------------------+---+---------------+
        |    'rbf'     |     0.2     |        0.9        |...|       1       |
        +--------------+-------------+-------------------+---+---------------+
        |    'rbf'     |     0.3     |        0.7        |...|       1       |
        +--------------+-------------+-------------------+---+---------------+
        will be represented by a ``cv_results_`` dict of::
            {
            'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'],
                                          mask = False),
            'param_gamma'  : masked_array(data = [0.1 0.2 0.3], mask = False),
            'split0_test_score'  : [0.8, 0.9, 0.7],
            'split1_test_score'  : [0.82, 0.5, 0.7],
            'mean_test_score'    : [0.81, 0.7, 0.7],
            'std_test_score'     : [0.02, 0.2, 0.],
            'rank_test_score'    : [3, 1, 1],
            'split0_train_score' : [0.8, 0.9, 0.7],
            'split1_train_score' : [0.82, 0.5, 0.7],
            'mean_train_score'   : [0.81, 0.7, 0.7],
            'std_train_score'    : [0.03, 0.03, 0.04],
            'mean_fit_time'      : [0.73, 0.63, 0.43],
            'std_fit_time'       : [0.01, 0.02, 0.01],
            'mean_score_time'    : [0.007, 0.06, 0.04],
            'std_score_time'     : [0.001, 0.002, 0.003],
            'params'             : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...],
            }
        NOTE that the key ``'params'`` is used to store a list of parameter
        settings dict for all the parameter candidates.
        The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
        ``std_score_time`` are all in seconds.
    best_estimator_ : estimator
        Estimator that was chosen by the search, i.e. estimator
        which gave highest score (or smallest loss if specified)
        on the left out data. Not available if refit=False.
    best_score_ : float
        Score of best_estimator on the left out data.
    best_params_ : dict
        Parameter setting that gave the best results on the hold out data.
    best_index_ : int
        The index (of the ``cv_results_`` arrays) which corresponds to the best
        candidate parameter setting.
        The dict at ``search.cv_results_['params'][search.best_index_]`` gives
        the parameter setting for the best model, that gives the highest
        mean score (``search.best_score_``).
    scorer_ : function
        Scorer function used on the held out data to choose the best
        parameters for the model.
    n_splits_ : int
        The number of cross-validation splits (folds/iterations).
    Notes
    -----
    The parameters selected are those that maximize the score of the held-out
    data, according to the scoring parameter.
    If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting(and not `n_jobs` times). This is done for efficiency
    reasons if individual jobs take very little time, but may raise errors if
    the dataset is large and not enough memory is available. A workaround in
    this case is to set `pre_dispatch`. Then, the memory is copied only
    `pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
    n_jobs`.
    See Also
    --------
    :class:`GridSearchCV`:
        Does exhaustive search over a grid of parameters.
    :class:`ParameterSampler`:
        A generator over parameter settings, constructed from
        param_distributions.
    """
    def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
                 fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
                 verbose=0, pre_dispatch='2*n_jobs', random_state=None,
                 error_score='raise', return_train_score=True):
        # The sampling-specific arguments live on this subclass; everything
        # else is handled by BaseSearchCV.
        self.param_distributions = param_distributions
        self.n_iter = n_iter
        self.random_state = random_state
        super(RandomizedSearchCV, self).__init__(
            estimator=estimator, scoring=scoring, fit_params=fit_params,
            n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
            pre_dispatch=pre_dispatch, error_score=error_score,
            return_train_score=return_train_score)
    def fit(self, X, y=None, groups=None):
        """Run fit on the estimator with randomly drawn parameters.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.
        y : array-like, shape = [n_samples] or [n_samples, n_output], optional
            Target relative to X for classification or regression;
            None for unsupervised learning.
        groups : array-like, with shape (n_samples,), optional
            Group labels for the samples used while splitting the dataset into
            train/test set.
        """
        # Draw n_iter candidate settings and hand them to the shared search.
        sampled_params = ParameterSampler(self.param_distributions,
                                          self.n_iter,
                                          random_state=self.random_state)
        return self._fit(X, y, groups, sampled_params)
| |
# Copyright (c) 2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tarfile
from urllib import quote, unquote
from xml.sax import saxutils
from swift.common.swob import Request, HTTPBadGateway, \
HTTPCreated, HTTPBadRequest, HTTPNotFound, HTTPUnauthorized, HTTPOk, \
HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPNotAcceptable, \
wsgify
from swift.common.utils import json, TRUE_VALUES
from swift.common.constraints import check_utf8, MAX_FILE_SIZE
from swift.common.http import HTTP_BAD_REQUEST, HTTP_UNAUTHORIZED, \
HTTP_NOT_FOUND
from swift.common.constraints import MAX_OBJECT_NAME_LENGTH, \
MAX_CONTAINER_NAME_LENGTH
# Longest acceptable "/container/object" entry in a bulk-delete body: the two
# name limits plus 2 for the slash separators.
MAX_PATH_LENGTH = MAX_OBJECT_NAME_LENGTH + MAX_CONTAINER_NAME_LENGTH + 2
class CreateContainerError(Exception):
    """Raised when a subrequest to auto-create a container fails.

    Carries the failed response's integer status code and status line so
    the caller can report them back to the client.
    """
    def __init__(self, msg, status_int, status):
        Exception.__init__(self, msg)
        self.status_int = status_int
        self.status = status
# Response-body content types this middleware can render for extract-archive
# and bulk-delete results; matched against the request's Accept header.
ACCEPTABLE_FORMATS = ['text/plain', 'application/json', 'application/xml',
                      'text/xml']
class Bulk(object):
"""
Middleware that will do many operations on a single request.
Extract Archive:
Expand tar files into a swift account. Request must be a PUT with the
header X-Extract-Archive specifying the format of archive file. Accepted
formats are tar, tar.gz, and tar.bz2.
For a PUT to the following url:
/v1/AUTH_Account/$UPLOAD_PATH
UPLOAD_PATH is where the files will be expanded to. UPLOAD_PATH can be a
container, a pseudo-directory within a container, or an empty string. The
destination of a file in the archive will be built as follows:
/v1/AUTH_Account/$UPLOAD_PATH/$FILE_PATH
Where FILE_PATH is the file name from the listing in the tar file.
If the UPLOAD_PATH is an empty string, containers will be auto created
accordingly and files in the tar that would not map to any container (files
in the base directory) will be ignored.
Only regular files will be uploaded. Empty directories, symlinks, etc will
not be uploaded.
If all valid files were uploaded successfully will return an HTTPCreated
response. If any files failed to be created will return an HTTPBadGateway
response. In both cases the response body will specify the number of files
successfully uploaded and a list of the files that failed. The return body
will be formatted in the way specified in the request's Accept header.
Acceptable formats are text/plain, application/json, application/xml, and
text/xml.
Bulk Delete:
Will delete multiple objects from their account with a single request.
Responds to DELETE requests with a header 'X-Bulk-Delete: true'.
The Content-Type should be set to text/plain. The body of the DELETE
request will be a newline separated list of url encoded objects to delete.
You can only delete 1000 (configurable) objects per request. The objects
specified in the DELETE request body must be URL encoded and in the form:
/container_name/obj_name
If all objects were successfully deleted (or did not exist), will return an
HTTPOk. If any objects failed to delete, will return an HTTPBadGateway. In
both cases the response body will specify the number of objects
successfully deleted, not found, and a list of the objects that failed.
The return body will be formatted in the way specified in the request's
Accept header. Acceptable formats are text/plain, application/json,
apllication/xml, and text/xml.
"""
def __init__(self, app, conf):
self.app = app
self.max_containers = int(
conf.get('max_containers_per_extraction', 10000))
self.max_failed_extractions = int(
conf.get('max_failed_extractions', 1000))
self.max_deletes_per_request = int(
conf.get('max_deletes_per_request', 1000))
def create_container(self, req, container_path):
    """
    Makes a subrequest to create a new container.
    :params container_path: an unquoted path to a container to be created
    :returns: None on success
    :raises: CreateContainerError on creation error
    """
    # Reuse the caller's WSGI environ so auth info is carried along.
    sub_env = req.environ.copy()
    sub_env['PATH_INFO'] = container_path
    sub_request = Request.blank(container_path, environ=sub_env)
    resp = sub_request.get_response(self.app)
    if resp.status_int // 100 == 2:
        return
    raise CreateContainerError(
        "Create Container Failed: " + container_path,
        resp.status_int, resp.status)
def get_objs_to_delete(self, req):
    """
    Will populate objs_to_delete with data from request input.
    :params req: a Swob request
    :returns: a list of the contents of req.body when separated by newline.
    :raises: HTTPException on failures
    """
    # Buffer of raw body data not yet split into a complete line.
    line = ''
    data_remaining = True
    objs_to_delete = []
    # Reject bodies that carry no length and no chunked transfer-encoding:
    # there would be nothing to read.
    if req.content_length is None and \
            req.headers.get('transfer-encoding', '').lower() != 'chunked':
        raise HTTPBadRequest('Invalid request: no content sent.')
    while data_remaining:
        # Note: the limit is enforced at the top of the next iteration, so
        # it is checked before producing each additional name.
        if len(objs_to_delete) > self.max_deletes_per_request:
            raise HTTPRequestEntityTooLarge(
                'Maximum Bulk Deletes: %d per request' %
                self.max_deletes_per_request)
        if '\n' in line:
            # A complete object name is buffered; peel it off.
            obj_to_delete, line = line.split('\n', 1)
            objs_to_delete.append(obj_to_delete)
        else:
            # Need more input before another name can be produced.
            data = req.body_file.read(MAX_PATH_LENGTH)
            if data:
                line += data
            else:
                data_remaining = False
                # Flush a trailing name that had no final newline.
                if line.strip():
                    objs_to_delete.append(line)
        # A single name longer than this cannot be valid; bail out rather
        # than buffering an unbounded line.
        if len(line) > MAX_PATH_LENGTH * 2:
            raise HTTPBadRequest('Invalid File Name')
    return objs_to_delete
def get_response_body(self, data_format, data_dict, error_list):
    """
    Build a response body matching the requested output format.
    :params data_format: resulting format
    :params data_dict: generated data about results.
    :params error_list: list of quoted filenames that failed
    """
    if data_format == 'text/plain':
        pieces = ['%s: %s\n' % (key, data_dict[key])
                  for key in sorted(data_dict.keys())]
        pieces.append('Errors:\n')
        pieces.append('\n'.join(
            '%s, %s' % (name, status) for name, status in error_list))
        return ''.join(pieces)
    if data_format == 'application/json':
        data_dict['Errors'] = error_list
        return json.dumps(data_dict)
    if data_format.endswith('/xml'):
        parts = ['<?xml version="1.0" encoding="UTF-8"?>\n<delete>\n']
        for key in sorted(data_dict.keys()):
            xml_key = key.replace(' ', '_').lower()
            parts.append('<%s>%s</%s>\n' % (xml_key, data_dict[key], xml_key))
        parts.append('<errors>\n')
        parts.append('\n'.join(
            '<object>'
            '<name>%s</name><status>%s</status>'
            '</object>' % (saxutils.escape(name), status)
            for name, status in error_list))
        parts.append('</errors>\n</delete>\n')
        return ''.join(parts)
    raise HTTPNotAcceptable('Invalid output type')
def handle_delete(self, req):
    """
    Process a bulk-delete request: one DELETE subrequest per listed object.
    :params req: a swob Request
    :raises HTTPException: on unhandled errors
    :returns: a swob Response
    """
    try:
        vrs, account, _junk = req.split_path(2, 3, True)
    except ValueError:
        return HTTPNotFound(request=req)
    incoming_format = req.headers.get('Content-Type')
    if incoming_format and not incoming_format.startswith('text/plain'):
        # For now only accept newline separated object names
        return HTTPNotAcceptable(request=req)
    out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
    if not out_content_type:
        return HTTPNotAcceptable(request=req)
    objs_to_delete = self.get_objs_to_delete(req)
    failed_files = []
    success_count = not_found_count = 0
    # Upgraded to HTTPBadGateway if any backend failure (5xx) is seen below.
    failed_file_response_type = HTTPBadRequest
    for obj_to_delete in objs_to_delete:
        obj_to_delete = obj_to_delete.strip().lstrip('/')
        if not obj_to_delete:
            continue
        obj_to_delete = unquote(obj_to_delete)
        delete_path = '/'.join(['', vrs, account, obj_to_delete])
        if not check_utf8(delete_path):
            failed_files.append([quote(delete_path),
                                 HTTPPreconditionFailed().status])
            continue
        # Build a fresh environ per subrequest; the original body input
        # must not leak into the DELETE subrequests.
        new_env = req.environ.copy()
        new_env['PATH_INFO'] = delete_path
        del(new_env['wsgi.input'])
        new_env['CONTENT_LENGTH'] = 0
        new_env['HTTP_USER_AGENT'] = \
            '%s BulkDelete' % req.environ.get('HTTP_USER_AGENT')
        delete_obj_req = Request.blank(delete_path, new_env)
        resp = delete_obj_req.get_response(self.app)
        if resp.status_int // 100 == 2:
            success_count += 1
        elif resp.status_int == HTTP_NOT_FOUND:
            # Deleting a missing object is not treated as a failure.
            not_found_count += 1
        elif resp.status_int == HTTP_UNAUTHORIZED:
            # Auth failure aborts the whole bulk request immediately.
            return HTTPUnauthorized(request=req)
        else:
            if resp.status_int // 100 == 5:
                failed_file_response_type = HTTPBadGateway
            failed_files.append([quote(delete_path), resp.status])
    resp_body = self.get_response_body(
        out_content_type,
        {'Number Deleted': success_count,
         'Number Not Found': not_found_count},
        failed_files)
    if (success_count or not_found_count) and not failed_files:
        return HTTPOk(resp_body, content_type=out_content_type)
    if failed_files:
        return failed_file_response_type(
            resp_body, content_type=out_content_type)
    return HTTPBadRequest('Invalid bulk delete.')
def handle_extract(self, req, compress_type):
    """
    Expand an uploaded tar archive into objects via PUT subrequests.
    :params req: a swob Request
    :params compress_type: specifying the compression type of the tar.
                           Accepts '', 'gz', or 'bz2'
    :raises HTTPException: on unhandled errors
    :returns: a swob response to request
    """
    success_count = 0
    failed_files = []
    # Containers already created (or confirmed) during this request.
    existing_containers = set()
    out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS)
    if not out_content_type:
        return HTTPNotAcceptable(request=req)
    if req.content_length is None and \
            req.headers.get('transfer-encoding', '').lower() != 'chunked':
        return HTTPBadRequest('Invalid request: no content sent.')
    try:
        vrs, account, extract_base = req.split_path(2, 3, True)
    except ValueError:
        return HTTPNotFound(request=req)
    extract_base = extract_base or ''
    extract_base = extract_base.rstrip('/')
    try:
        # 'r|' opens the tar in non-seekable stream mode, so the request
        # body can be consumed without buffering the whole archive.
        tar = tarfile.open(mode='r|' + compress_type,
                           fileobj=req.body_file)
        while True:
            tar_info = tar.next()
            # Stop at end of archive or once too many members have failed.
            if tar_info is None or \
                    len(failed_files) >= self.max_failed_extractions:
                break
            if tar_info.isfile():
                obj_path = tar_info.name
                if obj_path.startswith('./'):
                    obj_path = obj_path[2:]
                obj_path = obj_path.lstrip('/')
                if extract_base:
                    obj_path = extract_base + '/' + obj_path
                if '/' not in obj_path:
                    continue  # ignore base level file
                destination = '/'.join(
                    ['', vrs, account, obj_path])
                # First path segment names the destination container.
                container = obj_path.split('/', 1)[0]
                if not check_utf8(destination):
                    failed_files.append(
                        [quote(destination[:MAX_PATH_LENGTH]),
                         HTTPPreconditionFailed().status])
                    continue
                if tar_info.size > MAX_FILE_SIZE:
                    failed_files.append([
                        quote(destination[:MAX_PATH_LENGTH]),
                        HTTPRequestEntityTooLarge().status])
                    continue
                if container not in existing_containers:
                    try:
                        self.create_container(
                            req, '/'.join(['', vrs, account, container]))
                        existing_containers.add(container)
                    except CreateContainerError, err:
                        # Auth failure aborts the whole bulk request.
                        if err.status_int == HTTP_UNAUTHORIZED:
                            return HTTPUnauthorized(request=req)
                        failed_files.append([
                            quote(destination[:MAX_PATH_LENGTH]),
                            err.status])
                        continue
                    except ValueError:
                        failed_files.append([
                            quote(destination[:MAX_PATH_LENGTH]),
                            HTTP_BAD_REQUEST])
                        continue
                    if len(existing_containers) > self.max_containers:
                        return HTTPBadRequest(
                            'More than %d base level containers in tar.' %
                            self.max_containers)
                # Stream the member straight into the PUT subrequest body.
                tar_file = tar.extractfile(tar_info)
                new_env = req.environ.copy()
                new_env['wsgi.input'] = tar_file
                new_env['PATH_INFO'] = destination
                new_env['CONTENT_LENGTH'] = tar_info.size
                new_env['HTTP_USER_AGENT'] = \
                    '%s BulkExpand' % req.environ.get('HTTP_USER_AGENT')
                create_obj_req = Request.blank(destination, new_env)
                resp = create_obj_req.get_response(self.app)
                if resp.status_int // 100 == 2:
                    success_count += 1
                else:
                    if resp.status_int == HTTP_UNAUTHORIZED:
                        return HTTPUnauthorized(request=req)
                    failed_files.append([
                        quote(destination[:MAX_PATH_LENGTH]), resp.status])
        resp_body = self.get_response_body(
            out_content_type,
            {'Number Files Created': success_count},
            failed_files)
        if success_count and not failed_files:
            return HTTPCreated(resp_body, content_type=out_content_type)
        if failed_files:
            return HTTPBadGateway(resp_body, content_type=out_content_type)
        return HTTPBadRequest('Invalid Tar File: No Valid Files')
    except tarfile.TarError, tar_error:
        return HTTPBadRequest('Invalid Tar File: %s' % tar_error)
@wsgify
def __call__(self, req):
    """WSGI entry point: route archive-extraction PUTs and bulk DELETEs;
    everything else passes through to the wrapped app."""
    extract_type = \
        req.headers.get('X-Extract-Archive', '').lower().strip('.')
    if extract_type and req.method == 'PUT':
        archive_type = {'tar': '', 'tar.gz': 'gz',
                        'tar.bz2': 'bz2'}.get(extract_type)
        if archive_type is None:
            return HTTPBadRequest("Unsupported archive format")
        return self.handle_extract(req, archive_type)
    bulk_delete = req.headers.get('X-Bulk-Delete', '').lower()
    if bulk_delete in TRUE_VALUES and req.method == 'DELETE':
        return self.handle_delete(req)
    return self.app
def filter_factory(global_conf, **local_conf):
    """Standard paste.deploy filter factory for the Bulk middleware."""
    # Local options override the global ones.
    conf = dict(global_conf, **local_conf)

    def bulk_filter(app):
        return Bulk(app, conf)
    return bulk_filter
| |
"""Forms for uploading diffs."""
from __future__ import unicode_literals
import base64
import json
from functools import partial
from dateutil.parser import isoparse
from django import forms
from django.core.exceptions import ValidationError
from django.utils.encoding import force_text
from django.utils.translation import ugettext, ugettext_lazy as _
from reviewboard.diffviewer.commit_utils import (deserialize_validation_info,
get_file_exists_in_history)
from reviewboard.diffviewer.differ import DiffCompatVersion
from reviewboard.diffviewer.diffutils import check_diff_size
from reviewboard.diffviewer.filediff_creator import create_filediffs
from reviewboard.diffviewer.models import DiffCommit, DiffSet
from reviewboard.diffviewer.validators import (COMMIT_ID_LENGTH,
validate_commit_id)
class BaseCommitValidationForm(forms.Form):
    """A form mixin for handling validation metadata for commits."""

    validation_info = forms.CharField(
        label=_('Validation metadata'),
        help_text=_('Validation metadata generated by the diff commit '
                    'validation resource.'),
        widget=forms.HiddenInput,
        required=False)

    def clean_validation_info(self):
        """Clean the validation_info field.

        If a value was supplied, it must parse as base64-encoded JSON.

        Returns:
            dict:
            The parsed validation information, or an empty dict if the
            field was not supplied.

        Raises:
            django.core.exceptions.ValidationError:
                The value could not be parsed.
        """
        raw_value = self.cleaned_data.get('validation_info', '').strip()

        if not raw_value:
            return {}

        try:
            return deserialize_validation_info(raw_value)
        except (TypeError, ValueError) as e:
            raise ValidationError(
                ugettext(
                    'Could not parse validation info "%(validation_info)s": '
                    '%(exc)s'
                ) % {
                    'validation_info': raw_value,
                    'exc': e,
                })
class UploadCommitForm(BaseCommitValidationForm):
    """The form for uploading a diff and creating a DiffCommit."""

    diff = forms.FileField(
        label=_('Diff'),
        help_text=_('The new diff to upload.'))
    parent_diff = forms.FileField(
        label=_('Parent diff'),
        help_text=_('An optional diff that the main diff is based on. '
                    'This is usually used for distributed revision control '
                    'systems (Git, Mercurial, etc.).'),
        required=False)
    commit_id = forms.CharField(
        label=_('Commit ID'),
        help_text=_('The ID of this commit.'),
        max_length=COMMIT_ID_LENGTH,
        validators=[validate_commit_id])
    parent_id = forms.CharField(
        label=_('Parent commit ID'),
        help_text=_('The ID of the parent commit.'),
        max_length=COMMIT_ID_LENGTH,
        validators=[validate_commit_id])
    commit_message = forms.CharField(
        label=_('Description'),
        help_text=_('The commit message.'))
    author_name = forms.CharField(
        label=_('Author name'),
        help_text=_('The name of the author of this commit.'),
        max_length=DiffCommit.NAME_MAX_LENGTH)
    author_email = forms.CharField(
        label=_('Author e-mail address'),
        help_text=_('The e-mail address of the author of this commit.'),
        max_length=DiffCommit.EMAIL_MAX_LENGTH,
        widget=forms.EmailInput)
    author_date = forms.CharField(
        label=_('Author date'),
        help_text=_('The date and time this commit was authored.'))
    committer_name = forms.CharField(
        label=_('Committer name'),
        help_text=_('The name of the committer of this commit.'),
        max_length=DiffCommit.NAME_MAX_LENGTH,
        required=True)
    committer_email = forms.CharField(
        label=_('Committer e-mail address'),
        help_text=_('The e-mail address of the committer of this commit.'),
        max_length=DiffCommit.EMAIL_MAX_LENGTH,
        widget=forms.EmailInput,
        required=True)
    committer_date = forms.CharField(
        label=_('Committer date'),
        help_text=_('The date and time this commit was committed.'),
        required=True)

    def __init__(self, diffset, request=None, *args, **kwargs):
        """Initialize the form.

        Args:
            diffset (reviewboard.diffviewer.models.diffset.DiffSet):
                The DiffSet to attach the created DiffCommit to.

            request (django.http.HttpRequest, optional):
                The HTTP request from the client.

            *args (tuple):
                Additional positional arguments.

            **kwargs (dict):
                Additional keyword arguments.
        """
        super(UploadCommitForm, self).__init__(*args, **kwargs)

        if not diffset.repository.scmtool_class.commits_have_committer:
            # The SCM does not track a separate committer, so neither
            # require nor accept the committer fields.
            del self.fields['committer_date']
            del self.fields['committer_email']
            del self.fields['committer_name']

        self.diffset = diffset
        self.request = request

    def create(self):
        """Create the DiffCommit.

        Returns:
            reviewboard.diffviewer.models.diffcommit.DiffCommit:
            The created DiffCommit.
        """
        assert self.is_valid()

        return DiffCommit.objects.create_from_upload(
            request=self.request,
            validation_info=self.cleaned_data['validation_info'],
            diffset=self.diffset,
            repository=self.diffset.repository,
            diff_file=self.cleaned_data['diff'],
            parent_diff_file=self.cleaned_data.get('parent_diff'),
            commit_message=self.cleaned_data['commit_message'],
            commit_id=self.cleaned_data['commit_id'],
            parent_id=self.cleaned_data['parent_id'],
            author_name=self.cleaned_data['author_name'],
            author_email=self.cleaned_data['author_email'],
            author_date=self.cleaned_data['author_date'],
            committer_name=self.cleaned_data.get('committer_name'),
            committer_email=self.cleaned_data.get('committer_email'),
            committer_date=self.cleaned_data.get('committer_date'))

    def clean(self):
        """Clean the form.

        Returns:
            dict:
            The cleaned form data.

        Raises:
            django.core.exceptions.ValidationError:
                The form data was not valid.
        """
        super(UploadCommitForm, self).clean()

        if self.diffset.history_id is not None:
            # A diffset will have a history attached if and only if it has
            # been published, in which case we cannot attach further commits
            # to it.
            raise ValidationError(ugettext(
                'Cannot upload commits to a published diff.'))

        if (self.diffset.commit_count and
            'validation_info' not in self.cleaned_data and
            'validation_info' not in self.errors):
            # If validation_info is present in `errors`, it will not be in
            # self.cleaned_data. We do not want to report it missing if it
            # failed validation for another reason.
            self._errors['validation_info'] = self.error_class([
                self.fields['validation_info'].error_messages['required'],
            ])

        return self.cleaned_data

    def _clean_iso_date(self, field_name):
        """Parse an ISO 8601 date/time from the named field.

        This is shared by the author/committer date cleaners, which have
        identical parsing and error behavior.

        Args:
            field_name (unicode):
                The name of the field whose value should be parsed.

        Returns:
            datetime.datetime:
            The parsed date and time.

        Raises:
            django.core.exceptions.ValidationError:
                The value was not in ISO 8601 format.
        """
        try:
            return isoparse(self.cleaned_data[field_name])
        except ValueError:
            raise ValidationError(ugettext(
                'This date must be in ISO 8601 format.'))

    def clean_author_date(self):
        """Parse the date and time in the author_date field.

        Returns:
            datetime.datetime:
            The parsed date and time.
        """
        return self._clean_iso_date('author_date')

    def clean_committer_date(self):
        """Parse the date and time in the committer_date field.

        Returns:
            datetime.datetime:
            The parsed date and time.
        """
        return self._clean_iso_date('committer_date')
class UploadDiffForm(forms.Form):
    """The form for uploading a diff and creating a DiffSet."""

    path = forms.FileField(
        label=_('Diff'),
        help_text=_('The new diff to upload.'))
    parent_diff_path = forms.FileField(
        label=_('Parent Diff'),
        help_text=_('An optional diff that the main diff is based on. '
                    'This is usually used for distributed revision control '
                    'systems (Git, Mercurial, etc.).'),
        required=False)
    basedir = forms.CharField(
        label=_('Base Directory'),
        help_text=_('The absolute path in the repository the diff was '
                    'generated in.'))
    base_commit_id = forms.CharField(
        label=_('Base Commit ID'),
        help_text=_('The ID/revision this change is built upon.'),
        required=False)

    def __init__(self, repository, request=None, *args, **kwargs):
        """Initialize the form.

        Args:
            repository (reviewboard.scmtools.models.Repository):
                The repository the diff will be uploaded against.

            request (django.http.HttpRequest, optional):
                The HTTP request from the client.

            *args (tuple):
                Additional positional arguments.

            **kwargs (dict):
                Additional keyword arguments.
        """
        super(UploadDiffForm, self).__init__(*args, **kwargs)

        self.repository = repository
        self.request = request

        if repository.diffs_use_absolute_paths:
            # This SCMTool uses absolute paths, so there's no need to ask
            # the user for the base directory.
            del self.fields['basedir']

    def clean_base_commit_id(self):
        """Clean the ``base_commit_id`` field.

        Returns:
            unicode:
            The ``base_commit_id`` field stripped of leading and trailing
            whitespace, or ``None`` if that value would be empty.
        """
        stripped = self.cleaned_data['base_commit_id'].strip()

        return stripped or None

    def clean_basedir(self):
        """Clean the ``basedir`` field.

        Returns:
            unicode:
            The basedir field as a unicode string with leading and trailing
            whitespace removed.
        """
        if self.repository.diffs_use_absolute_paths:
            return ''

        basedir = self.cleaned_data['basedir']

        return force_text(basedir.strip())

    def create(self, diffset_history=None):
        """Create the DiffSet.

        Args:
            diffset_history (reviewboard.diffviewer.models.diffset_history.
                             DiffSetHistory):
                The DiffSet history to attach the created DiffSet to.

        Returns:
            reviewboard.diffviewer.models.diffset.DiffSet:
            The created DiffSet.
        """
        assert self.is_valid()

        return DiffSet.objects.create_from_upload(
            repository=self.repository,
            diffset_history=diffset_history,
            diff_file=self.cleaned_data['path'],
            parent_diff_file=self.cleaned_data.get('parent_diff_path'),
            basedir=self.cleaned_data.get('basedir', ''),
            base_commit_id=self.cleaned_data['base_commit_id'],
            request=self.request)
class ValidateCommitForm(BaseCommitValidationForm):
    """A form for validating of DiffCommits."""

    diff = forms.FileField(
        label=_('Diff'),
        help_text=_('The new diff to upload.'))
    parent_diff = forms.FileField(
        label=_('Parent diff'),
        help_text=_('An optional diff that the main diff is based on. '
                    'This is usually used for distributed revision control '
                    'systems (Git, Mercurial, etc.).'),
        required=False)
    commit_id = forms.CharField(
        label=_('Commit ID'),
        help_text=_('The ID of this commit.'),
        max_length=COMMIT_ID_LENGTH,
        validators=[validate_commit_id])
    parent_id = forms.CharField(
        label=_('Parent commit ID'),
        help_text=_('The ID of the parent commit.'),
        max_length=COMMIT_ID_LENGTH,
        validators=[validate_commit_id])
    base_commit_id = forms.CharField(
        label=_('Base commit ID'),
        help_text=_('The base commit ID that the commits are based off '
                    'of.'),
        required=False)

    def __init__(self, repository, request=None, *args, **kwargs):
        """Initialize the form.

        Args:
            repository (reviewboard.scmtools.models.Repository):
                The repository against which the diff is being validated.

            request (django.http.HttpRequest, optional):
                The HTTP request from the client.

            *args (tuple):
                Additional positional arguments to pass to the base
                class initializer.

            **kwargs (dict):
                Additional keyword arguments to pass to the base class
                initializer.
        """
        super(ValidateCommitForm, self).__init__(*args, **kwargs)

        self.repository = repository
        self.request = request

    def clean(self):
        """Clean the form.

        Checks the supplied commit IDs against the validation metadata to
        ensure commits are validated in order and only once.

        Returns:
            dict:
            The cleaned form data.

        Raises:
            django.core.exceptions.ValidationError:
                The form data was not valid.
        """
        super(ValidateCommitForm, self).clean()

        validation_info = self.cleaned_data.get('validation_info')

        if validation_info:
            errors = []

            parent_id = self.cleaned_data.get('parent_id')
            commit_id = self.cleaned_data.get('commit_id')

            # NOTE(review): membership here checks the keys of the parsed
            # validation metadata, which appear to be commit IDs already
            # validated — confirm against deserialize_validation_info.
            if commit_id and commit_id in validation_info:
                errors.append(ugettext('This commit was already validated.'))

            elif parent_id and parent_id not in validation_info:
                errors.append(ugettext('The parent commit was not validated.'))

            if errors:
                # Attach errors to the field and drop the bad value so
                # callers never see invalid validation metadata.
                self._errors['validation_info'] = self.error_class(errors)
                self.cleaned_data.pop('validation_info')

        return self.cleaned_data

    def validate_diff(self):
        """Validate the DiffCommit.

        This will attempt to parse the given diff (and optionally parent
        diff) into :py:class:`FileDiffs
        <reviewboard.diffviewer.models.filediff.FileDiff>`. This will not
        result in anything being committed to the database.

        Returns:
            tuple:
            A 2-tuple containing the following:

            * A list of the created FileDiffs.
            * A list of the parent FileDiffs, or ``None``.

        Raises:
            reviewboard.diffviewer.errors.DiffParserError:
                The diff could not be parsed.

            reviewboard.diffviewer.errors.DiffTooBigError:
                The diff was too big.

            reviewboard.diffviewer.errors.EmptyDiffError:
                The diff did not contain any changes.

            reviewboard.scmtools.errors.FileNotFoundError:
                A file was not found in the repository.

            reviewboard.scmtools.errors.SCMError:
                An error occurred within the SCMTool.
        """
        assert self.is_valid()

        diff_file = self.cleaned_data['diff']
        parent_diff_file = self.cleaned_data.get('parent_diff')
        validation_info = self.cleaned_data.get('validation_info')

        check_diff_size(diff_file, parent_diff_file)

        if parent_diff_file:
            parent_diff_file_contents = parent_diff_file.read()
        else:
            parent_diff_file_contents = None

        base_commit_id = self.cleaned_data['base_commit_id']

        # This DiffSet is never saved; it only gives create_filediffs()
        # something to attach parsed results to during validation.
        diffset = DiffSet(name='diff',
                          revision=0,
                          basedir='',
                          repository=self.repository,
                          diffcompat=DiffCompatVersion.DEFAULT,
                          base_commit_id=base_commit_id)

        get_file_exists = partial(get_file_exists_in_history,
                                  validation_info or {},
                                  self.repository,
                                  self.cleaned_data['parent_id'])

        return create_filediffs(
            diff_file_contents=diff_file.read(),
            parent_diff_file_contents=parent_diff_file_contents,
            repository=self.repository,
            basedir='',
            base_commit_id=base_commit_id,
            get_file_exists=get_file_exists,
            diffset=diffset,
            request=self.request,
            diffcommit=None,
            validate_only=True)
| |
from __future__ import absolute_import, unicode_literals
from django.core.exceptions import ValidationError
from django.forms import Form
from django.forms.fields import IntegerField, BooleanField
from django.forms.util import ErrorList
from django.forms.widgets import Media, HiddenInput
from django.utils.encoding import python_2_unicode_compatible
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.six.moves import xrange
from django.utils.translation import ungettext, ugettext as _
__all__ = ('BaseFormSet', 'all_valid')

# special field names
# Names of the hidden counter fields rendered by the ManagementForm, and of
# the per-form ordering/deletion fields added by add_fields().
TOTAL_FORM_COUNT = 'TOTAL_FORMS'
INITIAL_FORM_COUNT = 'INITIAL_FORMS'
MAX_NUM_FORM_COUNT = 'MAX_NUM_FORMS'
ORDERING_FIELD_NAME = 'ORDER'
DELETION_FIELD_NAME = 'DELETE'

# default maximum number of forms in a formset, to prevent memory exhaustion
DEFAULT_MAX_NUM = 1000
class ManagementForm(Form):
    """
    ``ManagementForm`` is used to keep track of how many form instances
    are displayed on the page. If adding new forms via javascript, you should
    increment the count field of this form as well.
    """
    def __init__(self, *args, **kwargs):
        # NOTE(review): these assignments mutate the class-level base_fields
        # dict shared across instances; each construction simply re-sets the
        # same keys to equivalent fields, so the net effect is stable.
        self.base_fields[TOTAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
        self.base_fields[INITIAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
        # MAX_NUM_FORM_COUNT is output with the rest of the management form,
        # but only for the convenience of client-side code. The POST
        # value of MAX_NUM_FORM_COUNT returned from the client is not checked.
        self.base_fields[MAX_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
        super(ManagementForm, self).__init__(*args, **kwargs)
@python_2_unicode_compatible
class BaseFormSet(object):
"""
A collection of instances of the same Form class.
"""
def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
             initial=None, error_class=ErrorList):
    """Store the formset state and eagerly build its member forms."""
    # Bound when any submitted data or files were supplied.
    self.is_bound = data is not None or files is not None
    self.data = data or {}
    self.files = files or {}
    self.prefix = prefix or self.get_default_prefix()
    self.auto_id = auto_id
    self.initial = initial
    self.error_class = error_class
    # Populated lazily by full_clean().
    self._errors = None
    self._non_form_errors = None
    # construct the forms in the formset
    self._construct_forms()
def __str__(self):
    # Default rendering is the table layout.
    return self.as_table()

def __iter__(self):
    """Yields the forms in the order they should be rendered"""
    return iter(self.forms)

def __getitem__(self, index):
    """Returns the form at the given index, based on the rendering order"""
    return self.forms[index]

def __len__(self):
    # Number of member forms (the management form is not counted).
    return len(self.forms)

def __bool__(self):
    """All formsets have a management form which is not included in the length"""
    return True

def __nonzero__(self):  # Python 2 compatibility
    return type(self).__bool__(self)
@property
def management_form(self):
    """Returns the ManagementForm instance for this FormSet."""
    if not self.is_bound:
        # Unbound: pre-fill the hidden counters from the current state.
        return ManagementForm(auto_id=self.auto_id, prefix=self.prefix,
                              initial={
                                  TOTAL_FORM_COUNT: self.total_form_count(),
                                  INITIAL_FORM_COUNT: self.initial_form_count(),
                                  MAX_NUM_FORM_COUNT: self.max_num,
                              })
    bound_form = ManagementForm(self.data, auto_id=self.auto_id,
                                prefix=self.prefix)
    if not bound_form.is_valid():
        raise ValidationError(
            'ManagementForm data is missing or has been tampered with')
    return bound_form
def total_form_count(self):
    """Returns the total number of forms in this FormSet."""
    if self.is_bound:
        # Cap the client-supplied count at absolute_max; this is DoS
        # protection to keep the server from instantiating arbitrary
        # numbers of forms.
        claimed = self.management_form.cleaned_data[TOTAL_FORM_COUNT]
        return min(claimed, self.absolute_max)
    initial_forms = self.initial_form_count()
    total_forms = initial_forms + self.extra
    # Allow all existing related objects/inlines to be displayed,
    # but don't allow extra beyond max_num.
    if 0 <= self.max_num < initial_forms:
        return initial_forms
    if 0 <= self.max_num < total_forms:
        return self.max_num
    return total_forms
def initial_form_count(self):
    """Returns the number of forms that are required in this FormSet."""
    if self.is_bound:
        return self.management_form.cleaned_data[INITIAL_FORM_COUNT]
    # Use the length of the initial data if it's there, 0 otherwise.
    return len(self.initial) if self.initial else 0
def _construct_forms(self):
    """Instantiate every member form into self.forms."""
    self.forms = []
    # DoS protection is included in total_form_count()
    form_count = self.total_form_count()
    for index in xrange(form_count):
        self.forms.append(self._construct_form(index))
def _construct_form(self, i, **kwargs):
    """
    Instantiates and returns the i-th form instance in a formset.
    """
    form_kwargs = {
        'auto_id': self.auto_id,
        'prefix': self.add_prefix(i),
        'error_class': self.error_class,
    }
    if self.is_bound:
        form_kwargs['data'] = self.data
        form_kwargs['files'] = self.files
    if self.initial and 'initial' not in kwargs:
        # Out-of-range indexes simply get no initial data.
        try:
            form_kwargs['initial'] = self.initial[i]
        except IndexError:
            pass
    # Allow extra forms to be empty.
    if i >= self.initial_form_count():
        form_kwargs['empty_permitted'] = True
    form_kwargs.update(kwargs)
    new_form = self.form(**form_kwargs)
    self.add_fields(new_form, i)
    return new_form
@property
def initial_forms(self):
    """Return a list of all the initial forms in this formset."""
    count = self.initial_form_count()
    return self.forms[:count]

@property
def extra_forms(self):
    """Return a list of all the extra forms in this formset."""
    count = self.initial_form_count()
    return self.forms[count:]
@property
def empty_form(self):
    """A blank template form (prefixed '__prefix__') for client-side use."""
    template = self.form(
        auto_id=self.auto_id,
        prefix=self.add_prefix('__prefix__'),
        empty_permitted=True,
    )
    self.add_fields(template, None)
    return template
@property
def cleaned_data(self):
    """
    Returns a list of form.cleaned_data dicts for every form in self.forms.
    """
    if not self.is_valid():
        raise AttributeError(
            "'%s' object has no attribute 'cleaned_data'" % self.__class__.__name__)
    return [form.cleaned_data for form in self.forms]
@property
def deleted_forms(self):
    """
    Returns a list of forms that have been marked for deletion.
    """
    if not self.is_valid() or not self.can_delete:
        return []
    # Compute (and cache on first access) the indexes of forms whose
    # deletion widget was set to True.
    if not hasattr(self, '_deleted_form_indexes'):
        marked = []
        for index in range(0, self.total_form_count()):
            form = self.forms[index]
            # if this is an extra form and hasn't changed, don't consider it
            if index >= self.initial_form_count() and not form.has_changed():
                continue
            if self._should_delete_form(form):
                marked.append(index)
        self._deleted_form_indexes = marked
    return [self.forms[index] for index in self._deleted_form_indexes]
@property
def ordered_forms(self):
    """
    Returns a list of form in the order specified by the incoming data.
    Raises an AttributeError if ordering is not allowed.
    """
    if not self.is_valid() or not self.can_order:
        raise AttributeError("'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__)
    # Construct _ordering, which is a list of (form_index, order_field_value)
    # tuples. After constructing this list, we'll sort it by order_field_value
    # so we have a way to get to the form indexes in the order specified
    # by the form data. The result is cached on the instance after the
    # first access.
    if not hasattr(self, '_ordering'):
        self._ordering = []
        for i in range(0, self.total_form_count()):
            form = self.forms[i]
            # if this is an extra form and hasn't changed, don't consider it
            if i >= self.initial_form_count() and not form.has_changed():
                continue
            # don't add data marked for deletion to self.ordered_data
            if self.can_delete and self._should_delete_form(form):
                continue
            self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME]))
        # After we're done populating self._ordering, sort it.
        # A sort function to order things numerically ascending, but
        # None should be sorted below anything else. Allowing None as
        # a comparison value makes it so we can leave ordering fields
        # blank.

        def compare_ordering_key(k):
            if k[1] is None:
                return (1, 0)  # +infinity, larger than any number
            return (0, k[1])
        self._ordering.sort(key=compare_ordering_key)
    # Return a list of form.cleaned_data dicts in the order specified by
    # the form data.
    return [self.forms[i[0]] for i in self._ordering]
@classmethod
def get_default_prefix(cls):
    """Prefix used for the formset's field names when none is supplied."""
    return 'form'

def non_form_errors(self):
    """
    Returns an ErrorList of errors that aren't associated with a particular
    form -- i.e., from formset.clean(). Returns an empty ErrorList if there
    are none.
    """
    if self._non_form_errors is None:
        return self.error_class()
    return self._non_form_errors
@property
def errors(self):
    """
    Returns a list of form.errors for every form in self.forms.
    """
    if self._errors is None:
        # Lazily trigger full validation on first access.
        self.full_clean()
    return self._errors

def _should_delete_form(self, form):
    """
    Returns whether or not the form was marked for deletion.
    """
    return form.cleaned_data.get(DELETION_FIELD_NAME, False)
def is_valid(self):
    """
    Returns True if every form in self.forms is valid.
    """
    if not self.is_bound:
        return False
    # Touch self.errors so full_clean() runs for the whole set. We loop
    # over every form.errors here rather than short circuiting on the
    # first failure to make sure validation gets triggered for every form.
    self.errors
    all_forms_valid = True
    for index in range(0, self.total_form_count()):
        form = self.forms[index]
        if self.can_delete and self._should_delete_form(form):
            # This form is going to be deleted so any of its errors
            # should not cause the entire formset to be invalid.
            continue
        if not form.is_valid():
            all_forms_valid = False
    return all_forms_valid and not bool(self.non_form_errors())
def full_clean(self):
    """
    Cleans all of self.data and populates self._errors.
    """
    self._errors = []
    if not self.is_bound:  # Stop further processing.
        return
    for i in range(0, self.total_form_count()):
        form = self.forms[i]
        # Accessing form.errors triggers each member form's own validation.
        self._errors.append(form.errors)
    try:
        # The raw management-form count is checked against absolute_max as
        # a hard upper bound even when validate_max is off.
        if (self.validate_max and self.total_form_count() > self.max_num) or \
                self.management_form.cleaned_data[TOTAL_FORM_COUNT] > self.absolute_max:
            raise ValidationError(ungettext(
                "Please submit %d or fewer forms.",
                "Please submit %d or fewer forms.", self.max_num) % self.max_num)
        # Give self.clean() a chance to do cross-form validation.
        self.clean()
    except ValidationError as e:
        # Formset-wide failures are collected separately from per-form ones.
        self._non_form_errors = self.error_class(e.messages)
    def clean(self):
        """
        Hook for doing any extra formset-wide cleaning after Form.clean() has
        been called on every form. Any ValidationError raised by this method
        will not be associated with a particular form; it will be accessible
        via formset.non_form_errors()
        """
        pass
def has_changed(self):
"""
Returns true if data in any form differs from initial.
"""
return any(form.has_changed() for form in self)
def add_fields(self, form, index):
"""A hook for adding extra fields on to each form instance."""
if self.can_order:
# Only pre-fill the ordering field for initial forms.
if index is not None and index < self.initial_form_count():
form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), initial=index+1, required=False)
else:
form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), required=False)
if self.can_delete:
form.fields[DELETION_FIELD_NAME] = BooleanField(label=_('Delete'), required=False)
def add_prefix(self, index):
return '%s-%s' % (self.prefix, index)
def is_multipart(self):
"""
Returns True if the formset needs to be multipart, i.e. it
has FileInput. Otherwise, False.
"""
if self.forms:
return self.forms[0].is_multipart()
else:
return self.empty_form.is_multipart()
@property
def media(self):
# All the forms on a FormSet are the same, so you only need to
# interrogate the first form for media.
if self.forms:
return self.forms[0].media
else:
return self.empty_form.media
def as_table(self):
"Returns this formset rendered as HTML <tr>s -- excluding the <table></table>."
# XXX: there is no semantic division between forms here, there
# probably should be. It might make sense to render each form as a
# table row with each field as a td.
forms = ' '.join([form.as_table() for form in self])
return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def as_p(self):
"Returns this formset rendered as HTML <p>s."
forms = ' '.join([form.as_p() for form in self])
return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def as_ul(self):
"Returns this formset rendered as HTML <li>s."
forms = ' '.join([form.as_ul() for form in self])
return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def formset_factory(form, formset=BaseFormSet, extra=1, can_order=False,
                    can_delete=False, max_num=None, validate_max=False):
    """Return a FormSet for the given form class."""
    if max_num is None:
        max_num = DEFAULT_MAX_NUM
    # absolute_max is a hard limit on forms instantiated, to prevent
    # memory-exhaustion attacks: max_num + DEFAULT_MAX_NUM (which is
    # 2 * DEFAULT_MAX_NUM if max_num was None in the first place).
    absolute_max = max_num + DEFAULT_MAX_NUM
    attrs = {
        'form': form,
        'extra': extra,
        'can_order': can_order,
        'can_delete': can_delete,
        'max_num': max_num,
        'absolute_max': absolute_max,
        'validate_max': validate_max,
    }
    return type(form.__name__ + str('FormSet'), (formset,), attrs)
def all_valid(formsets):
    """Returns true if every formset in formsets is valid."""
    # Validate every formset (no short-circuiting) so that validation is
    # triggered on each one, even after the first failure.
    results = [formset.is_valid() for formset in formsets]
    return all(results)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# coding=utf-8
# Copyright 2013 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for Native IPMI power driver module.
"""
import mock
from ironic.common import exception
from ironic.common import states
from ironic.conductor import task_manager
from ironic.db import api as db_api
from ironic.drivers.modules import ipminative
from ironic.tests import base
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from oslo.config import cfg
CONF = cfg.CONF
class IPMINativePrivateMethodTestCase(base.TestCase):
    """Test cases for ipminative private methods."""
    def setUp(self):
        super(IPMINativePrivateMethodTestCase, self).setUp()
        n = db_utils.get_test_node(
            driver='fake_ipminative',
            driver_info=db_utils.ipmi_info)
        self.dbapi = db_api.get_instance()
        self.node = self.dbapi.create_node(n)
        self.info = ipminative._parse_driver_info(self.node)
        # Replace the pyghmi IPMI Command class with a mock for every test;
        # the patch is undone automatically via addCleanup.
        ipmi_patch = mock.patch('pyghmi.ipmi.command.Command')
        self.ipmi_mock = ipmi_patch.start()
        self.addCleanup(ipmi_patch.stop)
    def test__parse_driver_info(self):
        # make sure we get back the expected things
        self.assertIsNotNone(self.info.get('address'))
        self.assertIsNotNone(self.info.get('username'))
        self.assertIsNotNone(self.info.get('password'))
        self.assertIsNotNone(self.info.get('uuid'))
        # make sure error is raised when info, eg. username, is missing
        _driver_info = {
            'ipmi': {
                "address": "2.2.3.4",
                "password": "fake",
            }
        }
        node = db_utils.get_test_node(driver_info=_driver_info)
        self.assertRaises(exception.InvalidParameterValue,
                          ipminative._parse_driver_info,
                          node)
    def test__power_status_on(self):
        # 'on' reported by BMC maps to states.POWER_ON.
        ipmicmd = self.ipmi_mock.return_value
        ipmicmd.get_power.return_value = {'powerstate': 'on'}
        state = ipminative._power_status(self.info)
        ipmicmd.get_power.assert_called_once_with()
        self.assertEqual(state, states.POWER_ON)
    def test__power_status_off(self):
        # 'off' reported by BMC maps to states.POWER_OFF.
        ipmicmd = self.ipmi_mock.return_value
        ipmicmd.get_power.return_value = {'powerstate': 'off'}
        state = ipminative._power_status(self.info)
        ipmicmd.get_power.assert_called_once_with()
        self.assertEqual(state, states.POWER_OFF)
    def test__power_status_error(self):
        # Any unrecognized power state maps to states.ERROR.
        ipmicmd = self.ipmi_mock.return_value
        ipmicmd.get_power.return_value = {'powerstate': 'Error'}
        state = ipminative._power_status(self.info)
        ipmicmd.get_power.assert_called_once_with()
        self.assertEqual(state, states.ERROR)
    def test__power_on(self):
        # The configured native_ipmi_waiting_time is passed as the timeout.
        ipmicmd = self.ipmi_mock.return_value
        ipmicmd.set_power.return_value = {'powerstate': 'on'}
        self.config(native_ipmi_waiting_time=400)
        state = ipminative._power_on(self.info)
        ipmicmd.set_power.assert_called_once_with('on', 400)
        self.assertEqual(state, states.POWER_ON)
    def test__power_off(self):
        ipmicmd = self.ipmi_mock.return_value
        ipmicmd.set_power.return_value = {'powerstate': 'off'}
        self.config(native_ipmi_waiting_time=500)
        state = ipminative._power_off(self.info)
        ipmicmd.set_power.assert_called_once_with('off', 500)
        self.assertEqual(state, states.POWER_OFF)
    def test__reboot(self):
        # Reboot is implemented as the pyghmi 'boot' power action.
        ipmicmd = self.ipmi_mock.return_value
        ipmicmd.set_power.return_value = {'powerstate': 'on'}
        self.config(native_ipmi_waiting_time=600)
        state = ipminative._reboot(self.info)
        ipmicmd.set_power.assert_called_once_with('boot', 600)
        self.assertEqual(state, states.POWER_ON)
class IPMINativeDriverTestCase(db_base.DbTestCase):
    """Test cases for ipminative.NativeIPMIPower class functions."""
    def setUp(self):
        super(IPMINativeDriverTestCase, self).setUp()
        self.dbapi = db_api.get_instance()
        self.driver = mgr_utils.get_mocked_node_manager(
            driver='fake_ipminative')
        n = db_utils.get_test_node(
            driver='fake_ipminative',
            driver_info=db_utils.ipmi_info)
        # NOTE: a redundant second `self.dbapi = db_api.get_instance()` was
        # removed here; self.dbapi is already initialized above.
        self.node = self.dbapi.create_node(n)
        self.info = ipminative._parse_driver_info(self.node)
    def test_get_power_state(self):
        with mock.patch('pyghmi.ipmi.command.Command') as ipmi_mock:
            ipmicmd = ipmi_mock.return_value
            return_values = [{'powerstate': 'error'},
                             {'powerstate': 'on'},
                             {'powerstate': 'off'}]
            def side_effect():
                # pop() consumes from the end: off, then on, then error.
                return return_values.pop()
            ipmicmd.get_power.side_effect = side_effect
            pstate = self.driver.power.get_power_state(None, self.node)
            self.assertEqual(pstate, states.POWER_OFF)
            pstate = self.driver.power.get_power_state(None, self.node)
            self.assertEqual(pstate, states.POWER_ON)
            pstate = self.driver.power.get_power_state(None, self.node)
            self.assertEqual(pstate, states.ERROR)
            # Bug fix: the original `ipmicmd.get_power.assert_called` merely
            # accessed a mock attribute and asserted nothing; verify the call
            # count explicitly instead.
            self.assertEqual(3, ipmicmd.get_power.call_count)
    def test_set_power_on_ok(self):
        with mock.patch.object(ipminative, '_power_on') as power_on_mock:
            power_on_mock.return_value = states.POWER_ON
            with task_manager.acquire([self.node['uuid']]) as task:
                self.driver.power.set_power_state(
                    task, self.node, states.POWER_ON)
            power_on_mock.assert_called_once_with(self.info)
    def test_set_power_off_ok(self):
        with mock.patch.object(ipminative, '_power_off') as power_off_mock:
            power_off_mock.return_value = states.POWER_OFF
            with task_manager.acquire([self.node['uuid']]) as task:
                self.driver.power.set_power_state(
                    task, self.node, states.POWER_OFF)
            power_off_mock.assert_called_once_with(self.info)
    def test_set_power_on_fail(self):
        # A BMC response other than 'on' must raise PowerStateFailure.
        with mock.patch('pyghmi.ipmi.command.Command') as ipmi_mock:
            ipmicmd = ipmi_mock.return_value
            ipmicmd.set_power.return_value = {'powerstate': 'error'}
            self.config(native_ipmi_waiting_time=500)
            with task_manager.acquire([self.node['uuid']]) as task:
                self.assertRaises(exception.PowerStateFailure,
                                  self.driver.power.set_power_state,
                                  task,
                                  self.node,
                                  states.POWER_ON)
            ipmicmd.set_power.assert_called_once_with('on', 500)
    def test_set_boot_device_ok(self):
        with mock.patch('pyghmi.ipmi.command.Command') as ipmi_mock:
            ipmicmd = ipmi_mock.return_value
            ipmicmd.set_bootdev.return_value = None
            with task_manager.acquire([self.node['uuid']]) as task:
                self.driver.power._set_boot_device(task,
                                                   self.node,
                                                   'pxe')
            ipmicmd.set_bootdev.assert_called_once_with('pxe')
    def test_set_boot_device_bad_device(self):
        # Unsupported boot devices are rejected before touching the BMC.
        with task_manager.acquire([self.node['uuid']]) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              self.driver.power._set_boot_device,
                              task,
                              self.node,
                              'fake-device')
    def test_reboot_ok(self):
        with mock.patch.object(ipminative, '_reboot') as reboot_mock:
            reboot_mock.return_value = None
            with task_manager.acquire([self.node['uuid']]) as task:
                self.driver.power.reboot(task, self.node)
            reboot_mock.assert_called_once_with(self.info)
    def test_reboot_fail(self):
        with mock.patch('pyghmi.ipmi.command.Command') as ipmi_mock:
            ipmicmd = ipmi_mock.return_value
            ipmicmd.set_power.return_value = {'powerstate': 'error'}
            self.config(native_ipmi_waiting_time=500)
            with task_manager.acquire([self.node['uuid']]) as task:
                self.assertRaises(exception.PowerStateFailure,
                                  self.driver.power.reboot,
                                  task,
                                  self.node)
            ipmicmd.set_power.assert_called_once_with('boot', 500)
| |
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from datetime import datetime, timedelta
from dateutil import parser
from future.utils import native
from requests.exceptions import RequestException
from sqlalchemy import Column, Integer, Float, DateTime, String, Unicode, ForeignKey, Table, or_, \
and_
from sqlalchemy.orm import relation
from sqlalchemy.orm.exc import MultipleResultsFound
from flexget import db_schema, plugin
from flexget.event import event
from flexget.utils import requests
from flexget.utils.database import with_session, json_synonym
from flexget.utils.tools import split_title_year
# Module-level logger for the TVMaze API plugin.
log = logging.getLogger('api_tvmaze')
# Current schema version of the versioned 'tvmaze' database.
DB_VERSION = 6
Base = db_schema.versioned_base('tvmaze', DB_VERSION)
UPDATE_INTERVAL = 7  # Used for expiration, number is in days
BASE_URL = 'http://api.tvmaze.com'
# URL templates (relative to BASE_URL) for each supported lookup type.
# The '{}' placeholders are filled positionally by tvmaze_lookup().
TVMAZE_ENDPOINTS = {
    'tvmaze_id': '/shows/{}',
    'imdb_id': '/lookup/shows?imdb={}',
    'tvrage_id': '/lookup/shows?tvrage={}',
    'thetvdb_id': '/lookup/shows?thetvdb={}',
    'show_name': '/singlesearch/shows?q={}',
    'date': '/shows/{}/episodesbydate?date={}',
    'number': '/shows/{}/episodebynumber?season={}&number={}'
}
@db_schema.upgrade('tvmaze')
def upgrade(ver, session):
    """Schema upgrade hook: versions older than 6 cannot be migrated in place."""
    if ver is not None and ver >= 6:
        return ver
    raise db_schema.UpgradeImpossible
class TVMazeGenre(Base):
    # Normalized genre names, linked to series through the
    # 'tvmaze_series_genres' association table below.
    __tablename__ = 'tvmaze_genres'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(Unicode, unique=True)
# Association table implementing the many-to-many relation between series
# and genres.
genres_table = Table('tvmaze_series_genres', Base.metadata,
                     Column('series_id', Integer, ForeignKey('tvmaze_series.tvmaze_id')),
                     Column('genre_id', Integer, ForeignKey('tvmaze_genres.id')))
Base.register_table(genres_table)
class TVMazeLookup(Base):
    """Maps a lower-cased search string to the series it resolved to."""
    __tablename__ = 'tvmaze_lookup'
    id = Column(Integer, primary_key=True, autoincrement=True)
    search_name = Column(Unicode, index=True, unique=True)
    series_id = Column(Integer, ForeignKey('tvmaze_series.tvmaze_id'))
    series = relation('TVMazeSeries', backref='search_strings')
    def __init__(self, search_name, series_id=None, series=None):
        # Search strings are stored lower-cased so lookups are case-insensitive.
        self.search_name = search_name.lower()
        if series_id:
            self.series_id = series_id
        if series:
            self.series = series
    def __repr__(self):
        # Bug fix: the original format string was missing the closing '>'.
        return '<TVMazeLookup(search_name={0},series_id={1})>'.format(self.search_name, self.series_id)
class TVMazeSeries(Base):
    # Cached copy of a TVMaze "show" record, refreshed when expired.
    __tablename__ = 'tvmaze_series'
    tvmaze_id = Column(Integer, primary_key=True)
    status = Column(Unicode)
    rating = Column(Float)
    genres = relation(TVMazeGenre, secondary=genres_table)
    weight = Column(Integer)
    updated = Column(DateTime)  # last time show was updated at tvmaze
    name = Column(Unicode)
    language = Column(Unicode)
    # schedule is stored as JSON text; json_synonym handles (de)serialization.
    _schedule = Column('schedule', Unicode)
    schedule = json_synonym('_schedule')
    url = Column(String)
    original_image = Column(String)
    medium_image = Column(String)
    tvdb_id = Column(Integer)
    tvrage_id = Column(Integer)
    premiered = Column(DateTime)
    year = Column(Integer)
    summary = Column(Unicode)
    webchannel = Column(String)
    runtime = Column(Integer)
    show_type = Column(String)
    network = Column(Unicode)
    episodes = relation('TVMazeEpisodes', order_by='TVMazeEpisodes.season_number', cascade='all, delete, delete-orphan',
                        backref='series')
    last_update = Column(DateTime)  # last time we updated the db for the show
    def __init__(self, series, session):
        # `series` is the raw TVMaze API payload (a dict).
        self.tvmaze_id = series['id']
        self.update(series, session)
    def to_dict(self):
        """Return a plain-dict representation of this series row."""
        return {
            'tvmaze_id': self.tvmaze_id,
            'status': self.status,
            'rating': self.rating,
            'genres': [genre.name for genre in self.genres],
            'weight': self.weight,
            'updated': self.updated,
            'name': self.name,
            'language': self.language,
            'schedule': self.schedule,
            'url': self.url,
            'original_image': self.original_image,
            'medium_image': self.medium_image,
            'tvdb_id': self.tvdb_id,
            'tvrage_id': self.tvrage_id,
            'premiered': self.premiered,
            'year': self.year,
            'summary': self.summary,
            'webchannel': self.webchannel,
            'runtime': self.runtime,
            'show_type': self.show_type,
            'network': self.network,
            'last_update': self.last_update
        }
    def update(self, series, session):
        """Populate/refresh all columns from a TVMaze API 'show' payload."""
        self.status = series['status']
        self.rating = series['rating']['average']
        self.weight = series['weight']
        self.updated = datetime.fromtimestamp(series['updated'])
        self.name = series['name']
        self.language = series['language']
        self.schedule = series['schedule']
        self.url = series['url']
        # Optional nested fields: guard against missing 'image'/'network' etc.
        self.original_image = series.get('image').get('original') if series.get('image') else None
        self.medium_image = series.get('image').get('medium') if series.get('image') else None
        self.tvdb_id = series['externals'].get('thetvdb')
        self.tvrage_id = series['externals'].get('tvrage')
        self.premiered = parser.parse(series.get('premiered'), ignoretz=True) if series.get('premiered') else None
        # Premiere dates come in ISO form, so the year is the first 4 chars.
        self.year = int(series.get('premiered')[:4]) if series.get('premiered') else None
        self.summary = series['summary']
        self.webchannel = series.get('web_channel')['name'] if series.get('web_channel') else None
        self.runtime = series['runtime']
        self.show_type = series['type']
        self.network = series.get('network')['name'] if series.get('network') else None
        self.last_update = datetime.now()
        # Replace (not extend) the genre list in place.
        self.genres[:] = get_db_genres(series['genres'], session)
    def __repr__(self):
        return '<TVMazeSeries(title=%s,id=%s,last_update=%s)>' % (self.name, self.tvmaze_id, self.last_update)
    def __str__(self):
        return self.name
    @property
    def expired(self):
        """True when the cached row is older than UPDATE_INTERVAL days."""
        if not self.last_update:
            log.debug('no last update attribute, series set for update')
            return True
        time_dif = datetime.now() - self.last_update
        expiration = time_dif.days > UPDATE_INTERVAL
        return expiration
class TVMazeEpisodes(Base):
    # Cached copy of a TVMaze "episode" record, owned by a TVMazeSeries row.
    __tablename__ = 'tvmaze_episode'
    tvmaze_id = Column(Integer, primary_key=True)
    series_id = Column(Integer, ForeignKey('tvmaze_series.tvmaze_id'), nullable=False)
    number = Column(Integer, nullable=False)
    season_number = Column(Integer, nullable=False)
    title = Column(Unicode)
    airdate = Column(DateTime)
    url = Column(String)
    original_image = Column(String)
    medium_image = Column(String)
    airstamp = Column(DateTime)
    runtime = Column(Integer)
    summary = Column(Unicode)
    # Set to datetime.now() whenever the row is refreshed from the API.
    last_update = Column(DateTime)
    def to_dict(self):
        """Return a plain-dict representation of this episode row."""
        return {
            'tvmaze_id': self.tvmaze_id,
            'series_id': self.series_id,
            'number': self.number,
            'season_number': self.season_number,
            'title': self.title,
            'airdate': self.airdate,
            'url': self.url,
            'original_image': self.original_image,
            'medium_image': self.medium_image,
            'airstamp': self.airstamp,
            'runtime': self.runtime,
            'summary': self.summary,
            'last_update': self.last_update
        }
    def __init__(self, episode, series_id):
        # `episode` is the raw TVMaze API payload (a dict).
        self.series_id = series_id
        self.tvmaze_id = episode['id']
        self.season_number = episode['season']
        self.number = episode['number']
        self.update(episode)
    def update(self, episode):
        """Populate/refresh the mutable columns from an API 'episode' payload."""
        self.summary = episode['summary']
        self.title = episode['name']
        self.airdate = datetime.strptime(episode.get('airdate'), '%Y-%m-%d') if episode.get('airdate') else None
        self.url = episode['url']
        # Optional nested fields: guard against missing 'image'/'airstamp'.
        self.original_image = episode.get('image').get('original') if episode.get('image') else None
        self.medium_image = episode.get('image').get('medium') if episode.get('image') else None
        self.airstamp = parser.parse(episode.get('airstamp'), ignoretz=True) if episode.get('airstamp') else None
        self.runtime = episode['runtime']
        self.last_update = datetime.now()
    @property
    def expired(self):
        """True when the cached row is older than UPDATE_INTERVAL days."""
        if not self.last_update:
            log.debug('no last update attribute, episode set for update')
            return True
        time_dif = datetime.now() - self.last_update
        expiration = time_dif.days > UPDATE_INTERVAL
        if expiration:
            log.debug('episode %s, season %s for series %s is expired.', self.number, self.season_number,
                      self.series_id)
        return expiration
def get_db_genres(genres, session):
    """Return TVMazeGenre rows for the given names, creating any missing ones."""
    db_genres = []
    for genre_name in genres:
        genre_row = session.query(TVMazeGenre).filter(TVMazeGenre.name == genre_name).first()
        if genre_row is None:
            genre_row = TVMazeGenre(name=genre_name)
            log.trace('adding genre %s to db', genre_name)
            session.add(genre_row)
        else:
            log.trace('genre %s found in db, returning', genre_row.name)
        db_genres.append(genre_row)
    return db_genres
def search_params_for_series(**lookup_params):
    """Extract the identifiers used for a series cache lookup."""
    series_title = lookup_params.get('title') or lookup_params.get('series_name')
    search_params = {
        'tvmaze_id': lookup_params.get('tvmaze_id'),
        'tvdb_id': lookup_params.get('tvdb_id'),
        'tvrage_id': lookup_params.get('tvrage_id'),
        'name': series_title,
    }
    log.debug('returning search params for series lookup: {0}'.format(search_params))
    return search_params
@with_session
def from_cache(session=None, search_params=None, cache_type=None):
    """
    Returns a result from requested table based on search params
    :param session: Current session
    :param search_params: Relevant search params. Should match table column names
    :param cache_type: Object for search
    :return: Query result
    """
    if not any(search_params.values()):
        raise LookupError('No parameters sent for cache lookup')
    log.debug('searching db {0} for the values {1}'.format(cache_type.__tablename__, list(search_params.items())))
    # Match on any of the provided identifiers; empty values are skipped.
    criteria = or_(getattr(cache_type, col) == val for col, val in search_params.items() if val)
    return session.query(cache_type).filter(criteria).first()
@with_session
def from_lookup(session=None, title=None):
    """Fetch a previously saved search-string -> series mapping, if any."""
    log.debug('searching lookup table using title {0}'.format(title))
    query = session.query(TVMazeLookup).filter(TVMazeLookup.search_name == title.lower())
    return query.first()
@with_session
def add_to_lookup(session=None, title=None, series=None):
    """Save *title* as a search alias for *series* unless it is already stored."""
    log.debug('trying to add search title {0} to series {1} in lookup table'.format(title, series.name))
    existing = session.query(TVMazeLookup).filter(TVMazeLookup.search_name == title.lower()).first()
    if existing:
        log.debug('title {0} already exist for series {1}, no need to save lookup'.format(title, series.name))
        return
    session.add(TVMazeLookup(search_name=title, series=series))
def prepare_lookup_for_tvmaze(**lookup_params):
    """
    Return a dict of params which is valid with tvmaze API lookups
    :param lookup_params: Search parameters
    :return: Dict of tvmaze recognizable key words
    """
    title = None
    series_name = (lookup_params.get('series_name') or
                   lookup_params.get('show_name') or
                   lookup_params.get('title'))
    if series_name:
        title, _ = split_title_year(series_name)
        # Support for when title is just a number
        if not title:
            title = series_name
    prepared_params = {
        'tvmaze_id': lookup_params.get('tvmaze_id'),
        'thetvdb_id': lookup_params.get('tvdb_id') or lookup_params.get('trakt_series_tvdb_id'),
        'tvrage_id': lookup_params.get('tvrage_id') or lookup_params.get('trakt_series_tvrage_id'),
        'imdb_id': lookup_params.get('imdb_id'),
        # Ensure we send native types to tvmaze lib as it does not handle new types very well
        'show_name': native(title) if title else None,
    }
    return prepared_params
class APITVMaze(object):
    """Lookup API: resolve series/episodes via DB cache with TVMaze fallback."""
    @staticmethod
    @with_session
    def series_lookup(session=None, only_cached=False, **lookup_params):
        """Resolve a series from cache, the lookup table, or the TVMaze API.

        :param session: DB session (injected by the with_session decorator)
        :param only_cached: if True, never hit the API; raise when not cached
        :param lookup_params: series identifiers (ids and/or title)
        :raises LookupError: when the series cannot be found
        """
        search_params = search_params_for_series(**lookup_params)
        # Searching cache first
        series = from_cache(session=session, cache_type=TVMazeSeries, search_params=search_params)
        search = None
        # Preparing search from lookup table
        title = lookup_params.get('series_name') or lookup_params.get('show_name') or lookup_params.get('title')
        if not series and title:
            log.debug('did not find exact match for series {0} in cache, looking in search table'.format(
                search_params['name']))
            search = from_lookup(session=session, title=title)
            if search and search.series:
                series = search.series
                log.debug('found series {0} from search table'.format(series.name))
        if only_cached:
            if series:  # If force_cache is True, return series even if it expired
                log.debug('forcing cache for series {0}'.format(series.name))
                return series
            raise LookupError('Series %s not found from cache' % lookup_params)
        if series and not series.expired:
            log.debug('returning series {0} from cache'.format(series.name))
            return series
        # Cache miss or expired entry: go out to the TVMaze API.
        prepared_params = prepare_lookup_for_tvmaze(**lookup_params)
        log.debug('trying to fetch series {0} from tvmaze'.format(title))
        tvmaze_show = get_show(**prepared_params)
        # See if series already exist in cache
        series = session.query(TVMazeSeries).filter(TVMazeSeries.tvmaze_id == tvmaze_show['id']).first()
        if series:
            log.debug('series {0} is already in cache, checking for expiration'.format(series.name))
            if series.expired:
                series.update(tvmaze_show, session)
        else:
            log.debug('creating new series {0} in tvmaze_series db'.format(tvmaze_show['name']))
            series = TVMazeSeries(tvmaze_show, session)
            session.add(series)
        # Check if show returned from lookup table as expired. Relevant only if search by title
        if title:
            if series and title.lower() == series.name.lower():
                return series
            elif series and not search:
                # Title differs from the canonical name: remember the alias.
                log.debug('mismatch between search title {0} and series title {1}. '
                          'saving in lookup table'.format(title, series.name))
                add_to_lookup(session=session, title=title, series=series)
            elif series and search:
                log.debug('Updating search result in db')
                search.series = series
        return series
    @staticmethod
    @with_session
    def episode_lookup(session=None, only_cached=False, **lookup_params):
        """Resolve an episode by season/number ('ep') or airdate ('date').

        :param session: DB session (injected by the with_session decorator)
        :param only_cached: if True, only return an episode already cached
        :param lookup_params: series identifiers plus series_id_type,
            series_season/series_episode or series_date
        :raises LookupError: when parameters are insufficient or nothing found
        """
        series_name = lookup_params.get('series_name') or lookup_params.get('title')
        show_id = lookup_params.get('tvmaze_id') or lookup_params.get('tvdb_id')
        lookup_type = lookup_params.get('series_id_type')
        season_number = lookup_params.get('series_season')
        episode_number = lookup_params.get('series_episode')
        episode_date = lookup_params.get('series_date')
        # Verify we have enough parameters for search
        if not any([series_name, show_id]):
            raise LookupError('Not enough parameters to lookup episode')
        if lookup_type == 'sequence':
            raise LookupError('TVMaze does not support sequence type searches')
        if lookup_type == 'ep' and not all([season_number, episode_number]):
            raise LookupError('Not enough parameters to lookup episode')
        elif lookup_type == 'date' and not episode_date:
            raise LookupError('Not enough parameters to lookup episode')
        # Get series
        series = APITVMaze.series_lookup(session=session, only_cached=only_cached, **lookup_params)
        if not series:
            raise LookupError('Could not find series with the following parameters: {0}'.format(lookup_params))
        # See if episode already exists in cache
        log.debug('searching for episode of show {0} in cache'.format(series.name))
        episode = session.query(TVMazeEpisodes).filter(
            and_(TVMazeEpisodes.series_id == series.tvmaze_id,
                 TVMazeEpisodes.season_number == season_number,
                 TVMazeEpisodes.number == episode_number)
        ).one_or_none()
        # Logic for cache only mode
        if only_cached:
            if episode:
                log.debug('forcing cache for episode id {3}, number{0}, season {1} for show {2}'
                          .format(episode.number, episode.season_number, series.name, episode.tvmaze_id))
                return episode
        if episode and not episode.expired:
            log.debug('found episode id {3}, number {0}, season {1} for show {2} in cache'
                      .format(episode.number,
                              episode.season_number,
                              series.name,
                              episode.tvmaze_id))
            return episode
        # Lookup episode via its type (number or airdate)
        if lookup_type == 'date':
            episode_date = datetime.strftime(episode_date, '%Y-%m-%d')
            tvmaze_episode = get_episode(series.tvmaze_id, date=episode_date)[0]
        else:
            # TODO will this match all series_id types?
            log.debug(
                'fetching episode {0} season {1} for series_id {2} for tvmaze'.format(episode_number,
                                                                                      season_number,
                                                                                      series.tvmaze_id))
            tvmaze_episode = get_episode(series.tvmaze_id, season=season_number, number=episode_number)
        # See if episode exists in DB
        try:
            episode = session.query(TVMazeEpisodes).filter(
                or_(TVMazeEpisodes.tvmaze_id == tvmaze_episode['id'],
                    and_(
                        TVMazeEpisodes.number == tvmaze_episode['number'],
                        TVMazeEpisodes.season_number == tvmaze_episode['season'],
                        TVMazeEpisodes.series_id == series.tvmaze_id)
                    )
            ).one_or_none()
        except MultipleResultsFound:
            # TVMaze must have fucked up and now we have to clean up that mess. Delete any row for this season
            # that hasn't been updated in the last hour. Can't trust any of the cached data, but deleting new data
            # might have some unintended consequences.
            log.warning('Episode lookup in cache returned multiple results. Deleting the cached data.')
            deleted_rows = session.query(TVMazeEpisodes).filter(
                and_(
                    TVMazeEpisodes.season_number == tvmaze_episode['season'],
                    TVMazeEpisodes.series_id == series.tvmaze_id)
            ).filter(TVMazeEpisodes.last_update <= datetime.now() - timedelta(hours=1)).delete()
            log.debug('Deleted %s rows', deleted_rows)
            episode = None
        if episode:
            log.debug('found expired episode {0} in cache, refreshing data.'.format(episode.tvmaze_id))
            episode.update(tvmaze_episode)
        else:
            log.debug('creating new episode for show {0}'.format(series.name))
            episode = TVMazeEpisodes(tvmaze_episode, series.tvmaze_id)
            session.add(episode)
        return episode
def get_show(show_name=None, tvmaze_id=None, imdb_id=None, tvrage_id=None, thetvdb_id=None):
    """Fetch a show from TVMaze using the first identifier given, in priority order."""
    lookups = (('tvmaze_id', tvmaze_id),
               ('imdb_id', imdb_id),
               ('tvrage_id', tvrage_id),
               ('thetvdb_id', thetvdb_id),
               ('show_name', show_name))
    for lookup_type, value in lookups:
        if value:
            return tvmaze_lookup(lookup_type, [value])
    raise LookupError('Not enough parameters sent for series lookup')
def get_episode(series_id, date=None, number=None, season=None):
    """Fetch an episode by airdate, or by season and episode number."""
    if date:
        return tvmaze_lookup('date', [series_id, date])
    if number and season:
        return tvmaze_lookup('number', [series_id, season, number])
    raise LookupError('Not enough parameters sent for episode lookup')
def tvmaze_lookup(lookup_type, lookup_values):
    """
    Build the URL and return the reply from TVMaze API
    :param lookup_type: Selects the endpoint that will be used
    :param lookup_values: A list of values to be used in the URL
    :return: A JSON reply from the API
    """
    lookup_url = BASE_URL + TVMAZE_ENDPOINTS[lookup_type].format(*lookup_values)
    log.debug('querying tvmaze API with the following URL: %s', lookup_url)
    try:
        return requests.get(lookup_url).json()
    except RequestException as e:
        # Surface transport errors as the plugin's standard lookup failure.
        raise LookupError(e.args[0])
@event('plugin.register')
def register_plugin():
    # Register the lookup API object with FlexGet under the name 'api_tvmaze'.
    plugin.register(APITVMaze, 'api_tvmaze', api_ver=2)
| |
from mpi4py import MPI
import mpiunittest as unittest
import sys
# Predefined C datatypes.
datatypes_c = [
    MPI.CHAR, MPI.WCHAR,
    MPI.SIGNED_CHAR, MPI.SHORT, MPI.INT, MPI.LONG,
    MPI.UNSIGNED_CHAR, MPI.UNSIGNED_SHORT, MPI.UNSIGNED, MPI.UNSIGNED_LONG,
    MPI.LONG_LONG, MPI.UNSIGNED_LONG_LONG,
    MPI.FLOAT, MPI.DOUBLE, MPI.LONG_DOUBLE,
    ]
# C99 fixed-width, boolean and complex datatypes.
datatypes_c99 = [
    MPI.C_BOOL,
    MPI.INT8_T, MPI.INT16_T, MPI.INT32_T, MPI.INT64_T,
    MPI.UINT8_T, MPI.UINT16_T, MPI.UINT32_T, MPI.UINT64_T,
    MPI.C_COMPLEX, MPI.C_FLOAT_COMPLEX,
    MPI.C_DOUBLE_COMPLEX, MPI.C_LONG_DOUBLE_COMPLEX,
    ]
# Predefined Fortran datatypes.
datatypes_f = [
    MPI.CHARACTER, MPI.LOGICAL, MPI.INTEGER,
    MPI.REAL, MPI.DOUBLE_PRECISION,
    MPI.COMPLEX, MPI.DOUBLE_COMPLEX,
    ]
# Optional size-specific Fortran datatypes.
datatypes_f90 = [
    MPI.LOGICAL1, MPI.LOGICAL2, MPI.LOGICAL4, MPI.LOGICAL8,
    MPI.INTEGER1, MPI.INTEGER2, MPI.INTEGER4, MPI.INTEGER8, MPI.INTEGER16,
    MPI.REAL2, MPI.REAL4, MPI.REAL8, MPI.REAL16,
    MPI.COMPLEX4, MPI.COMPLEX8, MPI.COMPLEX16, MPI.COMPLEX32,
    ]
# MPI-specific datatypes.
datatypes_mpi = [
    MPI.PACKED, MPI.BYTE, MPI.AINT, MPI.OFFSET,
    ]
datatypes = []
datatypes += datatypes_c
datatypes += datatypes_c99
datatypes += datatypes_f
datatypes += datatypes_f90
datatypes += datatypes_mpi
# Drop placeholders that this MPI build does not actually provide.
datatypes = [t for t in datatypes if t != MPI.DATATYPE_NULL]
# Optional per-implementation remapping of expected combiner symbols,
# consulted by check_datatype_contents().
combiner_map = {}
class TestDatatype(unittest.TestCase):
def testBoolEqNe(self):
for dtype in datatypes:
self.assertTrue (not not dtype)
self.assertTrue (dtype == MPI.Datatype(dtype))
self.assertFalse(dtype != MPI.Datatype(dtype))
def testGetExtent(self):
for dtype in datatypes:
lb, ext = dtype.Get_extent()
self.assertEqual(dtype.lb, lb)
self.assertEqual(dtype.ub, lb+ext)
self.assertEqual(dtype.extent, ext)
def testGetSize(self):
for dtype in datatypes:
size = dtype.Get_size()
self.assertTrue(dtype.size, size)
    def testGetTrueExtent(self):
        # The true_lb/true_ub/true_extent properties must agree with
        # Get_true_extent().
        for dtype in datatypes:
            try:
                lb, ext = dtype.Get_true_extent()
                self.assertEqual(dtype.true_lb, lb)
                self.assertEqual(dtype.true_ub, lb+ext)
                self.assertEqual(dtype.true_extent, ext)
            except NotImplementedError:
                # Optional call; skip on MPI builds that lack it.
                self.skipTest('mpi-type-get_true_extent')
    def testGetEnvelope(self):
        for dtype in datatypes:
            try:
                envelope = dtype.Get_envelope()
            except NotImplementedError:
                self.skipTest('mpi-type-get_envelope')
            # Skip complex types on LAM/MPI, whose envelopes differ here.
            if ('LAM/MPI' == MPI.get_vendor()[0] and
                "COMPLEX" in dtype.name): continue
            ni, na, nc, nd, combiner = envelope
            # Predefined types are "named": no constructor arguments at all.
            self.assertEqual(combiner, MPI.COMBINER_NAMED)
            self.assertEqual(ni, 0)
            self.assertEqual(na, 0)
            self.assertEqual(nc, 0)
            self.assertEqual(nd, 0)
            self.assertEqual(dtype.envelope, envelope)
            self.assertEqual(dtype.combiner, combiner)
            self.assertTrue(dtype.is_named)
            self.assertTrue(dtype.is_predefined)
            # decode() on a named type returns the very same object.
            otype = dtype.decode()
            self.assertTrue(dtype is otype)
    def check_datatype_contents(self, oldtype, factory, newtype):
        """Validate envelope/contents/decode consistency of a derived datatype."""
        try:
            envelope = newtype.Get_envelope()
            contents = newtype.Get_contents()
        except NotImplementedError:
            self.skipTest('mpi-type-get_envelope')
        ni, na, nc, nd, combiner = envelope
        i, a, c, d = contents
        # Envelope counts must match the lengths of the contents arrays.
        self.assertEqual(ni, len(i))
        self.assertEqual(na, len(a))
        self.assertEqual(nc, len(c))
        self.assertEqual(nd, len(d))
        self.assertTrue(combiner != MPI.COMBINER_NAMED)
        self.assertEqual(newtype.envelope, envelope)
        self.assertEqual(newtype.contents, contents)
        self.assertEqual(newtype.combiner, combiner)
        self.assertFalse(newtype.is_named)
        # F90 parametrized types count as predefined even though derived.
        if combiner in (MPI.COMBINER_F90_INTEGER,
                        MPI.COMBINER_F90_REAL,
                        MPI.COMBINER_F90_COMPLEX,):
            self.assertTrue(newtype.is_predefined)
        else:
            self.assertFalse(newtype.is_predefined)
        # Derive the expected combiner symbol from the factory name,
        # e.g. Create_vector -> COMBINER_VECTOR.
        name = factory.__name__
        NAME = name.replace('Create_', '').upper()
        symbol = getattr(MPI, 'COMBINER_' + NAME)
        if symbol == MPI.UNDEFINED: return
        if combiner_map is None: return
        symbol = combiner_map.get(symbol, symbol)
        if symbol is None: return
        self.assertEqual(symbol, combiner)
        # decode() must allow reconstructing an equivalent datatype through
        # the named constructor with the returned keyword arguments.
        decoded = newtype.decode()
        oldtype, constructor, kargs = decoded
        constructor = 'Create_' + constructor.lower()
        newtype2 = getattr(oldtype, constructor)(**kargs)
        decoded2 = newtype2.decode()
        self.assertEqual(decoded[1], decoded2[1])
        self.assertEqual(decoded[2], decoded2[2])
        if combiner not in (MPI.COMBINER_F90_INTEGER,
                            MPI.COMBINER_F90_REAL,
                            MPI.COMBINER_F90_COMPLEX,):
            self.assertFalse(newtype2.is_predefined)
            newtype2.Free()
        else:
            self.assertTrue(newtype2.is_predefined)
def check_datatype(self, oldtype, factory, *args):
try:
if isinstance(oldtype, MPI.Datatype):
newtype = factory(oldtype, *args)
else:
newtype = factory(*args)
except NotImplementedError:
self.skipTest('mpi-type-constructor')
self.check_datatype_contents(oldtype, factory, newtype)
newtype.Commit()
self.check_datatype_contents(oldtype, factory, newtype)
combiner = newtype.Get_envelope()[-1]
if combiner not in (MPI.COMBINER_F90_INTEGER,
MPI.COMBINER_F90_REAL,
MPI.COMBINER_F90_COMPLEX,):
newtype.Free()
def testDup(self):
for dtype in datatypes:
factory = MPI.Datatype.Dup
self.check_datatype(dtype, factory)
def testCreateContiguous(self):
for dtype in datatypes:
for count in range(5):
factory = MPI.Datatype.Create_contiguous
args = (count, )
self.check_datatype(dtype, factory, *args)
def testCreateVector(self):
for dtype in datatypes:
for count in range(5):
for blocklength in range(5):
for stride in range(5):
factory = MPI.Datatype.Create_vector
args = (count, blocklength, stride)
self.check_datatype(dtype, factory, *args)
def testCreateHvector(self):
for dtype in datatypes:
for count in range(5):
for blocklength in range(5):
for stride in range(5):
factory = MPI.Datatype.Create_hvector
args = (count, blocklength, stride)
self.check_datatype(dtype, factory, *args)
def testCreateIndexed(self):
for dtype in datatypes:
for block in range(5):
blocklengths = list(range(block, block+5))
displacements = [0]
for b in blocklengths[:-1]:
stride = displacements[-1] + b * dtype.extent + 1
displacements.append(stride)
factory = MPI.Datatype.Create_indexed
args = (blocklengths, displacements)
self.check_datatype(dtype, factory, *args)
#args = (block, displacements) XXX
#self.check_datatype(dtype, factory, *args) XXX
def testCreateIndexedBlock(self):
for dtype in datatypes:
for block in range(5):
blocklengths = list(range(block, block+5))
displacements = [0]
for b in blocklengths[:-1]:
stride = displacements[-1] + b * dtype.extent + 1
displacements.append(stride)
factory = MPI.Datatype.Create_indexed_block
args = (block, displacements)
self.check_datatype(dtype, factory, *args)
def testCreateHindexed(self):
for dtype in datatypes:
for block in range(5):
blocklengths = list(range(block, block+5))
displacements = [0]
for b in blocklengths[:-1]:
stride = displacements[-1] + b * dtype.extent + 1
displacements.append(stride)
factory = MPI.Datatype.Create_hindexed
args = (blocklengths, displacements)
self.check_datatype(dtype, factory, *args)
#args = (block, displacements) XXX
#self.check_datatype(dtype, factory, *args) XXX
@unittest.skipMPI('openmpi(<=1.8.1)', MPI.VERSION == 3)
def testCreateHindexedBlock(self):
for dtype in datatypes:
for block in range(5):
displacements = [0]
for i in range(5):
stride = displacements[-1] + block * dtype.extent + 1
displacements.append(stride)
factory = MPI.Datatype.Create_hindexed_block
args = (block, displacements)
self.check_datatype(dtype, factory, *args)
def testCreateStruct(self):
for dtype1 in datatypes:
for dtype2 in datatypes:
dtypes = (dtype1, dtype2)
blocklengths = (2, 3)
displacements = [0]
for dtype in dtypes[:-1]:
stride = displacements[-1] + dtype.extent
displacements.append(stride)
factory = MPI.Datatype.Create_struct
args = (blocklengths, displacements, dtypes)
self.check_datatype(dtypes, factory, *args)
def testCreateSubarray(self):
for dtype in datatypes:
for ndim in range(1, 5):
for size in range(1, 5):
for subsize in range(1, size):
for start in range(size-subsize):
for order in [MPI.ORDER_C,
MPI.ORDER_FORTRAN,
MPI.ORDER_F,
]:
sizes = [size] * ndim
subsizes = [subsize] * ndim
starts = [start] * ndim
factory = MPI.Datatype.Create_subarray
args = sizes, subsizes, starts, order
self.check_datatype(dtype, factory, *args)
def testCreateDarray(self):
for dtype in datatypes:
for ndim in range(1, 3+1):
for size in (4, 8, 9, 27):
for rank in (0, size-1):
for dist in [MPI.DISTRIBUTE_BLOCK, MPI.DISTRIBUTE_CYCLIC]:
for order in [MPI.ORDER_C, MPI.ORDER_F]:
gsizes = [size]*ndim
distribs = [dist]*ndim
dargs = [MPI.DISTRIBUTE_DFLT_DARG]*ndim
psizes = MPI.Compute_dims(size, [0]*ndim)
factory = MPI.Datatype.Create_darray
args = size, rank, gsizes, distribs, dargs, psizes, order
self.check_datatype(dtype, factory, *args)
def testCreateF90Integer(self):
for r in (1, 2, 4):
factory = MPI.Datatype.Create_f90_integer
args = (r,)
self.check_datatype(None, factory, *args)
@unittest.skipMPI('openmpi(<3.0.0)')
@unittest.skipMPI('msmpi')
@unittest.skipMPI('SpectrumMPI')
def testCreateF90RealSingle(self):
(p, r) = (6, 30)
factory = MPI.Datatype.Create_f90_real
args = (p, r)
self.check_datatype(None, factory, *args)
@unittest.skipMPI('openmpi(<3.0.0)')
@unittest.skipMPI('msmpi')
@unittest.skipMPI('SpectrumMPI')
def testCreateF90RealDouble(self):
(p, r) = (15, 300)
factory = MPI.Datatype.Create_f90_real
args = (p, r)
self.check_datatype(None, factory, *args)
@unittest.skipMPI('openmpi(<3.0.0)')
@unittest.skipMPI('msmpi')
@unittest.skipMPI('SpectrumMPI')
def testCreateF90ComplexSingle(self):
(p, r) = (6, 30)
factory = MPI.Datatype.Create_f90_complex
args = (p, r)
self.check_datatype(None, factory, *args)
@unittest.skipMPI('openmpi(<3.0.0)')
@unittest.skipMPI('msmpi')
@unittest.skipMPI('SpectrumMPI')
def testCreateF90ComplexDouble(self):
(p, r) = (15, 300)
factory = MPI.Datatype.Create_f90_complex
args = (p, r)
self.check_datatype(None, factory, *args)
match_size_integer = [1, 2, 4, 8]
match_size_real = [4, 8]
match_size_complex = [8, 16]
@unittest.skipMPI('MPI(<2.0)')
@unittest.skipMPI('openmpi', (MPI.CHARACTER == MPI.DATATYPE_NULL or
MPI.CHARACTER.Get_size() == 0))
def testMatchSize(self):
typeclass = MPI.TYPECLASS_INTEGER
for size in self.match_size_integer:
datatype = MPI.Datatype.Match_size(typeclass, size)
self.assertEqual(size, datatype.size)
typeclass = MPI.TYPECLASS_REAL
for size in self.match_size_real:
datatype = MPI.Datatype.Match_size(typeclass, size)
self.assertEqual(size, datatype.size)
typeclass = MPI.TYPECLASS_COMPLEX
for size in self.match_size_complex:
datatype = MPI.Datatype.Match_size(typeclass, size)
self.assertEqual(size, datatype.size)
def testCreateResized(self):
for dtype in datatypes:
for lb in range(-10, 10):
for extent in range(1, 10):
factory = MPI.Datatype.Create_resized
args = lb, extent
self.check_datatype(dtype, factory, *args)
def testGetSetName(self):
for dtype in datatypes:
try:
name = dtype.Get_name()
self.assertTrue(name)
dtype.Set_name(name)
self.assertEqual(name, dtype.Get_name())
except NotImplementedError:
self.skipTest('mpi-type-name')
def testCommit(self):
for dtype in datatypes:
dtype.Commit()
# Vendor-specific adjustments: remap or disable combiner checks and drop
# Fortran datatypes that the detected MPI implementation does not support.
name, version = MPI.get_vendor()
if name == 'LAM/MPI':
    combiner_map[MPI.COMBINER_INDEXED_BLOCK] = MPI.COMBINER_INDEXED
elif name == 'MPICH1':
    # A None entry disables the combiner check for that constructor.
    combiner_map[MPI.COMBINER_VECTOR] = None
    combiner_map[MPI.COMBINER_HVECTOR] = None
    combiner_map[MPI.COMBINER_INDEXED] = None
    combiner_map[MPI.COMBINER_HINDEXED_BLOCK] = None
    for t in datatypes_f: datatypes.remove(t)
elif MPI.Get_version() < (2,0):
    # Pre-MPI-2 implementations cannot introspect datatypes at all.
    combiner_map = None
if name == 'Open MPI':
    # Skip Fortran types compiled out of this Open MPI build (zero size).
    for t in datatypes_f + datatypes_f90:
        if t != MPI.DATATYPE_NULL:
            if t.Get_size() == 0:
                if t in datatypes:
                    datatypes.remove(t)
    if (1,6,0) < version < (1,7,0):
        TestDatatype.match_size_complex[:] = []
    if version < (1,5,2):
        for t in datatypes_f90[-4:]:
            if t != MPI.DATATYPE_NULL:
                datatypes.remove(t)
if name == 'Platform MPI':
    combiner_map[MPI.COMBINER_INDEXED_BLOCK] = MPI.COMBINER_INDEXED
    combiner_map[MPI.COMBINER_DARRAY] = MPI.COMBINER_STRUCT
    combiner_map[MPI.COMBINER_SUBARRAY] = MPI.COMBINER_STRUCT
    TestDatatype.match_size_complex[:] = []
if __name__ == '__main__':
    unittest.main()
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test class for AMT ManagementInterface
"""
import mock
from oslo_config import cfg
from ironic.common import boot_devices
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules.amt import common as amt_common
from ironic.drivers.modules.amt import management as amt_mgmt
from ironic.drivers.modules.amt import resource_uris
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.drivers.drac import utils as test_utils
from ironic.tests.drivers import third_party_driver_mock_specs as mock_specs
from ironic.tests.objects import utils as obj_utils
INFO_DICT = db_utils.get_test_amt_info()
CONF = cfg.CONF
@mock.patch.object(amt_common, 'pywsman', spec_set=mock_specs.PYWSMAN_SPEC)
class AMTManagementInteralMethodsTestCase(db_base.DbTestCase):
    """Tests for the module-private AMT management helpers.

    The class-level patch replaces the third-party ``pywsman`` module, so
    every test method receives the patched module as ``mock_client_pywsman``.
    """
    def setUp(self):
        super(AMTManagementInteralMethodsTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver='fake_amt')
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake_amt',
                                               driver_info=INFO_DICT)
    def test__set_boot_device_order(self, mock_client_pywsman):
        """_set_boot_device_order invokes ChangeBootOrder on success."""
        namespace = resource_uris.CIM_BootConfigSetting
        device = boot_devices.PXE
        # ReturnValue '0' in the SOAP reply indicates success.
        result_xml = test_utils.build_soap_xml([{'ReturnValue': '0'}],
                                               namespace)
        mock_xml = test_utils.mock_wsman_root(result_xml)
        mock_pywsman = mock_client_pywsman.Client.return_value
        mock_pywsman.invoke.return_value = mock_xml
        amt_mgmt._set_boot_device_order(self.node, device)
        mock_pywsman.invoke.assert_called_once_with(mock.ANY,
            namespace, 'ChangeBootOrder', mock.ANY)
    def test__set_boot_device_order_fail(self, mock_client_pywsman):
        """Non-zero ReturnValue raises AMTFailure; a None reply raises
        AMTConnectFailure."""
        namespace = resource_uris.CIM_BootConfigSetting
        device = boot_devices.PXE
        result_xml = test_utils.build_soap_xml([{'ReturnValue': '2'}],
                                               namespace)
        mock_xml = test_utils.mock_wsman_root(result_xml)
        mock_pywsman = mock_client_pywsman.Client.return_value
        mock_pywsman.invoke.return_value = mock_xml
        self.assertRaises(exception.AMTFailure,
                          amt_mgmt._set_boot_device_order, self.node, device)
        mock_pywsman.invoke.assert_called_once_with(mock.ANY,
            namespace, 'ChangeBootOrder', mock.ANY)
        # Second scenario: no reply at all from the AMT endpoint.
        mock_pywsman = mock_client_pywsman.Client.return_value
        mock_pywsman.invoke.return_value = None
        self.assertRaises(exception.AMTConnectFailure,
                          amt_mgmt._set_boot_device_order, self.node, device)
    def test__enable_boot_config(self, mock_client_pywsman):
        """_enable_boot_config invokes SetBootConfigRole on success."""
        namespace = resource_uris.CIM_BootService
        result_xml = test_utils.build_soap_xml([{'ReturnValue': '0'}],
                                               namespace)
        mock_xml = test_utils.mock_wsman_root(result_xml)
        mock_pywsman = mock_client_pywsman.Client.return_value
        mock_pywsman.invoke.return_value = mock_xml
        amt_mgmt._enable_boot_config(self.node)
        mock_pywsman.invoke.assert_called_once_with(mock.ANY,
            namespace, 'SetBootConfigRole', mock.ANY)
    def test__enable_boot_config_fail(self, mock_client_pywsman):
        """Non-zero ReturnValue raises AMTFailure; a None reply raises
        AMTConnectFailure."""
        namespace = resource_uris.CIM_BootService
        result_xml = test_utils.build_soap_xml([{'ReturnValue': '2'}],
                                               namespace)
        mock_xml = test_utils.mock_wsman_root(result_xml)
        mock_pywsman = mock_client_pywsman.Client.return_value
        mock_pywsman.invoke.return_value = mock_xml
        self.assertRaises(exception.AMTFailure,
                          amt_mgmt._enable_boot_config, self.node)
        mock_pywsman.invoke.assert_called_once_with(mock.ANY,
            namespace, 'SetBootConfigRole', mock.ANY)
        # Second scenario: no reply at all from the AMT endpoint.
        mock_pywsman = mock_client_pywsman.Client.return_value
        mock_pywsman.invoke.return_value = None
        self.assertRaises(exception.AMTConnectFailure,
                          amt_mgmt._enable_boot_config, self.node)
class AMTManagementTestCase(db_base.DbTestCase):
    """Tests for the public AMT ManagementInterface behavior."""
    def setUp(self):
        super(AMTManagementTestCase, self).setUp()
        mgr_utils.mock_the_extension_manager(driver='fake_amt')
        self.info = INFO_DICT
        self.node = obj_utils.create_test_node(self.context,
                                               driver='fake_amt',
                                               driver_info=self.info)
    def test_get_properties(self):
        """get_properties() exposes the common AMT driver properties."""
        expected = amt_common.COMMON_PROPERTIES
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertEqual(expected, task.driver.get_properties())
    @mock.patch.object(amt_common, 'parse_driver_info', spec_set=True,
                       autospec=True)
    def test_validate(self, mock_drvinfo):
        """validate() delegates to amt_common.parse_driver_info."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            task.driver.management.validate(task)
            mock_drvinfo.assert_called_once_with(task.node)
    @mock.patch.object(amt_common, 'parse_driver_info', spec_set=True,
                       autospec=True)
    def test_validate_fail(self, mock_drvinfo):
        """validate() propagates InvalidParameterValue from parsing."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            mock_drvinfo.side_effect = exception.InvalidParameterValue('x')
            self.assertRaises(exception.InvalidParameterValue,
                              task.driver.management.validate,
                              task)
    def test_get_supported_boot_devices(self):
        """The driver supports exactly PXE, disk and CD-ROM boot."""
        expected = [boot_devices.PXE, boot_devices.DISK, boot_devices.CDROM]
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertEqual(
                sorted(expected),
                sorted(task.driver.management.get_supported_boot_devices()))
    def test_set_boot_device_one_time(self):
        """set_boot_device() defaults to a non-persistent boot device."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.set_boot_device(task, 'pxe')
            self.assertEqual('pxe',
                             task.node.driver_internal_info["amt_boot_device"])
            self.assertFalse(
                task.node.driver_internal_info["amt_boot_persistent"])
    def test_set_boot_device_persistent(self):
        """set_boot_device(persistent=True) records a persistent device."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            task.driver.management.set_boot_device(task, 'pxe',
                                                   persistent=True)
            self.assertEqual('pxe',
                             task.node.driver_internal_info["amt_boot_device"])
            self.assertTrue(
                task.node.driver_internal_info["amt_boot_persistent"])
    def test_set_boot_device_fail(self):
        """An unsupported device name raises InvalidParameterValue."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=False) as task:
            self.assertRaises(exception.InvalidParameterValue,
                              task.driver.management.set_boot_device,
                              task, 'fake-device')
    @mock.patch.object(amt_mgmt, '_enable_boot_config', spec_set=True,
                       autospec=True)
    @mock.patch.object(amt_mgmt, '_set_boot_device_order', spec_set=True,
                       autospec=True)
    def test_ensure_next_boot_device_one_time(self, mock_sbdo, mock_ebc):
        """A one-time device is applied once, then reset to the default."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            device = boot_devices.PXE
            task.node.driver_internal_info['amt_boot_device'] = 'pxe'
            task.driver.management.ensure_next_boot_device(task.node, device)
            self.assertEqual('disk',
                             task.node.driver_internal_info["amt_boot_device"])
            self.assertTrue(
                task.node.driver_internal_info["amt_boot_persistent"])
            mock_sbdo.assert_called_once_with(task.node, device)
            mock_ebc.assert_called_once_with(task.node)
    @mock.patch.object(amt_mgmt, '_enable_boot_config', spec_set=True,
                       autospec=True)
    @mock.patch.object(amt_mgmt, '_set_boot_device_order', spec_set=True,
                       autospec=True)
    def test_ensure_next_boot_device_persistent(self, mock_sbdo, mock_ebc):
        """A persistent device survives ensure_next_boot_device()."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            device = boot_devices.PXE
            task.node.driver_internal_info['amt_boot_device'] = 'pxe'
            task.node.driver_internal_info['amt_boot_persistent'] = True
            task.driver.management.ensure_next_boot_device(task.node, device)
            self.assertEqual('pxe',
                             task.node.driver_internal_info["amt_boot_device"])
            self.assertTrue(
                task.node.driver_internal_info["amt_boot_persistent"])
            mock_sbdo.assert_called_once_with(task.node, device)
            mock_ebc.assert_called_once_with(task.node)
    def test_get_boot_device(self):
        """With no override recorded, the default is persistent disk boot."""
        expected = {'boot_device': boot_devices.DISK, 'persistent': True}
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertEqual(expected,
                             task.driver.management.get_boot_device(task))
    def test_get_sensor_data(self):
        """Sensor data collection is not implemented for AMT."""
        with task_manager.acquire(self.context, self.node.uuid,
                                  shared=True) as task:
            self.assertRaises(NotImplementedError,
                              task.driver.management.get_sensors_data,
                              task)
| |
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Sagar Karandikar <skarandikar@berkeley.edu>
"""
import time
import urllib2
import datetime
from string import capwords
from smap.drivers.scraper import ScraperDriver
from smap.contrib import dtutil
urllib2.install_opener(urllib2.build_opener())
class NYIsoDriver(ScraperDriver):
    """Periodically scrape data from NYISO and publish it as sMAP feeds.

    Data is pulled from the public NYISO CSV endpoints (actual/forecasted
    load, day-ahead and real-time zonal LMP with loss/congestion
    components, and transfer-interface flows/limits) and accumulated in
    ``self.nyiso_out`` as {data type: {place: {series name: [points]}}},
    where each point is ``[unix_timestamp, float_value]``.
    """
    # Cyclic column cursor used by pred_load() to match row values to
    # their header columns; advanced via inf_iterate().
    ITER = 0
    DATA_TYPES = { "Forecasted Load": {"Unit": "MW", "Description": "Load",
                       'Uri': 'http://mis.nyiso.com/'
                       'public/csv/isolf/<date>isolf.csv'},
                   "Integrated Actual Load": {"Unit": "MW", "Description": "Load",
                       'Uri': 'http://mis.nyiso.com/'
                       'public/csv/palIntegrated/<date>palIntegrated.csv'},
                   "Actual Load": {"Unit": "MW", "Description": "Load", 'Uri':
                       'http://mis.nyiso.com/public/csv/pal/<date>pal.csv'},
                   "Actual LMP": {"Unit": "$/MWh", "Description": "LMP",
                       'Uri': 'http://mis.nyiso.com/'
                       'public/realtime/realtime_zone_lbmp.csv'},
                   "Forecasted LMP": {"Unit": "$/MWh", "Description": "LMP",
                       'Uri': 'http://mis.nyiso.com/'
                       'public/csv/damlbmp/<date>damlbmp_zone.csv'},
                   "Actual Marginal Cost Losses": {"Unit": "$/MWh", "Description":
                       "Marginal Cost Losses",
                       'Uri': 'http://mis.nyiso.com/'
                       'public/realtime/realtime_zone_lbmp.csv'},
                   "Forecasted Marginal Cost Losses": {"Unit": "$/MWh", "Description":
                       "Marginal Cost Losses",
                       'Uri': 'http://mis.nyiso.com/'
                       'public/csv/damlbmp/<date>damlbmp_zone.csv'},
                   "Actual Flow Transfer Interface": {"Unit": "MW", "Description":
                       "Transfer Interface",
                       'Uri': 'http://mis.nyiso.com/'
                       'public/csv/ExternalLimitsFlows/currentExternalLimitsFlows.csv'},
                   "Negative Limit Transfer Interface": {"Unit": "MW", "Description":
                       "Transfer Interface",
                       'Uri': 'http://mis.nyiso.com/'
                       'public/csv/ExternalLimitsFlows/currentExternalLimitsFlows.csv'},
                   "Positive Limit Transfer Interface": {"Unit": "MW", "Description":
                       "Transfer Interface",
                       'Uri': 'http://mis.nyiso.com/'
                       'public/csv/ExternalLimitsFlows/currentExternalLimitsFlows.csv'},
                   "Actual Marginal Cost Congestion": {"Unit": "$/MWh", "Description":
                       "Marginal Cost Congestion",
                       'Uri': 'http://mis.nyiso.com/'
                       'public/realtime/realtime_zone_lbmp.csv'},
                   "Forecasted Marginal Cost Congestion": {"Unit": "$/MWh", "Description":
                       "Marginal Cost Congestion",
                       'Uri': 'http://mis.nyiso.com/'
                       'public/csv/damlbmp/<date>damlbmp_zone.csv'}
                 }
    def scrape(self):
        """Fetch every feed and return the populated nyiso_out dict.

        Order matters: actual_lmp() fills the "Actual" series into dicts
        that forecast_lmp() creates, so forecast_lmp() must run first.
        """
        self.nyiso_out = { "Load": {}, "Transfer Interface": {}, "LMP": {},
                           "Marginal Cost Losses": {},
                           "Marginal Cost Congestion": {} }
        self.actual_load()
        self.pred_load()
        self.int_actual_load()
        self.forecast_lmp()
        self.actual_lmp()
        self.transfer_interface()
        return self.nyiso_out
    def actual_load(self):
        """Scrape real-time actual load (pal.csv) into Load/<zone>/Actual."""
        actload = urllib2.urlopen(self.urlgen('http://mis.nyiso.com/public/'
                                              'csv/pal/', 'pal.csv', 0))
        lines = actload.readlines()
        actload.close()
        lines.pop(0)  # drop the CSV header row
        for line in lines:
            temp = line.strip().split(",")
            temp[0] = temp[0].replace('"', '')
            temp[2] = self.match(temp[2].replace('"', ''))
            # Rows without a trailing load value are still-pending readings.
            if len(temp[len(temp)-1]) == 0:
                continue
            point = [self.parse_time(temp[0], 0), float(temp[4])]
            if temp[2] in self.nyiso_out["Load"].keys():
                self.nyiso_out["Load"][temp[2]]["Actual"].append(point)
            else:
                self.nyiso_out["Load"][temp[2]] = {}
                self.nyiso_out["Load"][temp[2]]["Actual"] = [point]
    def pred_load(self):
        """Scrape day-ahead forecasted load (isolf.csv)."""
        # The next-day file may not be posted yet; fall back to today's.
        try:
            predload = urllib2.urlopen(self.urlgen('http://mis.nyiso.com/'
                                       'public/csv/isolf/', 'isolf.csv', 86400))
        except urllib2.URLError:
            predload = urllib2.urlopen(self.urlgen('http://mis.nyiso.com/'
                                       'public/csv/isolf/', 'isolf.csv', 0))
        lines = predload.readlines()
        predload.close()
        # SECURITY NOTE: eval() of remote CSV header text is unsafe and is
        # kept only for compatibility; it should be replaced with explicit
        # quote-stripping of the comma-separated header fields.
        col = eval("[" + lines.pop(0).replace('"Time Stamp",', "") + "]")
        for x in range(len(col)):
            col[x] = self.match(col[x])
        for place in col:
            if place == self.match("NYISO"):
                col[col.index(self.match("NYISO"))] = "Total Area"
                place = "Total Area"
            if place not in self.nyiso_out["Load"].keys():
                self.nyiso_out["Load"][place] = { "Forecasted": [] }
            else:
                self.nyiso_out["Load"][place]["Forecasted"] = []
        for line in lines:
            temp = line.strip().split(",")
            thistime = self.parse_time(temp.pop(0).replace('"', ''), 1)
            for placeval in temp:
                point = [thistime, float(placeval)]
                self.nyiso_out["Load"][col[self.ITER]]["Forecasted"].append(
                    point)
                # BUG FIX: advance the column cursor once per VALUE (it was
                # advanced once per row, which scattered each row's readings
                # across the wrong zones).  After a full row the cursor
                # wraps back to column 0 for the next row.
                self.inf_iterate(col)
    def int_actual_load(self):
        """Scrape time-integrated actual load (palIntegrated.csv)."""
        actload = urllib2.urlopen(self.urlgen('http://mis.nyiso.com/public/csv/'
                                  'palIntegrated/', 'palIntegrated.csv', 0))
        lines = actload.readlines()
        actload.close()
        lines.pop(0)  # drop the CSV header row
        for line in lines:
            temp = line.strip().split(",")
            temp[0] = temp[0].replace('"', '')
            temp[2] = self.match(temp[2].replace('"', ''))
            if len(temp[len(temp)-1]) == 0:
                continue
            point = [self.parse_time(temp[0], 0), float(temp[4])]
            if temp[2] in self.nyiso_out["Load"].keys():
                k = self.nyiso_out["Load"][temp[2]].keys()
                if "Integrated Actual" in k:
                    self.nyiso_out["Load"][temp[2]]["Integrated Actual"].append(
                        point)
                else:
                    # BUG FIX: seed the series with the current point; it
                    # was initialized to [], silently dropping this reading.
                    self.nyiso_out["Load"][temp[2]]["Integrated Actual"] = [point]
            else:
                self.nyiso_out["Load"][temp[2]] = {}
                self.nyiso_out["Load"][temp[2]]["Integrated Actual"] = [point]
    def forecast_lmp(self):
        """Scrape day-ahead zonal LMP plus loss/congestion components."""
        # try/except to handle inconsistent next-day upload time
        try:
            actload = urllib2.urlopen(self.urlgen('http://mis.nyiso.com/public'
                                      '/csv/damlbmp/', 'damlbmp_zone.csv', 86400))
        except urllib2.URLError:
            actload = urllib2.urlopen(self.urlgen('http://mis.nyiso.com/public'
                                      '/csv/damlbmp/', 'damlbmp_zone.csv', 0))
        lines = actload.readlines()
        actload.close()
        temps = []
        lines.pop(0)  # drop the CSV header row
        for line in lines:
            temp = line.strip().split(",")
            temp[0] = temp[0].replace('"', '')
            temp[1] = self.match(temp[1].replace('"', ''))
            temps.append(temp)
        first_place = temps[0][1]
        first = True
        # Rows cycle through the zones for each timestamp, so initialize
        # one series per zone and stop when the first zone repeats.
        for line in temps:
            # BUG FIX: compare zone names with != (the previous substring
            # test via 'not in' could misfire when one zone name contains
            # another).
            if line[1] != first_place or first:
                self.nyiso_out["LMP"][line[1]] = {}
                self.nyiso_out["LMP"][line[1]]["Forecasted"] = []
                self.nyiso_out["Marginal Cost Losses"][line[1]] = {}
                self.nyiso_out["Marginal Cost Losses"][line[1]]["Forecasted"] = []
                self.nyiso_out["Marginal Cost Congestion"][line[1]] = {}
                self.nyiso_out["Marginal Cost Congestion"][line[1]]["Forecasted"] = []
                first = False
            else:
                break
        for temp in temps:
            # Malformed/partial rows do not have all six columns.
            if len(temp) != 6:
                continue
            point = [self.parse_time(temp[0], 1), float(temp[3])]
            self.nyiso_out["LMP"][temp[1]]["Forecasted"].append(point)
            point = [self.parse_time(temp[0], 1), float(temp[4])]
            self.nyiso_out["Marginal Cost Losses"][temp[1]]["Forecasted"].append(point)
            point = [self.parse_time(temp[0], 1), float(temp[5])]
            self.nyiso_out["Marginal Cost Congestion"][temp[1]]["Forecasted"].append(point)
    def actual_lmp(self):
        """Scrape real-time zonal LMP plus loss/congestion components.

        Relies on forecast_lmp() having created the per-zone dicts.
        """
        actload = urllib2.urlopen('http://mis.nyiso.com/public/'
                                  'realtime/realtime_zone_lbmp.csv')
        lines = actload.readlines()
        actload.close()
        temps = []
        lines.pop(0)  # drop the CSV header row
        for line in lines:
            temp = line.strip().split(",")
            temp[0] = temp[0].replace('"', '')
            temp[1] = self.match(temp[1].replace('"', ''))
            temps.append(temp)
        first_place = temps[0][1]
        first = True
        for line in temps:
            # BUG FIX: != instead of substring 'not in' (see forecast_lmp).
            if line[1] != first_place or first:
                self.nyiso_out["LMP"][line[1]]["Actual"] = []
                self.nyiso_out["Marginal Cost Losses"][line[1]]["Actual"] = []
                self.nyiso_out["Marginal Cost Congestion"][line[1]]["Actual"] = []
                first = False
            else:
                break
        for temp in temps:
            if len(temp) != 6:
                continue
            point = [self.parse_time(temp[0], 0), float(temp[3])]
            self.nyiso_out["LMP"][temp[1]]["Actual"].append(point)
            point = [self.parse_time(temp[0], 0), float(temp[4])]
            self.nyiso_out["Marginal Cost Losses"][temp[1]]["Actual"].append(point)
            point = [self.parse_time(temp[0], 0), float(temp[5])]
            self.nyiso_out["Marginal Cost Congestion"][temp[1]]["Actual"].append(point)
    def transfer_interface(self):
        """Scrape current transfer-interface flows and limits."""
        trans_load = urllib2.urlopen('http://mis.nyiso.com/public/csv/External'
                                     'LimitsFlows/currentExternalLimitsFlows.csv')
        lines = trans_load.readlines()
        lines.pop(0)  # drop the CSV header row
        trans_load.close()
        for line in lines:
            temp = line.strip().split(",")
            temp[0] = self.parse_time(temp[0], 1)
            assemble = {"Actual Flow": [[temp[0], float(temp[3])]],
                        "Positive Limit": [[temp[0], float(temp[4])]],
                        "Negative Limit": [[temp[0], float(temp[5])]]}
            self.nyiso_out["Transfer Interface"][temp[1]] = assemble
    def inf_iterate(self, col):
        """Quick infinite iterator for column-to-data matching"""
        if self.ITER == len(col)-1:
            self.ITER = 0
        else:
            self.ITER += 1
    def parse_time(self, time_str, fmt_int):
        """Parse a NYISO timestamp (fmt_int selects with/without seconds)
        into an integer unix timestamp in local time."""
        fmt_strs = ["%m/%d/%Y %H:%M:%S", "%m/%d/%Y %H:%M"]
        time_str = time.strptime(time_str, fmt_strs[fmt_int])
        data_time = time.mktime(time_str)
        return int(data_time)
    def match(self, name_string):
        """Match place names since NYISO does not capitalize uniformly"""
        return capwords(str.lower(name_string))
    def urlgen(self, prefix, suffix, offset):
        """Generate the url for nyiso feeds. The produced output is
        "Prefix"+date+"Suffix". The offset (seconds from now) is used when
        requesting future or past dates, e.g. for forecasted load."""
        basetime = dtutil.now("America/New_York")
        reqtime = basetime + datetime.timedelta(seconds=offset)
        url = reqtime.strftime("%Y%m%d")
        url = prefix + url + suffix
        return url
    def setup(self, opts):
        """sMAP driver entry point: scrape once and register one timeseries
        per (data type, location, series) triple found."""
        self.lastLatests = {}
        self.update_frequency = 300  # seconds between rescrapes
        scraped = self.scrape()
        for data_type in scraped.keys():
            for location in scraped[data_type].keys():
                for valtype in scraped[data_type][location].keys():
                    path = "/" + data_type + "/" + location + "/" + valtype
                    temp = self.add_timeseries(path, "NYISO" + data_type +
                            location + valtype,
                            self.DATA_TYPES[valtype + " " + data_type]["Unit"],
                            data_type = "double", description =
                            valtype + " " +
                            self.DATA_TYPES[valtype + " " + data_type]["Description"]
                            + " for " + location)
                    temp['Metadata'] = { 'Location' : {'Country': 'USA', 'Area':
                            'New York', 'Uri': self.DATA_TYPES[valtype
                            + " " + data_type]["Uri"]}, 'Extra' : {'ISOName': 'NYISO',
                            'ISOType': data_type, 'ISOSubType': location,
                            'ISODataType': valtype }
                          }
                    temp['Properties']['Timezone'] = "America/New_York"
                    self.lastLatests[path] = None
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from six import with_metaclass
from azure.core import CaseInsensitiveEnumMeta
class ActionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Enum. Indicates the action type. "Internal" refers to actions that are for internal only APIs.
    """
    #: The action is for internal-only APIs.
    INTERNAL = "Internal"
class ConnectionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """A connection type for accessing labs and VMs (Public, Private or None).
    """
    PUBLIC = "Public"
    PRIVATE = "Private"
    #: This kind of connection is not allowed.
    NONE = "None"
class CreatedByType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The type of identity that created the resource.
    """
    USER = "User"
    APPLICATION = "Application"
    MANAGED_IDENTITY = "ManagedIdentity"
    KEY = "Key"
class CreateOption(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Indicates what lab virtual machines are created from.
    """
    #: An image is used to create all lab user virtual machines. When this option is set, no template
    #: VM will be created.
    IMAGE = "Image"
    #: A template VM will be used to create all lab user virtual machines.
    TEMPLATE_VM = "TemplateVM"
class EnableState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Whether a property is enabled or disabled.
    """
    ENABLED = "Enabled"
    DISABLED = "Disabled"
class InvitationState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The lab user invitation state.
    """
    #: The invitation has not been sent.
    NOT_SENT = "NotSent"
    #: Currently sending the invitation.
    SENDING = "Sending"
    #: The invitation has been successfully sent.
    SENT = "Sent"
    #: There was an error while sending the invitation.
    FAILED = "Failed"
class LabServicesSkuTier(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The tier of the Lab Services SKU.
    """
    STANDARD = "Standard"
    PREMIUM = "Premium"
class LabState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The state of a lab.
    """
    #: The lab is currently in draft (has not been published).
    DRAFT = "Draft"
    #: The lab is publishing.
    PUBLISHING = "Publishing"
    #: The lab is scaling.
    SCALING = "Scaling"
    #: The lab is syncing users.
    SYNCING = "Syncing"
    #: The lab has been published.
    PUBLISHED = "Published"
class OperationStatus(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The status of a long-running operation.
    """
    #: The operation has been accepted but hasn't started.
    NOT_STARTED = "NotStarted"
    #: The operation is running.
    IN_PROGRESS = "InProgress"
    #: The operation Succeeded.
    SUCCEEDED = "Succeeded"
    #: The operation failed.
    FAILED = "Failed"
    #: Not supported yet.
    CANCELED = "Canceled"
class Origin(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The intended executor of the operation; as in Resource Based Access Control (RBAC) and audit
    logs UX. Default value is "user,system"
    """
    USER = "user"
    SYSTEM = "system"
    #: The operation may be executed by either users or the system.
    USER_SYSTEM = "user,system"
class OsState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The operating system state.
    """
    #: Image does not contain any machine and user specific information.
    GENERALIZED = "Generalized"
    #: Image contains machine and user specific information.
    SPECIALIZED = "Specialized"
class OsType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The operating system type.
    """
    WINDOWS = "Windows"
    LINUX = "Linux"
class ProvisioningState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Resource provisioning state.
    """
    #: Resource is in the process of being created.
    CREATING = "Creating"
    #: New property values are being applied to the resource.
    UPDATING = "Updating"
    #: Resource is in the process of being deleted.
    DELETING = "Deleting"
    #: Resource is in healthy state after creation or update operation.
    SUCCEEDED = "Succeeded"
    #: Previous operation on the resource has failed leaving resource in unhealthy state.
    FAILED = "Failed"
    #: The resource is locked and changes are currently blocked. This could be due to maintenance or a
    #: scheduled operation. The state will go back to succeeded once the locking operation has
    #: finished.
    LOCKED = "Locked"
class RecurrenceFrequency(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Schedule recurrence frequencies.
    """
    #: Schedule will run every day.
    DAILY = "Daily"
    #: Schedule will run every week on days specified in weekDays.
    WEEKLY = "Weekly"
class RegistrationState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The user lab registration state.
    """
    #: User has registered with the lab.
    REGISTERED = "Registered"
    #: User has not yet registered with the lab.
    NOT_REGISTERED = "NotRegistered"
class RestrictionReasonCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The reason for the restriction.
    """
    #: Restricted because of the subscription's quota.
    QUOTA_ID = "QuotaId"
    #: The SKU is not available for the subscription.
    NOT_AVAILABLE_FOR_SUBSCRIPTION = "NotAvailableForSubscription"
class RestrictionType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The type of restriction.
    """

    #: The restriction applies to a location.
    LOCATION = "Location"
class ScaleType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """How the SKU's capacity can be scaled.

    NOTE(review): the original docstring ("The localized name of the
    resource.") appears to be a copy/paste error from a neighboring model;
    the member descriptions below describe scaling behavior.
    """

    #: The capacity is not adjustable in any way.
    NONE = "None"
    #: The user must manually scale this SKU in and out.
    MANUAL = "Manual"
    #: The user is permitted to scale this SKU in and out.
    AUTOMATIC = "Automatic"
class ShutdownOnIdleMode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Defines whether to shut down VM on idle and the criteria for idle detection.
    """

    #: The VM won't be shut down when it is idle.
    NONE = "None"
    #: The VM will be considered as idle when there is no keyboard or mouse input.
    USER_ABSENCE = "UserAbsence"
    #: The VM will be considered as idle when user is absent and the resource (CPU and disk)
    #: consumption is low.
    LOW_USAGE = "LowUsage"
class SkuTier(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """This field is required to be implemented by the Resource Provider if the service has more than
    one tier, but is not required on a PUT.
    """

    FREE = "Free"
    BASIC = "Basic"
    STANDARD = "Standard"
    PREMIUM = "Premium"
class UsageUnit(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The unit details.
    """

    #: Usage is expressed as a plain count.
    COUNT = "Count"
class VirtualMachineState(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The state of a virtual machine.
    """

    #: The VM is currently stopped.
    STOPPED = "Stopped"
    #: The VM is starting.
    STARTING = "Starting"
    #: The VM is running.
    RUNNING = "Running"
    #: The VM is stopping.
    STOPPING = "Stopping"
    #: The VM password is being reset.
    RESETTING_PASSWORD = "ResettingPassword"
    #: The VM is being reimaged.
    REIMAGING = "Reimaging"
    #: The VM is being redeployed.
    REDEPLOYING = "Redeploying"
class VirtualMachineType(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """The type of the lab virtual machine.
    """

    #: A user VM.
    USER = "User"
    #: A template VM.
    TEMPLATE = "Template"
class WeekDay(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)):
    """Days of the week.
    """

    #: Schedule will run on Sunday.
    SUNDAY = "Sunday"
    #: Schedule will run on Monday.
    MONDAY = "Monday"
    #: Schedule will run on Tuesday.
    TUESDAY = "Tuesday"
    #: Schedule will run on Wednesday.
    WEDNESDAY = "Wednesday"
    #: Schedule will run on Thursday.
    THURSDAY = "Thursday"
    #: Schedule will run on Friday.
    FRIDAY = "Friday"
    #: Schedule will run on Saturday.
    SATURDAY = "Saturday"
| |
import sys
import traceback
import string
import inspect
import imp
def embed():
    # True when the host process injected an ``embed`` attribute into sys.
    # NOTE(review): re-defined identically a few lines below; this first
    # definition is dead code.
    return "embed" in sys.__dict__
import colored_traceback
colored_traceback.add_hook(always=True)
from JumpScale import j
def embed():
    """Return True when the hosting process has set an ``embed`` attribute
    on the :mod:`sys` module (i.e. we are running embedded)."""
    return "embed" in vars(sys)
if not embed():
from JumpScale.core.errorhandling.ErrorConditionObject import ErrorConditionObject, LEVELMAP
else:
from ErrorConditionObject import ErrorConditionObject, LEVELMAP
# class BaseException(Exception):
# def __init__(self, message="", eco=None):
# print ("OUR BASE EXCEPTION")
# self.message = message
# self.eco = eco
# def __str__(self):
# if self.eco!=None:
# return str(j.errorconditionhandler.getErrorConditionObject(self.eco))
# return "Unexpected Error Happened"
# __repr__ = __str__
from JSExceptions import *
import JSExceptions
class ErrorConditionHandler:
    def __init__(self, haltOnError=True, storeErrorConditionsLocal=True):
        """
        @param haltOnError: when True, processed errors halt the application
        @param storeErrorConditionsLocal: accepted for API compatibility;
            NOTE(review): not stored on the instance, apparently unused here
        """
        self.__jslocation__ = "j.errorconditionhandler"
        self._blacklist = None  # lazily loaded via the ``blacklist`` property
        self.lastAction = ""
        self.haltOnError = haltOnError
        self.setExceptHook()  # installs self.excepthook as sys.excepthook
        self.lastEco = None
        self.escalateToRedis = None  # lua script handle, registered lazily in _send2Redis
        self.exceptions = JSExceptions
        j.exceptions = JSExceptions
    def _send2Redis(self, eco):
        """Escalate *eco* to redis through the eco.lua script.

        Returns the JSON-decoded script result, or None when the script is
        not available. No-op when running embedded.
        """
        if embed():
            return
        if self.escalateToRedis is None:
            # register the lua script once, if shipped with the installation
            luapath = "%s/core/errorhandling/eco.lua" % j.dirs.jsLibDir
            if j.sal.fs.exists(path=luapath):
                lua = j.sal.fs.fileGetContents(luapath)
                self.escalateToRedis = j.core.db.register_script(lua)
        if self.escalateToRedis is not None:
            data = eco.toJson()
            res = self.escalateToRedis(
                keys=["queues:eco", "eco:incr", "eco:occurrences", "eco:objects", "eco:last"], args=[eco.key, data])
            res = j.data.serializer.json.loads(res)
            return res
        else:
            return None
    @property
    def blacklist(self):
        """Application names whose errors are ignored (see checkErrorIgnore);
        read once from the 'eco.blacklist' key of the application config and
        cached on the instance."""
        if self._blacklist is None:
            key = 'eco.blacklist'
            if j.application.config.jumpscale.get('application').get(key):
                self._blacklist = j.application.config.jumpscale.get('application').getList(key)
            else:
                self._blacklist = list()
        return self._blacklist
def toolStripNonAsciFromText(text):
return string.join([char for char in str(text) if (
(ord(char) > 31 and ord(char) < 127) or ord(char) == 10)], "")
    def setExceptHook(self):
        """Install this handler as the global ``sys.excepthook``."""
        sys.excepthook = self.excepthook
        self.inException = False  # guard flag against recursive error handling
def getLevelName(self, level):
return LEVELMAP.get(level, 'UNKNOWN')
def getErrorConditionObject(self, ddict={}, msg="", msgpub="", category="",
level=1, type="UNKNOWN", tb=None, tags=""):
"""
@data is dict with fields of errorcondition obj
returns only ErrorConditionObject which should be used in jumpscale to define an errorcondition (or potential error condition)
"""
errorconditionObject = ErrorConditionObject(
ddict=ddict, msg=msg, msgpub=msgpub, level=level, category=category, type=type, tb=tb, tags=tags)
return errorconditionObject
    def processPythonExceptionObject(self, exceptionObject, tb=None):
        """
        Parse a python exception into an eco and immediately process it
        (e.g. send to local logserver and/or store locally in the errordb).

        how to use
            try:
                ##do something
            except Exception as e:
                j.errorconditionhandler.processPythonExceptionObject(e)

        @param exceptionObject: error object thrown by python on exception
        @param tb: optional python traceback object, can be None
        @return ecoObj
        """
        eco = self.parsePythonExceptionObject(
            exceptionObject=exceptionObject, tb=tb)
        eco.process()
        return eco
def parsePythonExceptionObject(self, exceptionObject, tb=None):
"""
how to use
try:
##do something
except Exception,e:
eco=j.errorconditionhandler.parsePythonExceptionObject(e)
eco is jumpscale internal format for an error
next step could be to process the error objecect (eco) e.g. by eco.process()
@param exceptionObject is errorobject thrown by python when there is an exception
@param ttype : is the description of the error, can be None
@param tb : can be a python data object for traceback, can be None
@return a ErrorConditionObject object as used by jumpscale (should be the only type of object we pass around)
"""
# this allows to do raise eco
# was BaseException , dont understand (despiegk)
if isinstance(exceptionObject, ErrorConditionObject):
# return self.getErrorConditionObject(exceptionObject.eco)
return ErrorConditionObject
if not isinstance(exceptionObject, Exception):
print(
"did not receive an Exceptio object for python exception, this is serious bug.")
print("exceptionObject was:\n%s" % exceptionObject)
sys.exit(1)
if tb is None:
ttype, exc_value, tb = sys.exc_info()
if hasattr(exceptionObject, "codetrace"):
codetrace = exceptionObject.codetrace
else:
codetrace = True
if hasattr(exceptionObject, "whoami"):
whoami = exceptionObject.whoami
else:
whoami = ""
if hasattr(exceptionObject, "eco"):
eco = exceptionObject.eco
else:
eco = None
if hasattr(exceptionObject, "level"):
level = exceptionObject.level
else:
level = 1
if hasattr(exceptionObject, "actionkey"):
actionkey = exceptionObject.actionkey
else:
actionkey = ""
if hasattr(exceptionObject, "msgpub"):
msgpub = exceptionObject.msgpub
else:
msgpub = ""
if hasattr(exceptionObject, "source"):
source = exceptionObject.source
else:
source = ""
if hasattr(exceptionObject, "type"):
type = exceptionObject.type
else:
type = "UNKNOWN"
if hasattr(exceptionObject, "actionkey"):
actionkey = exceptionObject.actionkey
else:
actionkey = ""
if hasattr(exceptionObject, "message"):
message = exceptionObject.message
if j.data.types.list.check(message):
message = message[0] # @hack to let all work again
else:
message = str(exceptionObject)
if message.find("((") != -1:
tags = j.tools.code.regex.findOne("\(\(.*\)\)", message)
message.replace(tags, "")
else:
tags = ""
if hasattr(exceptionObject, "tags"):
tags = exceptionObject.tags + " %s" % tags
# if ttype!=None:
# try:
# type_str=str(ttype).split("exceptions.")[1].split("'")[0]
# except:
# type_str=str(ttype)
# else:
# type_str=""
if eco is None:
eco = self.getErrorConditionObject(
msg=message, msgpub=msgpub, level=level, tb=tb, tags=tags, type=type)
if codetrace:
# so for unknown exceptions not done through raise j.exceptions we
# will do stacktrace
eco.tracebackSet(tb, exceptionObject)
# if "message" in exceptionObject.__dict__:
# errorobject.exceptioninfo = j.data.serializer.json.dumps({'message': exceptionObject.message})
# else:
# errorobject.exceptioninfo = j.data.serializer.json.dumps({'message': str(exceptionObject)})
eco.exceptionclassname = exceptionObject.__class__.__name__
# module = inspect.getmodule(exceptionObject)
# errorobject.exceptionmodule = module.__name__ if module else None
# try:
# errorobject.funcfilename=tb.tb_frame.f_code.co_filename
# except:
# pass
# # try:
# try:
# backtrace = "~ ".join([res for res in traceback.format_exception(ttype, exceptionObject, tb)])
# if len(backtrace)>10000:
# backtrace=backtrace[:10000]
# errorobject.backtrace=backtrace
# except:
# print("ERROR in trying to get backtrace")
# except Exception,e:
# print "CRITICAL ERROR in trying to get errorobject, is BUG, please check (ErrorConditionHandler.py on line 228)"
# print "error:%s"%e
# sys.exit()
return eco
    def reRaiseECO(self, eco):
        """Re-raise the original exception described by *eco*: re-import its
        exception module (builtins when none recorded), look up the exception
        class (RuntimeError as fallback), restore the stored exception
        attributes onto a new instance and raise it."""
        if eco.exceptionmodule:
            # NOTE(review): ``imp`` is deprecated since Python 3.4; importlib
            # would be the modern replacement — confirm before changing.
            mod = imp.load_package(eco.exceptionmodule, eco.exceptionmodule)
        else:
            import builtins as mod
        Klass = getattr(mod, eco.exceptionclassname, RuntimeError)
        exc = Klass(eco.errormessage)
        for key, value in list(j.data.serializer.json.loads(eco.exceptioninfo).items()):
            setattr(exc, key, value)
        raise exc
    def excepthook(self, ttype, exceptionObject, tb):
        """ every fatal error in jumpscale or by python itself will result in an exception
        in this function the exception is caught.
        This routine will create an errorobject & escalate to the infoserver
        @ttype : is the description of the error
        @tb : can be a python data object or a Event
        """
        if isinstance(exceptionObject, HaltException):
            j.application.stop(1)
        # print "jumpscale EXCEPTIONHOOK"
        # guard against errors raised while handling an error (recursion)
        if self.inException:
            print(
                "ERROR IN EXCEPTION HANDLING ROUTINES, which causes recursive errorhandling behavior.")
            print(exceptionObject)
            sys.exit(1)
            return
        self.inException = True
        eco = self.parsePythonExceptionObject(exceptionObject, tb=tb)
        self.inException = False
        eco.process()
        if eco.traceback != "":
            print("\n**** TRACEBACK ***")
            eco.printTraceback()
        # print(eco)
def checkErrorIgnore(self, eco):
if j.application.debug:
ignorelist = []
else:
ignorelist = ["KeyboardInterrupt"]
for item in ignorelist:
if eco.errormessage.find(item) != -1:
return True
if j.application.appname in self.blacklist:
return True
return False
    def getFrames(self, tb=None):
        """Collect (frame, lineno) pairs for *tb* (or the active exception,
        or — failing that — the current interpreter stack), oldest frame
        first, skipping frames flagged ``__traceback_hide__`` and frames
        whose filename mentions error-handling/ipython internals."""
        def _getitem_from_frame(f_locals, key, default=None):
            """
            f_locals is not guaranteed to have .get(), but it will always
            support __getitem__. Even if it doesnt, we return ``default``.
            """
            try:
                return f_locals[key]
            except Exception:
                return default

        if tb is None:
            ttype, msg, tb = sys.exc_info()
        if tb is None:
            # no active exception: fall back to the interpreter stack
            frames = [(item[0], item[2]) for item in inspect.stack()]
        else:
            frames = []
            while tb:  # copied from sentry raven lib (BSD license)
                # support for __traceback_hide__ which is used by a few libraries
                # to hide internal frames.
                f_locals = getattr(tb.tb_frame, 'f_locals', {})
                if not _getitem_from_frame(f_locals, '__traceback_hide__'):
                    frames.append(
                        (tb.tb_frame, getattr(tb, 'tb_lineno', None)))
                tb = tb.tb_next
            frames.reverse()
        result = []
        ignore = ["ipython", "errorcondition", "loghandler", "errorhandling"]
        for frame, linenr in frames:
            name = frame.f_code.co_filename
            # print "RRR:%s %s"%(name,linenr)
            name = name.lower()
            toignore = False
            for check in ignore:
                if name.find(check) != -1:
                    toignore = True
            if not toignore:
                result.append((frame, linenr))
        return result
    def getErrorTraceKIS(self, tb=None):
        """Build a "keep it simple" trace: a list of
        (filename, function, excerpt-relative linenr, code excerpt, linenr)
        tuples — innermost frame first — plus (filename, linenr, function)
        of one frame.

        NOTE(review): ``nr`` is initialised to 1 and never incremented, so
        the ``if nr == 1`` branch fires on every iteration and
        filename0/linenr0/func0 end up describing the *last* frame processed.
        If the first frame was intended, ``nr`` should be incremented in the
        loop — confirm intent before changing.
        """
        out = []
        nr = 1
        filename0 = "unknown"
        linenr0 = 0
        func0 = "unknown"
        frs = self.getFrames(tb=tb)
        frs.reverse()
        for f, linenr in frs:
            try:
                code, linenr2 = inspect.findsource(f)
            except Exception:
                continue
            # show a window of ~14 source lines around the failing line
            start = max(linenr - 10, 0)
            stop = min(linenr + 4, len(code))
            code2 = "".join(code[start:stop])
            finfo = inspect.getframeinfo(f)
            linenr3 = linenr - start - 1
            out.append((finfo.filename, finfo.function, linenr3, code2, linenr))
            if nr == 1:
                filename0 = finfo.filename
                linenr0 = linenr
                func0 = finfo.function
        return out, filename0, linenr0, func0
    def escalateBugToDeveloper(self, errorConditionObject, tb=None):
        """Interactive escalation of an error to the developer: offer to
        stop, continue, open the trace in an editor, or drop into pdb.

        NOTE(review): the editor/interactive branch is guarded by
        ``if False and ...`` and is therefore permanently disabled; only the
        final else branch (j.application.stop(1)) is reachable.
        """
        j.logger.enabled = False  # no need to further log, there is error
        tracefile = ""

        def findEditorLinux():
            # first installed editor from this list wins; 'less' as fallback
            apps = ["sublime_text", "geany", "gedit", "kate"]
            for app in apps:
                try:
                    if j.system.unix.checkApplicationInstalled(app):
                        editor = app
                        return editor
                except:
                    pass
            return "less"

        if False and j.application.interactive:
            editor = None
            if j.core.platformtype.myplatform.isLinux():
                #j.tools.console.echo("THIS ONLY WORKS WHEN GEDIT IS INSTALLED")
                editor = findEditorLinux()
            elif j.core.platformtype.myplatform.isWindows():
                editorPath = j.sal.fs.joinPaths(
                    j.dirs.base, "apps", "wscite", "scite.exe")
                if j.sal.fs.exists(editorPath):
                    editor = editorPath
            tracefile = errorConditionObject.log2filesystem()
            # print "EDITOR FOUND:%s" % editor
            if editor:
                # print errorConditionObject.errormessagepublic
                if tb is None:
                    try:
                        res = j.tools.console.askString(
                            "\nAn error has occurred. Do you want do you want to do? (s=stop, c=continue, t=getTrace)")
                    except:
                        # print "ERROR IN ASKSTRING TO SEE IF WE HAVE TO USE
                        # EDITOR"
                        res = "s"
                else:
                    try:
                        res = j.tools.console.askString(
                            "\nAn error has occurred. Do you want do you want to do? (s=stop, c=continue, t=getTrace, d=debug)")
                    except:
                        # print "ERROR IN ASKSTRING TO SEE IF WE HAVE TO USE
                        # EDITOR"
                        res = "s"
                if res == "t":
                    cmd = "%s '%s'" % (editor, tracefile)
                    # print "EDITORCMD: %s" %cmd
                    if editor == "less":
                        j.sal.process.executeWithoutPipe(cmd, die=False)
                    else:
                        result, out = j.sal.process.execute(
                            cmd, die=False, outputToStdout=False)
                j.logger.clear()
                if res == "c":
                    return
                elif res == "d":
                    j.tools.console.echo(
                        "Starting pdb, exit by entering the command 'q'")
                    import pdb
                    pdb.post_mortem(tb)
                elif res == "s":
                    # print errorConditionObject
                    j.application.stop(1)
            else:
                # print errorConditionObject
                res = j.tools.console.askString(
                    "\nAn error has occurred. Do you want do you want to do? (s=stop, c=continue, d=debug)")
                j.logger.clear()
                if res == "c":
                    return
                elif res == "d":
                    j.tools.console.echo(
                        "Starting pdb, exit by entering the command 'q'")
                    import pdb
                    pdb.post_mortem()
                elif res == "s":
                    # print eobject
                    j.application.stop(1)
        else:
            # print "ERROR"
            # tracefile=eobject.log2filesystem()
            # print errorConditionObject
            #j.tools.console.echo( "Tracefile in %s" % tracefile)
            j.application.stop(1)
def halt(self, msg, eco):
if eco is not None:
eco = eco.__dict__
raise HaltException(msg, eco)
    def raiseWarning(self, message, msgpub="", tags="", level=4):
        """
        Create and immediately process a WARNING-type eco (does not raise).

        @param message is the error message which describes the state
        @param msgpub is message we want to show to endcustomers (can include a solution)
        @param tags: NOTE(review): accepted but not forwarded to
            getErrorConditionObject — apparently ignored
        @param level: warning level (default 4)
        """
        eco = j.errorconditionhandler.getErrorConditionObject(
            ddict={}, msg=message, msgpub=msgpub, category='', level=level, type='WARNING')
        eco.process()
| |
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Parses the command line, discovers the appropriate tests, and runs them.
Handles test configuration, but all the logic for
actually running the test is in Test and PageRunner."""
import copy
import inspect
import json
import optparse
import os
import sys
from telemetry import test
from telemetry.core import browser_options
from telemetry.core import discover
from telemetry.core import util
# Base class for the command-line sub-commands. Intentionally has NO class
# docstring: ``description`` surfaces ``__doc__`` to the help output, and the
# base class must report None there.
class Command(object):
    usage = ''

    @property
    def name(self):
        # Sub-command name: the lowercased class name.
        return type(self).__name__.lower()

    @property
    def description(self):
        # One-line description shown in help: the subclass docstring.
        return self.__doc__

    def CreateParser(self):
        # Build the option parser for this command.
        return optparse.OptionParser('%%prog %s %s' % (self.name, self.usage))

    def AddCommandLineOptions(self, parser):
        # Hook: subclasses add command-specific options here.
        pass

    def ProcessCommandLine(self, parser, options, args):
        # Hook: subclasses validate/interpret parsed options here.
        pass

    def Run(self, options, args):
        # Must be overridden by every concrete command.
        raise NotImplementedError()
class Help(Command):
    """Display help information"""
    # Prints usage plus one line per registered command to stderr.
    # (Python 2 ``print >>`` syntax: this module is py2-only.)
    # NOTE: the class docstring above is runtime data (shown as the command
    # description) and must not be edited casually.

    def Run(self, options, args):
        print >> sys.stderr, ('usage: %s <command> [<options>]' % _GetScriptName())
        print >> sys.stderr, 'Available commands are:'
        for command in COMMANDS:
            print >> sys.stderr, ' %-10s %s' % (command.name, command.description)
        return 0
class List(Command):
    """Lists the available tests"""
    # With -j/--json emits a machine-readable listing to stdout; otherwise a
    # human-readable list to stderr.

    usage = '[test_name] [<options>]'

    def __init__(self):
        super(List, self).__init__()
        self._tests = None  # {name: test class}, filled in ProcessCommandLine

    def AddCommandLineOptions(self, parser):
        parser.add_option('-j', '--json', action='store_true')

    def ProcessCommandLine(self, parser, options, args):
        # Zero args: all tests; one arg: fuzzy-matched tests; more: error.
        if not args:
            self._tests = _GetTests()
        elif len(args) == 1:
            self._tests = _MatchTestName(args[0])
        else:
            parser.error('Must provide at most one test name.')

    def Run(self, options, args):
        if options.json:
            test_list = []
            for test_name, test_class in sorted(self._tests.items()):
                test_list.append({
                    'name': test_name,
                    'description': test_class.__doc__,
                    'enabled': test_class.enabled,
                    'options': test_class.options,
                })
            print json.dumps(test_list)
        else:
            print >> sys.stderr, 'Available tests are:'
            _PrintTestList(self._tests)
        return 0
class Run(Command):
    """Run one or more tests"""
    # Resolves a (possibly abbreviated) test name to exactly one test class
    # and runs it, exit code capped to the shell's 0-255 range.

    usage = 'test_name [<options>]'

    def __init__(self):
        super(Run, self).__init__()
        self._test = None  # resolved test class, set in ProcessCommandLine

    def CreateParser(self):
        # Uses the browser-aware option container instead of a bare parser.
        options = browser_options.BrowserFinderOptions()
        parser = options.CreateParser('%%prog %s %s' % (self.name, self.usage))
        return parser

    def AddCommandLineOptions(self, parser):
        test.Test.AddCommandLineOptions(parser)

        # Allow tests to add their own command line options.
        matching_tests = {}
        for arg in sys.argv[1:]:
            matching_tests.update(_MatchTestName(arg))
        for test_class in matching_tests.itervalues():
            test_class.AddTestCommandLineOptions(parser)

    def ProcessCommandLine(self, parser, options, args):
        # Exactly one matching test is required; ambiguity and no-match both
        # print suggestions and exit(1).
        if len(args) != 1:
            parser.error('Must provide one test name.')
        input_test_name = args[0]
        matching_tests = _MatchTestName(input_test_name)
        if not matching_tests:
            print >> sys.stderr, 'No test named "%s".' % input_test_name
            print >> sys.stderr
            print >> sys.stderr, 'Available tests:'
            _PrintTestList(_GetTests())
            sys.exit(1)
        if len(matching_tests) > 1:
            print >> sys.stderr, 'Multiple tests named "%s".' % input_test_name
            print >> sys.stderr
            print >> sys.stderr, 'Did you mean one of these?'
            _PrintTestList(matching_tests)
            sys.exit(1)
        self._test = matching_tests.popitem()[1]

    def Run(self, options, args):
        return min(255, self._test().Run(copy.copy(options)))
# One instance of every Command subclass defined in this module (Help, List,
# Run, ...), discovered reflectively so new commands register themselves.
COMMANDS = [cls() for _, cls in inspect.getmembers(sys.modules[__name__])
            if inspect.isclass(cls)
            and cls is not Command and issubclass(cls, Command)]
def _GetScriptName():
    """Name of the executable as invoked: the basename of argv[0]."""
    script_path = sys.argv[0]
    return os.path.basename(script_path)
def _GetTests():
    """Return {test name: test class}, discovering once per process."""
    # Lazy load and cache results (stored as an attribute on the function
    # object itself).
    if not hasattr(_GetTests, 'tests'):
        base_dir = util.GetBaseDir()
        tests = discover.DiscoverClasses(base_dir, base_dir, test.Test,
                                         index_by_class_name=True)
        # NOTE(review): the genexp variable ``test`` shadows the imported
        # ``telemetry.test`` module name (harmless: genexp scope in py2).
        tests = dict((test.GetName(), test) for test in tests.itervalues())
        _GetTests.tests = tests
    return _GetTests.tests
def _MatchTestName(input_test_name):
    """Return {name: class} for tests whose name — or any dot-separated
    component of it — starts with *input_test_name*."""
    def _Matches(input_string, search_string):
        if search_string.startswith(input_string):
            return True
        for part in search_string.split('.'):
            if part.startswith(input_string):
                return True
        return False

    return dict((test_name, test_class)
                for test_name, test_class in _GetTests().iteritems()
                if _Matches(input_test_name, test_name))
def _PrintTestList(tests):
    """Print each test name to stderr, with the first line of its docstring
    (when present) aligned to the longest test name."""
    # PERF: the original recomputed the column width (max over all names)
    # inside the loop for every documented test, making the listing O(n^2);
    # hoisted here since it only depends on the full name set.
    if tests:
        format_string = ' %%-%ds %%s' % max(map(len, tests.iterkeys()))
    for test_name, test_class in sorted(tests.items()):
        if test_class.__doc__:
            description = test_class.__doc__.splitlines()[0]
            print >> sys.stderr, format_string % (test_name, description)
        else:
            print >> sys.stderr, ' %s' % test_name
def Main():
    """Dispatch to a Command chosen by prefix-matching the first non-flag
    argv token (default 'run'); returns the command's exit code."""
    # Get the command name from the command line.
    if len(sys.argv) > 1 and sys.argv[1] == '--help':
        sys.argv[1] = 'help'
    command_name = 'run'
    for arg in sys.argv[1:]:
        if not arg.startswith('-'):
            command_name = arg
            break

    # Validate and interpret the command name (prefix match over COMMANDS;
    # ambiguity lists the candidates and fails).
    commands = [command for command in COMMANDS
                if command.name.startswith(command_name)]
    if len(commands) > 1:
        print >> sys.stderr, ('"%s" is not a %s command. Did you mean one of these?'
                              % (command_name, _GetScriptName()))
        for command in commands:
            print >> sys.stderr, ' %-10s %s' % (command.name, command.description)
        return 1
    if commands:
        command = commands[0]
    else:
        # No command matched: treat the token as a test name for 'run'.
        command = Run()

    # Parse and run the command.
    parser = command.CreateParser()
    command.AddCommandLineOptions(parser)
    options, args = parser.parse_args()
    if commands:
        args = args[1:]  # drop the command token itself
    command.ProcessCommandLine(parser, options, args)
    return command.Run(options, args)
| |
from . import util as testutil
from sqlalchemy import pool, orm, util
from sqlalchemy.engine import default, create_engine
from sqlalchemy import exc as sa_exc
from sqlalchemy.util import decorator
from sqlalchemy import types as sqltypes, schema
import warnings
import re
from .warnings import resetwarnings
from .exclusions import db_spec, _is_excluded
from . import assertsql
from . import config
import itertools
from .util import fail
import contextlib
def emits_warning(*messages):
    """Mark a test as emitting a warning.

    With no arguments, squelches all SAWarning failures. Or pass one or more
    strings; these will be matched to the root of the warning description by
    warnings.filterwarnings().
    """
    # TODO: it would be nice to assert that a named warning was
    # emitted. should work with some monkeypatching of warnings,
    # and may work on non-CPython if they keep to the spirit of
    # warnings.showwarning's docstring.
    # - update: jython looks ok, it uses cpython's module

    @decorator
    def decorate(fn, *args, **kw):
        # todo: should probably be strict about this, too
        filters = [dict(action='ignore',
                        category=sa_exc.SAPendingDeprecationWarning)]
        if not messages:
            filters.append(dict(action='ignore',
                                category=sa_exc.SAWarning))
        else:
            filters.extend(dict(action='ignore',
                                message=message,
                                category=sa_exc.SAWarning)
                           for message in messages)
        for f in filters:
            warnings.filterwarnings(**f)
        try:
            return fn(*args, **kw)
        finally:
            # always restore the global warning filters, pass or fail
            resetwarnings()
    return decorate
def emits_warning_on(db, *warnings):
    """Mark a test as emitting a warning on a specific dialect.

    With no arguments, squelches all SAWarning failures. Or pass one or more
    strings; these will be matched to the root of the warning description by
    warnings.filterwarnings().
    """
    spec = db_spec(db)

    @decorator
    def decorate(fn, *args, **kw):
        # The two original branches differed only in how "this database is
        # targeted" was decided (string spec vs. exclusion tuple); the
        # squelch-or-run action was identical, so it is deduplicated here.
        if isinstance(db, str):
            targeted = spec(config.db)
        else:
            targeted = _is_excluded(*db)
        if not targeted:
            return fn(*args, **kw)
        wrapped = emits_warning(*warnings)(fn)
        return wrapped(*args, **kw)
    return decorate
def uses_deprecated(*messages):
    """Mark a test as immune from fatal deprecation warnings.

    With no arguments, squelches all SADeprecationWarning failures.
    Or pass one or more strings; these will be matched to the root
    of the warning description by warnings.filterwarnings().

    As a special case, you may pass a function name prefixed with //
    and it will be re-written as needed to match the standard warning
    verbiage emitted by the sqlalchemy.util.deprecated decorator.
    """

    @decorator
    def decorate(fn, *args, **kw):
        # todo: should probably be strict about this, too
        filters = [dict(action='ignore',
                        category=sa_exc.SAPendingDeprecationWarning)]
        if not messages:
            filters.append(dict(action='ignore',
                                category=sa_exc.SADeprecationWarning))
        else:
            # "//name" is rewritten to the deprecation decorator's verbiage
            filters.extend(
                [dict(action='ignore',
                      message=message,
                      category=sa_exc.SADeprecationWarning)
                 for message in
                 [(m.startswith('//') and
                   ('Call to deprecated function ' + m[2:]) or m)
                  for m in messages]])

        for f in filters:
            warnings.filterwarnings(**f)
        try:
            return fn(*args, **kw)
        finally:
            # always restore the global warning filters, pass or fail
            resetwarnings()
    return decorate
def global_cleanup_assertions():
    """Check things that have to be finalized at the end of a test suite.

    Hardcoded at the moment, a modular system can be built here
    to support things like PG prepared transactions, tables all
    dropped, etc.
    """
    # collect reference cycles first so lingering pool references are real
    testutil.lazy_gc()
    assert not pool._refs, str(pool._refs)
def eq_(a, b, msg=None):
    """Assert a == b, with repr messaging on failure."""
    default_msg = "%r != %r" % (a, b)
    assert a == b, msg or default_msg
def ne_(a, b, msg=None):
    """Assert a != b, with repr messaging on failure."""
    default_msg = "%r == %r" % (a, b)
    assert a != b, msg or default_msg
def is_(a, b, msg=None):
    """Assert a is b, with repr messaging on failure."""
    default_msg = "%r is not %r" % (a, b)
    assert a is b, msg or default_msg
def is_not_(a, b, msg=None):
    """Assert a is not b, with repr messaging on failure."""
    default_msg = "%r is %r" % (a, b)
    assert a is not b, msg or default_msg
def startswith_(a, fragment, msg=None):
    """Assert a.startswith(fragment), with repr messaging on failure."""
    default_msg = "%r does not start with %r" % (a, fragment)
    assert a.startswith(fragment), msg or default_msg
def assert_raises(except_cls, callable_, *args, **kw):
    """Assert that invoking *callable_* with the given arguments raises
    *except_cls*. Other exception types propagate unchanged."""
    raised = False
    try:
        callable_(*args, **kw)
    except except_cls:
        raised = True

    # assert outside the try block so it also works for AssertionError
    assert raised, "Callable did not raise an exception"
def assert_raises_message(except_cls, msg, callable_, *args, **kwargs):
    """Assert that *callable_* raises *except_cls* and that the regex *msg*
    matches somewhere in the exception's string form."""
    try:
        callable_(*args, **kwargs)
        assert False, "Callable did not raise an exception"
    except except_cls as err:
        assert re.search(msg, str(err), re.UNICODE), "%r !~ %s" % (msg, err)
        print(str(err).encode('utf-8'))
class AssertsCompiledSQL(object):
    """Mixin providing assert_compile(): renders a Core/ORM construct to SQL
    and compares it, whitespace-normalized, against an expected string,
    optionally checking bound parameters."""

    def assert_compile(self, clause, result, params=None,
                       checkparams=None, dialect=None,
                       checkpositional=None,
                       use_default_dialect=False,
                       allow_dialect_select=False):
        """
        :param clause: Core clause or orm.Query to compile
        :param result: expected SQL string (newlines/tabs are stripped first)
        :param params: compile-time parameters (sets column_keys)
        :param checkparams: expected construct_params() dict
        :param checkpositional: expected positional parameter tuple
        :param dialect: a dialect object, a dialect name string, 'default',
            or None (falls back to self.__dialect__ / config.db.dialect)
        """
        # dialect resolution; ``is None`` replaces the original's ``== None``
        # (identity is the correct idiom and sidesteps any custom __eq__)
        if use_default_dialect:
            dialect = default.DefaultDialect()
        elif dialect is None and not allow_dialect_select:
            dialect = getattr(self, '__dialect__', None)

            if dialect == 'default':
                dialect = default.DefaultDialect()
            elif dialect is None:
                dialect = config.db.dialect
            elif isinstance(dialect, str):
                dialect = create_engine("%s://" % dialect).dialect

        kw = {}
        if params is not None:
            kw['column_keys'] = list(params.keys())

        if isinstance(clause, orm.Query):
            context = clause._compile_context()
            context.statement.use_labels = True
            clause = context.statement

        c = clause.compile(dialect=dialect, **kw)

        param_str = repr(getattr(c, 'params', {}))
        # start Py3K
        param_str = param_str.encode('utf-8').decode('ascii', 'ignore')
        # end Py3K

        print("\nSQL String:\n" + str(c) + param_str)

        # compare with newlines and tabs removed from the rendered SQL
        cc = re.sub(r'[\n\t]', '', str(c))

        eq_(cc, result, "%r != %r on dialect %r" % (cc, result, dialect))

        if checkparams is not None:
            eq_(c.construct_params(params), checkparams)
        if checkpositional is not None:
            p = c.construct_params(params)
            eq_(tuple([p[x] for x in c.positiontup]), checkpositional)
class ComparesTables(object):
    # Mixin for schema-reflection tests: compares an original Table against
    # its reflected counterpart.

    def assert_tables_equal(self, table, reflected_table, strict_types=False):
        """Assert both tables have matching columns (name, primary key,
        nullability, type, foreign keys, server default) and identical
        primary keys. With *strict_types*, column types must match exactly
        instead of by type affinity."""
        assert len(table.c) == len(reflected_table.c)
        for c, reflected_c in zip(table.c, reflected_table.c):
            eq_(c.name, reflected_c.name)
            assert reflected_c is reflected_table.c[c.name]
            eq_(c.primary_key, reflected_c.primary_key)
            eq_(c.nullable, reflected_c.nullable)

            if strict_types:
                msg = "Type '%s' doesn't correspond to type '%s'"
                assert type(reflected_c.type) is type(c.type), \
                    msg % (reflected_c.type, c.type)
            else:
                self.assert_types_base(reflected_c, c)

            if isinstance(c.type, sqltypes.String):
                eq_(c.type.length, reflected_c.type.length)

            eq_(
                set([f.column.name for f in c.foreign_keys]),
                set([f.column.name for f in reflected_c.foreign_keys])
            )
            if c.server_default:
                assert isinstance(reflected_c.server_default,
                                  schema.FetchedValue)

        assert len(table.primary_key) == len(reflected_table.primary_key)
        for c in table.primary_key:
            assert reflected_table.primary_key.columns[c.name] is not None

    def assert_types_base(self, c1, c2):
        """Assert the two columns' types share the same type affinity."""
        assert c1.type._compare_type_affinity(c2.type),\
            "On column %r, type '%s' doesn't correspond to type '%s'" % \
            (c1.name, c1.type, c2.type)
class AssertsExecutionResults(object):
    # Mixin with row/object comparison helpers and SQL-emission assertions
    # built on the assertsql rule engine.

    def assert_result(self, result, class_, *objects):
        """Assert *result* rows map 1:1, in order, onto *objects* — each an
        attribute dict describing an instance of *class_*."""
        result = list(result)
        print(repr(result))
        self.assert_list(result, class_, objects)

    def assert_list(self, result, class_, list):
        # NOTE(review): parameter name ``list`` shadows the builtin; kept
        # for API compatibility.
        self.assert_(len(result) == len(list),
                     "result list is not the same size as test list, " +
                     "for class " + class_.__name__)
        for i in range(0, len(list)):
            self.assert_row(class_, result[i], list[i])

    def assert_row(self, class_, rowobj, desc):
        """Assert *rowobj* is an instance of *class_* whose attributes match
        *desc*; tuple values recurse into nested rows or lists."""
        self.assert_(rowobj.__class__ is class_,
                     "item class is not " + repr(class_))
        for key, value in desc.items():
            if isinstance(value, tuple):
                if isinstance(value[1], list):
                    self.assert_list(getattr(rowobj, key), value[0], value[1])
                else:
                    self.assert_row(value[0], getattr(rowobj, key), value[1])
            else:
                self.assert_(getattr(rowobj, key) == value,
                             "attribute %s value %s does not match %s" % (
                                 key, getattr(rowobj, key), value))

    def assert_unordered_result(self, result, cls, *expected):
        """As assert_result, but the order of objects is not considered.

        The algorithm is very expensive but not a big deal for the small
        numbers of rows that the test suite manipulates.
        """
        class immutabledict(dict):
            # hashable-by-identity dict so spec dicts can live in a set
            def __hash__(self):
                return id(self)

        found = util.IdentitySet(result)
        expected = set([immutabledict(e) for e in expected])

        for wrong in itertools.filterfalse(lambda o: type(o) == cls, found):
            fail('Unexpected type "%s", expected "%s"' % (
                type(wrong).__name__, cls.__name__))

        if len(found) != len(expected):
            fail('Unexpected object count "%s", expected "%s"' % (
                len(found), len(expected)))

        NOVALUE = object()  # sentinel: attribute absent on the object

        def _compare_item(obj, spec):
            for key, value in spec.items():
                if isinstance(value, tuple):
                    try:
                        self.assert_unordered_result(
                            getattr(obj, key), value[0], *value[1])
                    except AssertionError:
                        return False
                else:
                    if getattr(obj, key, NOVALUE) != value:
                        return False
            return True

        # greedy matching: each spec consumes the first object it matches
        for expected_item in expected:
            for found_item in found:
                if _compare_item(found_item, expected_item):
                    found.remove(found_item)
                    break
            else:
                fail(
                    "Expected %s instance with attributes %s not found." % (
                        cls.__name__, repr(expected_item)))
        return True

    def assert_sql_execution(self, db, callable_, *rules):
        """Run *callable_* with the given assertsql rules active."""
        assertsql.asserter.add_rules(rules)
        try:
            callable_()
            assertsql.asserter.statement_complete()
        finally:
            assertsql.asserter.clear_rules()

    def assert_sql(self, db, callable_, list_, with_sequences=None):
        """Assert exact SQL statements; *with_sequences* replaces *list_* on
        dialects supporting sequences. Dict rules become AllOf(ExactSQL)."""
        if with_sequences is not None and config.db.dialect.supports_sequences:
            rules = with_sequences
        else:
            rules = list_

        newrules = []
        for rule in rules:
            if isinstance(rule, dict):
                newrule = assertsql.AllOf(*[
                    assertsql.ExactSQL(k, v) for k, v in rule.items()
                ])
            else:
                newrule = assertsql.ExactSQL(*rule)
            newrules.append(newrule)

        self.assert_sql_execution(db, callable_, *newrules)

    def assert_sql_count(self, db, callable_, count):
        """Assert that *callable_* emits exactly *count* statements."""
        self.assert_sql_execution(
            db, callable_, assertsql.CountStatements(count))

    @contextlib.contextmanager
    def assert_execution(self, *rules):
        """Context-manager form of assert_sql_execution."""
        assertsql.asserter.add_rules(rules)
        try:
            yield
            assertsql.asserter.statement_complete()
        finally:
            assertsql.asserter.clear_rules()

    def assert_statement_count(self, count):
        """Context manager asserting exactly *count* statements emitted."""
        return self.assert_execution(assertsql.CountStatements(count))
| |
#coding: utf-8
from fabtools import require, git, python, nginx, supervisor, service, files
from fabric.context_managers import cd, shell_env
from fabric.api import put, run, task, env
from os import environ, path
import time, re
from .dash import restart_stats_workers
@task
def test_uwsgi_is_started(now):
    """Wait for the supervisor process uwsgi_<now> to reach RUNNING, then
    smoke-test the API through its uwsgi socket with test_uwsgi.py."""
    # NOTE(review): range(1, 30) gives 29 one-second polls, not 30; the loop
    # falls through silently on timeout and the smoke test below will fail.
    for i in range(1, 30):
        status = supervisor.process_status('uwsgi_{}'.format(now))
        if status == 'RUNNING':
            break
        time.sleep(1)
    testing_file = '/tmp/test_uwsgi.py'
    if files.is_file(testing_file):
        files.remove(testing_file)
    put('files/test_uwsgi.py', '/tmp/')
    require.python.package('six', use_sudo=True)
    output = run('python {} {} {} aa'.format(testing_file, env.uwsgi_socket_api(now),
                                             '{}/ads/'.format(env.conf_api.SERVER_NAME)))
    assert '"message"' in output
    from test_api import test_api
    test_api(testing_file, env.uwsgi_socket_api(now), env.conf_api.SERVER_NAME)
def install_swagger_ui():
    """Clone or refresh the swagger UI repository in $HOME; return its path."""
    with cd('~'):
        repo = 'APITaxi_swagger'
        if not files.exists(repo):
            git.clone('https://github.com/openmaraude/APITaxi_swagger')
        git.checkout(repo)
        git.pull(repo)
        return path.join(run('pwd'), repo)
def install_zupc_cache():
    """Create ~/zupc/zupc holding the static ZUPC page; return its path."""
    with cd('~'):
        cache_dir = path.join(run('pwd'), 'zupc', 'zupc')
        require.files.directory(cache_dir, use_sudo=True)
        require.files.file(path.join(cache_dir, "index.html"),
                           source="files/zupc.html", use_sudo=True)
        return cache_dir
def deploy_nginx_api_site(now):
    """Wire up every service of release ``now``: the api and front uwsgi
    instances, the celery hail worker, the nginx site and a redis helper,
    all managed by supervisor.  ``now`` is the deployment timestamp used to
    namespace per-release files and process names.
    """
    # uwsgi configuration rendered for the API application.
    files.upload_template('templates/uwsgi.ini', env.uwsgi_api_config_path(now),
        context={
            'config_path': env.apitaxi_config_path(now),
            'api_path': env.apitaxi_dir(now),
            'venv_path': env.apitaxi_venv_path(now),
            'uwsgi_file': env.uwsgi_api_file(now),
            'uwsgi_pid_file': env.uwsgi_api_pid_file(now),
            'uwsgi_log_file1': env.uwsgi_logdir + '/api_launcher.log',
            'uwsgi_log_file2': env.uwsgi_logdir + '/api_uwsgi.log',
            'uwsgi_launcher_logdir': env.uwsgi_launcher_logdir,
            'socket': env.uwsgi_socket_api(now),
            'processes': env.wsgi_processes,
            'threads': env.wsgi_threads,
            'now': now
        }
    )
    # Same template, rendered for the front application (shares the venv).
    files.upload_template('templates/uwsgi.ini', env.uwsgi_front_config_path(now),
        context={
            'config_path': env.fronttaxi_config_path(now),
            'api_path': env.fronttaxi_dir(now),
            'venv_path': env.apitaxi_venv_path(now),
            'uwsgi_file': env.uwsgi_front_file(now),
            'uwsgi_pid_file': env.uwsgi_front_pid_file(now),
            'uwsgi_log_file1': env.uwsgi_logdir + '/front_launcher.log',
            'uwsgi_log_file2': env.uwsgi_logdir + '/front_uwsgi.log',
            'socket': env.uwsgi_socket_front(now),
            'processes': env.wsgi_processes,
            'threads': env.wsgi_threads,
            'now': now
        }
    )
    # Register both uwsgi instances with supervisor; names carry the
    # timestamp so several releases can coexist during a switchover.
    uwsgi = path.join(env.apitaxi_venv_path(now), 'bin', 'uwsgi')
    require.supervisor.process('uwsgi_api_{}'.format(now),
        command='{} --ini {}'.format(uwsgi, env.uwsgi_api_config_path(now)),
        directory=env.apitaxi_venv_path(now),
        stdout_logfile = '/var/log/nginx/apitaxi.log',
        user='www-data'
    )
    require.supervisor.process('uwsgi_front_{}'.format(now),
        command='{} --ini {}'.format(uwsgi, env.uwsgi_front_config_path(now)),
        directory=env.apitaxi_venv_path(now),
        stdout_logfile = '/var/log/nginx/fronttaxi.log',
        user='www-data'
    )
    # Smoke test before wiring the rest of the stack.
    test_uwsgi_is_started(now)
    # Celery worker handling the per-release 'send_hail' queue.
    celery = path.join(env.apitaxi_venv_path(now), 'bin', 'celery')
    worker_name = 'send_hail_{}'.format(now)
    command = '{} worker --app=celery_worker.celery -Q {} -n {} --workdir={}'
    require.supervisor.process(worker_name,
        command=command.format(celery, worker_name, worker_name, env.apitaxi_dir(now)),
        directory=env.apitaxi_dir(now),
        stdout_logfile='/var/log/celery/send_hail.log',
        user='www-data',
        environment='APITAXI_CONFIG_FILE=prod_settings.py'
    )
    # Static assets served next to the API: swagger doc and the ZUPC cache.
    swagger_dir = install_swagger_ui()
    zupc_dir = install_zupc_cache()
    require.nginx.site('apitaxi',
        template_source='templates/nginx_site.conf',
        domain_name=getattr(env.conf_api, 'HOST', 'localhost'),
        env='NOW={}'.format(now),
        port=getattr(env.conf_api, 'PORT', 80),
        socket_api=env.uwsgi_socket_api(now),
        socket_front=env.uwsgi_socket_front(now),
        doc_dir=swagger_dir,
        zupc_cache_dir=zupc_dir
    )
    # Redis started through a generated wrapper script, under supervisor.
    path_redis = '{}/redis.sh'.format(env.deployment_dir(now))
    require.files.template_file(path=path_redis,
        template_source='templates/redis.sh',
        context={'deployment_dir':env.deployment_dir(now)},
        mode='770')
    require.supervisor.process('redis',
        command=path_redis,
        stdout_logfile='/var/log/redis/error.log'
    )
def clean_directories(now):
    """Delete deployment directories and api sockets of previous releases.

    Everything except the artifacts belonging to release ``now`` is removed.
    """
    # Shell glob expansion on the remote host; one path per output line.
    listing = run('for i in {}/deployment_*; do echo $i; done'.format(env.deploy_dir))
    for entry in (line.replace('\r', '') for line in listing.split("\n")):
        if files.is_dir(entry) and entry != env.deployment_dir(now):
            files.remove(entry, recursive=True)
    listing = run('for i in {}/apitaxi_*; do echo $i; done'.format(env.uwsgi_socket_dir))
    for sock in (line.replace('\r', '') for line in listing.split("\n")):
        if sock != env.uwsgi_socket_api(now):
            files.remove(sock, use_sudo=True)
# The pid file should be removed by uwsgi itself when the process stops.
def stop_old_processes(now):
    """Stop and unregister supervisor processes from previous releases.

    Every /etc/supervisor/conf.d entry whose name does not contain the
    current timestamp ``now`` is stopped (through ``visitor``) and its
    configuration file deleted.
    """
    def stop_process(name, visitor):
        # List every supervisor config whose program name starts with `name`.
        l = run('for i in /etc/supervisor/conf.d/{}_*; do echo $i; done'.format(name)).split("\n")
        for f in [f.replace('\r', '') for f in l]:
            print 'To remove: {}'.format(f)
            if str(now) in f:
                # Belongs to the current release: keep it running.
                continue
            file_ = f.split('/')[-1]
            # Process name == config file name without the '.conf' suffix.
            process = file_[:-len('.conf')]
            visitor(process)
            files.remove(f, use_sudo=True)
    stop_process('uwsgi', lambda p:supervisor.stop_process(p))
    def stop_queues(process):
        # A hail request is marked failed after 15s in 'received', so after
        # ~16 polls we may shut the worker down even if its queue is not
        # empty yet.
        for i in range(1, 17):
            res = run('python manage.py active_tasks {}'.format(process))
            if res == '':
                break
            time.sleep(1)
        supervisor.stop_process(process)
    with cd(env.apitaxi_dir(now)):
        with python.virtualenv(env.apitaxi_venv_path(now)),\
             shell_env(APITAXI_CONFIG_FILE=env.apitaxi_config_path(now)):
            # Drain the hail queue before killing its celery worker.
            stop_process('send_hail', stop_queues)
def deploy_front(now):
    """Fetch, unpack and configure the front application for release ``now``."""
    with cd(env.deployment_dir(now)):
        run(u'wget {} -O front.zip'.format(env.fronttaxi_archive))
        run('unzip front.zip')
    with cd(env.fronttaxi_dir(now)), python.virtualenv(env.apitaxi_venv_path(now)):
        # The front shares the API virtualenv.
        python.install_requirements('requirements.txt')
    put(environ['APITAXI_CONFIG_FILE'], env.fronttaxi_config_path(now))
def get_admin_key():
    """Return the 'admin' user's API key from the database ('' when absent).

    The quoting is deliberate: each '"'"' sequence closes and reopens the
    outer single-quoted shell string so that "user" stays double-quoted for
    postgres while 'admin' reaches psql single-quoted.
    """
    return run(
        """psql {} -tAc 'SELECT apikey FROM "user" where email='"'"'admin'"'"';'"""\
        .format(env.conf_api.SQLALCHEMY_DATABASE_URI))
def install_admin_user():
    """Create the admin user unless it already has an API key."""
    if not get_admin_key():
        run('python manage.py create_admin admin')
@task
def deploy_api(commit='master'):
    """Deploy the API release identified by ``commit`` (default 'master').

    Creates a timestamped deployment directory, installs the virtualenv and
    requirements, migrates the database, deploys front + nginx, then retires
    artifacts and processes belonging to previous releases.
    """
    # Timestamp namespacing every per-release resource (dirs, sockets,
    # supervisor process names).
    now = int(time.time())
    require.files.directory(env.deployment_dir(now))
    with cd(env.deployment_dir(now)):
        run(u'wget {}'.format(env.apitaxi_archive.format(commit)))
        run('unzip {}.zip'.format(commit))
        if commit != 'master':
            # Later steps expect the APITaxi-master directory name.
            run('mv APITaxi-{} APITaxi-master'.format(commit))
    with cd(env.apitaxi_dir(now)):
        require.python.virtualenv(env.apitaxi_venv_path(now), venv_python="python3")
        with python.virtualenv(env.apitaxi_venv_path(now)):
            python.install_pip(use_sudo=False)
            require.python.package('uwsgi')
            python.install_requirements('requirements.txt')
            put(environ['APITAXI_CONFIG_FILE'], env.apitaxi_config_path(now))
            with shell_env(APITAXI_CONFIG_FILE=env.apitaxi_config_path(now)):
                # Wait (up to ~30s) for supervisor before touching the DB.
                for i in range(1, 30):
                    if service.is_running('supervisor'):
                        break
                    time.sleep(1)
                run('python manage.py db upgrade')
                install_admin_user()
    deploy_front(now)
    deploy_nginx_api_site(now)
    if not service.is_running('nginx'):
        service.start('nginx')
    # Retire everything that belonged to older releases.
    clean_directories(now)
    stop_old_processes(now)
    restart_stats_workers(now)
| |
from unittest2 import TestCase
from lxml import etree
from babelsubs.storage import get_contents, SubtitleSet, TTS_NAMESPACE_URI
from babelsubs.generators.srt import SRTGenerator
from babelsubs.parsers import SubtitleParserError
from babelsubs.parsers.srt import SRTParser
from babelsubs.tests import utils
import babelsubs
class SRTParsingTest(TestCase):
    """Parsing tests for the SRT format: timing, inline markup, escaping,
    unsynced subtitles and round-trips through the internal DFXP storage."""

    def test_basic(self):
        """simple.srt parses into the expected number of subtitles."""
        subs = utils.get_subs("simple.srt")
        self.assertEquals(len(subs), 19)

    def test_internal_format(self):
        """Times are stored in milliseconds; markup maps via MAPPINGS."""
        subs = utils.get_subs("simple.srt")
        parsed = subs.to_internal()
        sub_data = [x for x in parsed.subtitle_items(SRTGenerator.MAPPINGS)]
        self.assertEquals(sub_data[0].start_time, 4)
        self.assertEquals(sub_data[0].end_time, 2093)
        self.assertEquals(sub_data[0].text, "We started <b>Universal Subtitles</b> because we believe")

    def test_round_trip(self):
        """parse -> generate -> parse yields identical subtitle items."""
        subs1 = utils.get_subs("simple.srt")
        parsed1 = subs1.to_internal()
        srt_ouput = unicode(SRTGenerator(parsed1))
        subs2 = SRTParser(srt_ouput, 'en')
        parsed2 = subs2.to_internal()
        self.assertEquals(len(subs1), len(subs2))
        for x1, x2 in zip([x for x in parsed1.subtitle_items(SRTGenerator.MAPPINGS)], \
                          [x for x in parsed2.subtitle_items(SRTGenerator.MAPPINGS)]):
            self.assertEquals(x1, x2)

    def test_timed_data_parses_correctly(self):
        """A timed file loads through the top-level API and converts to srt."""
        subs = utils.get_data_file_path('timed_text.srt')
        parsed = babelsubs.load_from_file(subs, type='srt', language='en')
        self.assertNotEquals(parsed, None)
        try:
            srt = parsed.to('srt')
            self.assertNotEquals(srt, None)
        except Exception, e:
            self.fail(e)

    def test_curly_brackets(self):
        """Curly brackets are plain text, not markup."""
        subs = utils.get_subs("curly_brackets.srt")
        parsed = subs.to_internal()
        sub_data = list(parsed.subtitle_items(SRTGenerator.MAPPINGS))
        self.assertEquals(len(sub_data), 1)
        self.assertEquals(sub_data[0].text, "{ a } {{ b }} c")

    def test_formatting(self):
        """<b>/<i>/<u> and literal \\n become TTML span/br elements."""
        subs = u"""1
00:00:00,004 --> 00:00:02,093
We\n started <b>Universal Subtitles</b> <i>because</i> we <u>believe</u>
"""
        parsed = SRTParser(subs, 'en')
        internal = parsed.to_internal()
        self.assertEquals(len(parsed), 1)
        element = internal.get_subtitles()[0]
        self.assertEquals(len(element.getchildren()), 4)
        br, bold, italics, underline = element.getchildren()
        self.assertEquals(br.text, None)
        self.assertEquals(' started ', br.tail)
        self.assertEquals(br.tag, '{http://www.w3.org/ns/ttml}br')
        self.assertEquals(bold.text, 'Universal Subtitles')
        self.assertEquals(bold.tail, ' ')
        self.assertEquals(bold.tag, '{http://www.w3.org/ns/ttml}span')
        self.assertIn('{%s}fontWeight' % TTS_NAMESPACE_URI, bold.attrib)
        self.assertEquals(bold.attrib['{%s}fontWeight' % TTS_NAMESPACE_URI], 'bold')
        self.assertEquals(italics.text, 'because')
        self.assertEquals(italics.tail, ' we ')
        self.assertEquals(italics.tag, '{http://www.w3.org/ns/ttml}span')
        self.assertIn('{%s}fontStyle' % TTS_NAMESPACE_URI, italics.attrib)
        self.assertEquals(italics.attrib['{%s}fontStyle' % TTS_NAMESPACE_URI], 'italic')
        self.assertEquals(underline.text, 'believe')
        self.assertEquals(underline.tail, None)
        self.assertEquals(underline.tag, '{http://www.w3.org/ns/ttml}span')
        self.assertIn('{%s}textDecoration' % TTS_NAMESPACE_URI, underline.attrib)
        self.assertEquals(underline.attrib['{%s}textDecoration' % TTS_NAMESPACE_URI], 'underline')
        # Formatting must also survive a full round-trip.
        output = unicode(SRTGenerator(internal))
        parsed2 = SRTParser(output, 'en')
        internal2 = parsed2.to_internal()
        for x1, x2 in zip([x for x in internal.subtitle_items(SRTGenerator.MAPPINGS)], \
                          [x for x in internal2.subtitle_items(SRTGenerator.MAPPINGS)]):
            self.assertEquals(x1, x2)

    def test_speaker_change(self):
        """'>>' speaker markers are kept as plain text around markup."""
        subs = """1
00:00:00,004 --> 00:00:02,093
And know, Mr. <b>Amara</b> will talk.\n >> Hello, and welcome.
"""
        parsed = SRTParser(subs, 'en')
        internal = parsed.to_internal()
        self.assertEquals(len(parsed), 1)
        element = internal.get_subtitles()[0]
        self.assertTrue(len(element.getchildren()), 2)
        self.assertEquals(get_contents(element), 'And know, Mr. Amara will talk. >> Hello, and welcome.')
        self.assertEquals(etree.tostring(element).strip(),
                          '<p xmlns="http://www.w3.org/ns/ttml" xmlns:tts="http://www.w3.org/ns/ttml#styling" begin="00:00:00.004" end="00:00:02.093">And know, Mr. <span tts:fontWeight="bold">Amara</span> will talk.<br/> >> Hello, and welcome.</p>')
        self.assertEquals(element.getchildren()[1].tail, ' >> Hello, and welcome.')
        output = unicode(SRTGenerator(internal))
        parsed2 = SRTParser(output, 'en')
        internal2 = parsed2.to_internal()
        for x1, x2 in zip([x for x in internal.subtitle_items(SRTGenerator.MAPPINGS)], \
                          [x for x in internal2.subtitle_items(SRTGenerator.MAPPINGS)]):
            self.assertEquals(x1, x2)

    def test_ampersand_escaping(self):
        """XML-escaped ampersands come back as literal '&'."""
        subs = utils.get_subs("simple.srt")
        parsed = subs.to_internal()
        sub_data = [x for x in parsed.subtitle_items(SRTGenerator.MAPPINGS)]
        self.assertEquals(sub_data[16].text, "such as MP4, theora, webM and <i>&</i> HTML 5.")

    def test_unsynced_generator(self):
        """Subtitles without timing round-trip using the 99:59:59,999 stub."""
        subs = SubtitleSet('en')
        for x in xrange(0,5):
            subs.append_subtitle(None, None,"%s" % x)
        output = unicode(SRTGenerator(subs))
        parsed = SRTParser(output,'en')
        internal = parsed.to_internal()
        subs = [x for x in internal.subtitle_items()]
        self.assertEqual(len(internal), 5)
        for i,sub in enumerate(subs):
            self.assertEqual(sub.start_time, None)
            self.assertEqual(sub.end_time, None)
        generated = SRTGenerator(internal)
        self.assertEqual(generated.format_time(None), u'99:59:59,999')
        self.assertIn(u'''1\r\n99:59:59,999 --> 99:59:59,999\r\n0\r\n\r\n2\r\n99:59:59,999 --> 99:59:59,999\r\n1\r\n\r\n3\r\n99:59:59,999 --> 99:59:59,999\r\n2\r\n\r\n4\r\n99:59:59,999 --> 99:59:59,999\r\n3\r\n\r\n5\r\n99:59:59,999 --> 99:59:59,999\r\n4\r\n''',
                      unicode(generated))

    def test_invalid(self):
        """Garbage input raises SubtitleParserError."""
        with self.assertRaises(SubtitleParserError):
            SRTParser ("this\n\nisnot a valid subs format","en")

    def test_mixed_newlines(self):
        # some folks will have valid srts, then edit them on an editor
        # that will save line breaks on the current platform separator
        # e.g. \n on unix , \r...
        # make sure we normalize this stuff
        subs = utils.get_subs("Untimed_text.srt")
        parsed = subs.to_internal()
        self.assertEqual(len(subs), 43)
        # second sub should have a line break
        self.assertIn('<p begin="99:59:59.000" end="99:59:59.000">I\'m gutted. <br/>Absolutely gutted.</p>',
                      parsed.to_xml())

    def test_complex_formatting(self):
        # this is the srt used in our selenium tests
        subs = utils.get_subs("Timed_en.srt")
        self.assertEqual(len(subs), 72)
class SRTGeneratorTest(TestCase):
    """Markup produced when generating SRT items from a DFXP source."""

    def setUp(self):
        # Subtitle items converted with the SRT markup mappings.
        self.dfxp = utils.get_subs("with-formatting.dfxp").to_internal()
        self.subs = self.dfxp.subtitle_items(mappings=SRTGenerator.MAPPINGS)

    def test_generated_formatting(self):
        """b/i/u map to SRT tags; stray html and '>>' stay literal."""
        self.assertEqual(self.subs[2].text,'It has <b>bold</b> formatting' )
        self.assertEqual(self.subs[3].text,'It has <i>italics</i> too' )
        self.assertEqual(self.subs[4].text,'And why not <u>underline</u>' )
        self.assertEqual(self.subs[5].text,'It has a html tag <a> should be in brackets' )
        self.assertEqual(self.subs[6].text,'It has speaker changes >>>' )
class SRTMultiLines(TestCase):
    """Interaction between line breaks and (possibly nested) italics when
    rendering DFXP content with the SRT mappings."""

    def setUp(self):
        self.dfxp = utils.get_subs("multiline-italics.dfxp").to_internal()

    def test_two_line_italics(self):
        """Line break inside italics. """
        expected = """<i>multi-line\nitalicized</i>"""
        els = self.dfxp.get_subtitles()
        self.assertEqual(expected,
                         self.dfxp.get_content_with_markup(els[2],
                                                           mappings=SRTGenerator.MAPPINGS))

    def test_italics_after_linebreak(self):
        """3 lines with italicized 2nd and 3rd. """
        expected = ("this is the first line\n<i>multi-line\n"
                    "italicized second and third</i>")
        els = self.dfxp.get_subtitles()
        self.assertEqual(expected,
                         self.dfxp.get_content_with_markup(els[3],
                                                           mappings=SRTGenerator.MAPPINGS))

    def test_italics_before_linebreak(self):
        """italicized lines followed by linebreak and regular text."""
        expected = ("<i>italicized</i>\nno italics last line")
        els = self.dfxp.get_subtitles()
        self.assertEqual(expected,
                         self.dfxp.get_content_with_markup(els[4],
                                                           mappings=SRTGenerator.MAPPINGS))

    def test_linebreak_no_italics(self):
        """Linebreak with no italics"""
        expected = ('this is line 1 \nthis is line 2')
        els = self.dfxp.get_subtitles()
        self.assertEqual(expected,
                         self.dfxp.get_content_with_markup(els[5],
                                                           mappings=SRTGenerator.MAPPINGS))

    def test_linebreak_before_italics(self):
        """linebreak before italics. """
        expected = ('this is line 1 \n<i>italicized</i>\nno italics last line')
        els = self.dfxp.get_subtitles()
        self.assertEqual(expected,
                         self.dfxp.get_content_with_markup(els[6],
                                                           mappings=SRTGenerator.MAPPINGS))

    def test_linebreak_in_nested_tags(self):
        """Line breaks around italics containing nested bold."""
        expected = ("this is line 1 \n<i>italicized <b>this is bold and italics</b></i>\nno italics last line")
        els = self.dfxp.get_subtitles()
        self.assertEqual(expected,
                         self.dfxp.get_content_with_markup(els[7],
                                                           mappings=SRTGenerator.MAPPINGS))
| |
"""
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
from __future__ import division
from functools import partial
import warnings
import numpy as np
from scipy import linalg
from scipy.sparse import issparse, csr_matrix
from . import check_random_state
from .fixes import np_version
from ._logistic_sigmoid import _log_logistic_sigmoid
from ..externals.six.moves import xrange
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array
from ..exceptions import NonBLASDotWarning
def norm(x):
    """Compute the Euclidean or Frobenius norm of x.

    Returns the Euclidean norm when x is a vector, the Frobenius norm when x
    is a matrix (2-d array). More precise than sqrt(squared_norm(x)).
    """
    arr = np.asarray(x)
    # Let BLAS pick the precision-appropriate nrm2 routine for this dtype.
    blas_nrm2 = linalg.get_blas_funcs(['nrm2'], [arr])[0]
    return blas_nrm2(arr)
# Newer NumPy has a ravel that needs less copying.
if np_version < (1, 7, 1):
    # Old NumPy: np.ravel has no ``order='K'`` support.
    _ravel = np.ravel
else:
    # order='K' reads elements in memory-layout order, avoiding a copy
    # whenever the input is already contiguous in any order.
    _ravel = partial(np.ravel, order='K')
def squared_norm(x):
    """Squared Euclidean or Frobenius norm of x.

    Returns the Euclidean norm when x is a vector, the Frobenius norm when x
    is a matrix (2-d array). Faster than norm(x) ** 2.
    """
    flat = _ravel(x)
    # dot of a vector with itself == sum of squares.
    return np.dot(flat, flat)
def row_norms(X, squared=False):
    """Row-wise (squared) Euclidean norm of X.

    Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
    matrices and does not create an X.shape-sized temporary.
    Performs no input validation.
    """
    if issparse(X):
        # csr_row_norms needs CSR layout.
        X = X if isinstance(X, csr_matrix) else csr_matrix(X)
        norms = csr_row_norms(X)
    else:
        # einsum computes the per-row dot product without a temporary.
        norms = np.einsum('ij,ij->i', X, X)
    # In-place sqrt when the caller wants plain norms.
    return norms if squared else np.sqrt(norms, norms)
def fast_logdet(A):
    """Compute log(det(A)) for A symmetric.

    Equivalent to np.log(np.linalg.det(A)) but more robust: returns -Inf
    when det(A) is non-positive or not defined.
    """
    sign, logdet = np.linalg.slogdet(A)
    # 'not sign > 0' also catches a NaN sign, unlike 'sign <= 0'.
    if not sign > 0:
        return -np.inf
    return logdet
def _impose_f_order(X):
    """Return (Fortran-ordered array for X, whether X.T was used)."""
    # Important to access flags instead of calling np.isfortran; this
    # catches corner cases.
    if X.flags.c_contiguous:
        return check_array(X.T, copy=False, order='F'), True
    return check_array(X, copy=False, order='F'), False
def _fast_dot(A, B):
    """BLAS gemm-backed dot product of two 2-d float arrays.

    Raises ValueError whenever the inputs cannot be handled by gemm; the
    ``fast_dot`` wrapper catches that and falls back to np.dot.
    """
    if B.shape[0] != A.shape[A.ndim - 1]:  # check adopted from '_dotblas.c'
        raise ValueError
    # gemm requires both operands to share a 32- or 64-bit float dtype.
    if A.dtype != B.dtype or any(x.dtype not in (np.float32, np.float64)
                                 for x in [A, B]):
        warnings.warn('Falling back to np.dot. '
                      'Data must be of same type of either '
                      '32 or 64 bit float for the BLAS function, gemm, to be '
                      'used for an efficient dot operation. ',
                      NonBLASDotWarning)
        raise ValueError
    # gemm brings no benefit for vector-like or non-2d operands.
    if min(A.shape) == 1 or min(B.shape) == 1 or A.ndim != 2 or B.ndim != 2:
        raise ValueError
    # scipy 0.9 compliant API
    dot = linalg.get_blas_funcs(['gemm'], (A, B))[0]
    A, trans_a = _impose_f_order(A)
    B, trans_b = _impose_f_order(B)
    return dot(alpha=1.0, a=A, b=B, trans_a=trans_a, trans_b=trans_b)
def _have_blas_gemm():
try:
linalg.get_blas_funcs(['gemm'])
return True
except (AttributeError, ValueError):
warnings.warn('Could not import BLAS, falling back to np.dot')
return False
# Only use fast_dot for older NumPy; newer ones have tackled the speed issue.
if np_version < (1, 7, 2) and _have_blas_gemm():
    def fast_dot(A, B):
        """Compute fast dot products directly calling BLAS.

        This function calls BLAS directly while warranting Fortran contiguity.
        This helps avoiding extra copies `np.dot` would have created.
        For details see section `Linear Algebra on large Arrays`:
        http://wiki.scipy.org/PerformanceTips

        Parameters
        ----------
        A, B: instance of np.ndarray
            Input arrays. Arrays are supposed to be of the same dtype and to
            have exactly 2 dimensions. Currently only floats are supported.
            In case these requirements aren't met np.dot(A, B) is returned
            instead. To activate the related warning issued in this case
            execute the following lines of code:

            >> import warnings
            >> from sklearn.exceptions import NonBLASDotWarning
            >> warnings.simplefilter('always', NonBLASDotWarning)
        """
        try:
            return _fast_dot(A, B)
        except ValueError:
            # Maltyped or malformed data: _fast_dot signals every
            # unsupported case with ValueError, so fall back to np.dot.
            return np.dot(A, B)
else:
    # Modern NumPy (or no BLAS gemm): plain np.dot is already fast.
    fast_dot = np.dot
def density(w, **kwargs):
    """Compute the density (fraction of non-zero cells) of a sparse vector.

    Returns a value between 0 and 1; 0 when w is None.
    """
    if hasattr(w, "toarray"):
        # Sparse matrix: stored entries over the total number of cells.
        return float(w.nnz) / (w.shape[0] * w.shape[1])
    return 0 if w is None else float((w != 0).sum()) / w.size
def safe_sparse_dot(a, b, dense_output=False):
    """Dot product that handles the sparse matrix case correctly.

    Uses BLAS GEMM as replacement for numpy.dot where possible
    to avoid unnecessary copies.
    """
    if not (issparse(a) or issparse(b)):
        return fast_dot(a, b)
    # Sparse operands: '*' is matrix multiplication for scipy matrices.
    ret = a * b
    if dense_output and hasattr(ret, "toarray"):
        ret = ret.toarray()
    return ret
def randomized_range_finder(A, size, n_iter,
                            power_iteration_normalizer='auto',
                            random_state=None):
    """Compute an orthonormal matrix whose range approximates the range of A.

    Parameters
    ----------
    A: 2D array
        The input data matrix.
    size: integer
        Number of columns of the returned basis.
    n_iter: integer
        Number of power iterations used to stabilize the result.
    power_iteration_normalizer: 'auto' (default), 'QR', 'LU', 'none'
        How the power iterations are normalized: 'QR' is the slowest but
        most accurate, 'none' the fastest but numerically unstable when
        `n_iter` is large (typically 5 or larger), 'LU' in between. 'auto'
        applies no normalization if `n_iter` <= 2 and switches to LU
        otherwise.

        .. versionadded:: 0.18

    random_state: RandomState or an int seed (0 by default)
        A random number generator instance.

    Returns
    -------
    Q: 2D array
        Matrix with `size` orthonormal columns whose range approximates
        well the range of the input matrix A.

    Notes
    -----
    Follows Algorithm 4.3 of Halko, et al., 2009
    (http://arxiv.org/pdf/0909.4061) and A. Szlam et al., 2014.
    """
    random_state = check_random_state(random_state)

    # Start from Gaussian random vectors of shape (A.shape[1], size).
    Q = random_state.normal(size=(A.shape[1], size))

    if power_iteration_normalizer == 'auto':
        # Skipping normalization is cheap and stable enough for <= 2 passes.
        power_iteration_normalizer = 'LU' if n_iter > 2 else 'none'

    # Power iterations 'imprint' the dominant singular vectors of A in Q.
    for _ in range(n_iter):
        AQ = safe_sparse_dot(A, Q)
        if power_iteration_normalizer == 'none':
            Q = safe_sparse_dot(A.T, AQ)
        elif power_iteration_normalizer == 'LU':
            Q, _lu = linalg.lu(AQ, permute_l=True)
            Q, _lu = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
        elif power_iteration_normalizer == 'QR':
            Q, _r = linalg.qr(AQ, mode='economic')
            Q, _r = linalg.qr(safe_sparse_dot(A.T, Q), mode='economic')

    # Sample the range of A and extract an orthonormal basis of it.
    Q, _r = linalg.qr(safe_sparse_dot(A, Q), mode='economic')
    return Q
def randomized_svd(M, n_components, n_oversamples=10, n_iter='auto',
                   power_iteration_normalizer='auto', transpose='auto',
                   flip_sign=True, random_state=0):
    """Compute a truncated randomized SVD.

    Parameters
    ----------
    M: ndarray or sparse matrix
        Matrix to decompose.
    n_components: int
        Number of singular values and vectors to extract.
    n_oversamples: int (default is 10)
        Additional number of random vectors used to sample the range of M,
        for conditioning; n_components + n_oversamples vectors are drawn in
        total. Smaller values are faster but may degrade the approximation.
    n_iter: int or 'auto' (default is 'auto')
        Number of power iterations, useful for noisy problems. 'auto' picks
        4, or 7 when `n_components` is small (< .1 * min(M.shape)), which
        improves precision with few components.

        .. versionchanged:: 0.18

    power_iteration_normalizer: 'auto' (default), 'QR', 'LU', 'none'
        Normalization of the power iterations: 'QR' slowest/most accurate,
        'none' fastest but unstable for large `n_iter`, 'LU' in between.
        'auto' uses no normalization if `n_iter` <= 2, LU otherwise.

        .. versionadded:: 0.18

    transpose: True, False or 'auto' (default)
        Whether to run the algorithm on M.T instead of M; the result is
        approximately the same. 'auto' transposes when
        M.shape[1] > M.shape[0], which tends to be a little faster here.

        .. versionchanged:: 0.18

    flip_sign: boolean, (True by default)
        Resolve the SVD sign ambiguity by making the largest loadings of
        each component in the left singular vectors positive.
    random_state: RandomState or an int seed (0 by default)
        A random number generator instance to make behavior deterministic.

    Notes
    -----
    Finds a (usually very good) approximate truncated SVD using
    randomization to speed up computations; particularly fast on large
    matrices when only a few components are needed. See Halko et al., 2009
    (http://arxiv.org/abs/arXiv:0909.4061), Martinsson/Rokhlin/Tygert, and
    A. Szlam et al., 2014.
    """
    random_state = check_random_state(random_state)
    n_random = n_components + n_oversamples
    n_samples, n_features = M.shape

    if n_iter == 'auto':
        # 7 iterations improve precision when few components are requested;
        # 7/4 were found a good compromise for PCA, see issue #5299.
        n_iter = 7 if n_components < .1 * min(M.shape) else 4

    if transpose == 'auto':
        transpose = n_samples < n_features
    if transpose:
        # This implementation is a bit faster with smaller shape[1].
        M = M.T

    Q = randomized_range_finder(M, n_random, n_iter,
                                power_iteration_normalizer, random_state)

    # Project M onto the (k + p)-dimensional subspace spanned by Q.
    B = safe_sparse_dot(Q.T, M)

    # Exact SVD of the thin projected matrix, lifted back through Q.
    Uhat, s, V = linalg.svd(B, full_matrices=False)
    del B
    U = np.dot(Q, Uhat)

    if flip_sign:
        # Base the sign decision on U, unless the problem was transposed,
        # in which case V plays U's role.
        U, V = svd_flip(U, V, u_based_decision=not transpose)

    if transpose:
        # Transpose back the results according to the input convention.
        return V[:n_components, :].T, s[:n_components], U[:, :n_components].T
    return U[:, :n_components], s[:n_components], V[:n_components, :]
def logsumexp(arr, axis=0):
    """Compute log(sum(exp(arr))) along ``axis``, avoiding over/underflow.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.arange(10)
    >>> np.log(np.sum(np.exp(a)))
    9.4586297444267107
    >>> logsumexp(a)
    9.4586297444267107
    """
    arr = np.rollaxis(arr, axis)
    # Normalize by the maximum before exponentiating; the shift is added
    # back after the log, so exp() never sees huge magnitudes.
    vmax = arr.max(axis=0)
    return vmax + np.log(np.sum(np.exp(arr - vmax), axis=0))
def weighted_mode(a, w, axis=0):
    """Return the weighted modal (most common) value along an axis.

    If there is more than one such value, only the first is returned; the
    total weight of the modal bin is also returned. Extension of the
    algorithm in scipy.stats.mode.

    Parameters
    ----------
    a : array_like
        n-dimensional array of which to find mode(s).
    w : array_like
        n-dimensional array of weights for each value.
    axis : int, optional
        Axis along which to operate; None flattens the input first.

    Returns
    -------
    vals : ndarray
        Array of modal values.
    score : ndarray
        Array of weighted counts for each mode.

    Examples
    --------
    >>> from sklearn.utils.extmath import weighted_mode
    >>> x = [4, 1, 4, 2, 4, 2]
    >>> weighted_mode(x, [1, 1, 1, 1, 1, 1])
    (array([ 4.]), array([ 3.]))
    >>> weighted_mode(x, [1, 3, 0.5, 1.5, 1, 2])  # deweight the 4's
    (array([ 2.]), array([ 3.5]))

    See Also
    --------
    scipy.stats.mode
    """
    if axis is None:
        a = np.ravel(a)
        w = np.ravel(w)
        axis = 0
    else:
        a = np.asarray(a)
        w = np.asarray(w)

    if a.shape != w.shape:
        # Broadcast the weights up to a's shape.
        w = np.zeros(a.shape, dtype=w.dtype) + w

    out_shape = list(a.shape)
    out_shape[axis] = 1
    best_vals = np.zeros(out_shape)
    best_scores = np.zeros(out_shape)
    # One pass per distinct value: sum its weights along the axis and keep
    # the running winner.
    for candidate in np.unique(np.ravel(a)):
        weights_here = np.zeros(a.shape)
        mask = (a == candidate)
        weights_here[mask] = w[mask]
        scores = np.expand_dims(np.sum(weights_here, axis), axis)
        best_vals = np.where(scores > best_scores, candidate, best_vals)
        best_scores = np.maximum(scores, best_scores)
    return best_vals, best_scores
def pinvh(a, cond=None, rcond=None, lower=True):
    """Compute the (Moore-Penrose) pseudo-inverse of a hermetian matrix.

    Calculate a generalized inverse of a symmetric matrix using its
    eigenvalue decomposition and including all 'large' eigenvalues.

    Parameters
    ----------
    a : array, shape (N, N)
        Real symmetric or complex hermetian matrix to be pseudo-inverted.
    cond : float or None, default None
        Cutoff for 'small' eigenvalues; eigenvalues smaller than
        cond * largest_eigenvalue are treated as zero. None or -1 selects
        a suitable machine precision.
    rcond : float or None, default None (deprecated)
        Same meaning as ``cond``; takes precedence when given.
    lower : boolean
        Whether the pertinent array data is taken from the lower or upper
        triangle of a. (Default: lower)

    Returns
    -------
    B : array, shape (N, N)

    Raises
    ------
    LinAlgError
        If eigenvalue does not converge.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.random.randn(9, 6)
    >>> a = np.dot(a, a.T)
    >>> B = pinvh(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
    True
    """
    a = np.asarray_chkfinite(a)
    eigvals, eigvecs = linalg.eigh(a, lower=lower)

    if rcond is not None:
        # Deprecated spelling wins over cond.
        cond = rcond
    if cond in [None, -1]:
        type_char = eigvecs.dtype.char.lower()
        cond = {'f': 1E3, 'd': 1E6}[type_char] * np.finfo(type_char).eps

    # Unlike the SVD case, eigh can yield negative eigenvalues, so the
    # cutoff is applied to |lambda|.
    large = np.abs(eigvals) > cond * np.abs(eigvals).max()
    inv_diag = np.zeros_like(eigvals)
    inv_diag[large] = 1.0 / eigvals[large]

    return np.dot(eigvecs * inv_diag, np.conjugate(eigvecs).T)
def cartesian(arrays, out=None):
    """Generate a cartesian product of input arrays.

    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray
        Array to place the cartesian product in.

    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays; dtype follows the first input array.

    Examples
    --------
    >>> cartesian(([1, 2, 3], [4, 5], [6, 7]))
    array([[1, 4, 6],
           [1, 4, 7],
           [1, 5, 6],
           [1, 5, 7],
           [2, 4, 6],
           [2, 4, 7],
           [2, 5, 6],
           [2, 5, 7],
           [3, 4, 6],
           [3, 4, 7],
           [3, 5, 6],
           [3, 5, 7]])
    """
    arrays = [np.asarray(x) for x in arrays]
    dtype = arrays[0].dtype

    # np.indices enumerates every coordinate combination; flatten each
    # index grid into one column per input array.
    ix = np.indices(tuple(len(x) for x in arrays))
    ix = ix.reshape(len(arrays), -1).T

    if out is None:
        out = np.empty_like(ix, dtype=dtype)
    for col, arr in enumerate(arrays):
        out[:, col] = arr[ix[:, col]]
    return out
def svd_flip(u, v, u_based_decision=True):
    """Sign correction to ensure deterministic output from SVD.

    Adjusts the columns of u and the rows of v (in place) such that the
    loadings in the columns in u that are largest in absolute value are
    always positive.

    Parameters
    ----------
    u, v : ndarray
        u and v are the output of `linalg.svd` or
        `sklearn.utils.extmath.randomized_svd`, with matching inner
        dimensions so one can compute `np.dot(u * s, v)`.
    u_based_decision : boolean, (default=True)
        If True, use the columns of u as the basis for sign flipping;
        otherwise use the rows of v. Generally algorithm dependent.

    Returns
    -------
    u_adjusted, v_adjusted : arrays with the same dimensions as the input.
    """
    if u_based_decision:
        # Sign of the largest-magnitude entry in each column of u.
        pivot_rows = np.argmax(np.abs(u), axis=0)
        signs = np.sign(u[pivot_rows, np.arange(u.shape[1])])
    else:
        # Sign of the largest-magnitude entry in each row of v.
        pivot_cols = np.argmax(np.abs(v), axis=1)
        signs = np.sign(v[np.arange(v.shape[0]), pivot_cols])
    u *= signs
    v *= signs[:, np.newaxis]
    return u, v
def log_logistic(X, out=None):
    """Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.

    This implementation is numerically stable because it splits positive and
    negative values::

        -log(1 + exp(-x_i))     if x_i > 0
        x_i - log(1 + exp(x_i)) if x_i <= 0

    For the ordinary logistic function, use ``sklearn.utils.fixes.expit``.

    Parameters
    ----------
    X: array-like, shape (M, N) or (M, )
        Argument to the logistic function
    out: array-like, shape: (M, N) or (M, ), optional:
        Preallocated output array.

    Returns
    -------
    out: array, shape (M, N) or (M, )
        Log of the logistic function evaluated at every point in x

    Notes
    -----
    See the blog post describing this implementation:
    http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
    """
    # Remember whether the caller passed a vector so the 2-D working shape
    # can be squeezed back at the end.
    squeeze_result = (X.ndim == 1)
    X = check_array(np.atleast_2d(X), dtype=np.float64)
    n_samples, n_features = X.shape
    if out is None:
        out = np.empty_like(X)
    # The compiled helper writes the stable log-sigmoid directly into `out`.
    _log_logistic_sigmoid(n_samples, n_features, X, out)
    return np.squeeze(out) if squeeze_result else out
def softmax(X, copy=True):
    """Calculate the softmax function.

    The softmax function is calculated by
    np.exp(X) / np.sum(np.exp(X), axis=1)

    Exponentiating large values overflows, so the largest value in each row
    is subtracted from every entry of that row first; this leaves the result
    unchanged mathematically but keeps the exponentials bounded by 1.

    Parameters
    ----------
    X: array-like, shape (M, N)
        Argument to the logistic function
    copy: bool, optional
        Copy X or not. When False, X is overwritten in place.

    Returns
    -------
    out: array, shape (M, N)
        Softmax function evaluated at every point in x
    """
    if copy:
        X = np.copy(X)
    # Row-wise shift for numerical stability, then exponentiate in place.
    X -= np.max(X, axis=1).reshape((-1, 1))
    np.exp(X, X)
    # Normalize each row so it sums to one.
    X /= np.sum(X, axis=1).reshape((-1, 1))
    return X
def safe_min(X):
"""Returns the minimum value of a dense or a CSR/CSC matrix.
Adapated from http://stackoverflow.com/q/13426580
"""
if issparse(X):
if len(X.data) == 0:
return 0
m = X.data.min()
return m if X.getnnz() == X.size else min(m, 0)
else:
return X.min()
def make_nonnegative(X, min_value=0):
    """Ensure `X.min()` >= `min_value`.

    Dense data is shifted by a constant; sparse data cannot be shifted
    (that would densify it), so a ValueError is raised instead.
    """
    shift = min_value - safe_min(X)
    if shift > 0:
        if issparse(X):
            raise ValueError("Cannot make the data matrix"
                             " nonnegative because it is sparse."
                             " Adding a value to every entry would"
                             " make it no longer sparse.")
        # Returns a shifted copy; the caller's X is left untouched.
        X = X + shift
    return X
def _incremental_mean_and_var(X, last_mean=.0, last_variance=None,
                              last_sample_count=0):
    """Calculate mean update and a Youngs and Cramer variance update.

    last_mean and last_variance are statistics computed at the last step by the
    function. Both must be initialized to 0.0. In case no scaling is required
    last_variance can be None. The mean is always required and returned because
    necessary for the calculation of the variance. last_n_samples_seen is the
    number of samples encountered until now.

    From the paper "Algorithms for computing the sample variance: analysis and
    recommendations", by Chan, Golub, and LeVeque.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Data to use for variance update
    last_mean : array-like, shape: (n_features,)
    last_variance : array-like, shape: (n_features,)
    last_sample_count : int

    Returns
    -------
    updated_mean : array, shape (n_features,)
    updated_variance : array, shape (n_features,)
        If None, only mean is computed
    updated_sample_count : int

    References
    ----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
    variance: recommendations, The American Statistician, Vol. 37, No. 3,
    pp. 242-247

    Also, see the sparse implementation of this in
    `utils.sparsefuncs.incr_mean_variance_axis` and
    `utils.sparsefuncs_fast.incr_mean_variance_axis0`
    """
    # old = stats until now
    # new = the current increment
    # updated = the aggregated stats
    # Work with (unnormalized) sums so the two batches can be merged exactly.
    last_sum = last_mean * last_sample_count
    new_sum = X.sum(axis=0)
    new_sample_count = X.shape[0]
    updated_sample_count = last_sample_count + new_sample_count
    updated_mean = (last_sum + new_sum) / updated_sample_count
    if last_variance is None:
        updated_variance = None
    else:
        # Variance of the new batch, scaled back to an unnormalized
        # sum-of-squared-deviations so it can be combined with the old one.
        new_unnormalized_variance = X.var(axis=0) * new_sample_count
        if last_sample_count == 0:  # Avoid division by 0
            updated_unnormalized_variance = new_unnormalized_variance
        else:
            # NOTE(review): this relies on true division; under Python 2 the
            # enclosing module must import division from __future__ — confirm.
            last_over_new_count = last_sample_count / new_sample_count
            last_unnormalized_variance = last_variance * last_sample_count
            # Chan/Golub/LeVeque pairwise update: combine the two partial
            # sums of squares plus a correction for the difference in means.
            updated_unnormalized_variance = (
                last_unnormalized_variance +
                new_unnormalized_variance +
                last_over_new_count / updated_sample_count *
                (last_sum / last_over_new_count - new_sum) ** 2)
        updated_variance = updated_unnormalized_variance / updated_sample_count
    return updated_mean, updated_variance, updated_sample_count
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
def stable_cumsum(arr, rtol=1e-05, atol=1e-08):
    """Use high precision for cumsum and check that final value matches sum.

    Parameters
    ----------
    arr : array-like
        To be cumulatively summed as flat
    rtol : float
        Relative tolerance, see ``np.allclose``
    atol : float
        Absolute tolerance, see ``np.allclose``

    Raises
    ------
    RuntimeError
        If the last cumulative value drifts away from the independently
        computed total beyond the given tolerances.
    """
    # Accumulate in float64 regardless of input dtype to limit rounding drift.
    cumulative = np.cumsum(arr, dtype=np.float64)
    total = np.sum(arr, dtype=np.float64)
    if not np.allclose(cumulative[-1], total, rtol=rtol, atol=atol):
        raise RuntimeError('cumsum was found to be unstable: '
                           'its last element does not correspond to sum')
    return cumulative
| |
import os
import yaml
from string import Template
from copy import deepcopy
from .plugins import ArgcountChecker, OptionalArguments, ArgumentReferences, \
BeforeAfterCall, ConstantArguments, ReturnArguments, GILRelease
class cwrap(object):
    """Generate C wrapper code from ``.cwrap`` declaration files.

    A ``.cwrap`` file mixes plain C/C++ source with YAML declaration blocks
    delimited by lines containing only ``[[`` and ``]]``.  Each declaration
    is expanded into a wrapper function; a plugin pipeline gets to rewrite
    the declarations, the per-option code, each wrapper and the full output
    file at every stage.
    """
    # Starting indent (in spaces) used by indent_code() for generated bodies.
    BASE_INDENT_SIZE = 6
    # Maps a declared C return type to the code that returns it to Python.
    RETURN_WRAPPERS = {
        'void': Template('Py_RETURN_NONE;'),
        'long': Template('return PyLong_FromLong($result);'),
        'bool': Template('return PyBool_FromLong($result);'),
        'void*': Template('return PyLong_FromVoidPtr($result);'),
    }
    # One "option" = one overload, emitted as an if / "} else if" chain keyed
    # on the argument-check condition.  NOTE(review): the closing brace is
    # not in this template — presumably supplied by a later plugin/template
    # stage; confirm before editing.
    OPTION_TEMPLATE = Template("""
    ${els}if ($arg_check) {
      $pre_arg_assign
      $arg_assign
      $code
    """)
    ARG_ASSIGN_TEMPLATE = Template("""${type} ${name} = ${unpack};""")
    # Default per-option body: perform the call, then return the result.
    OPTION_CODE_TEMPLATE = [
        '$call',
        '$return_result',
    ]
    FUNCTION_CALL_TEMPLATE = Template("$capture_result$cname($call_arg);")
    DEFAULT_PLUGIN_CLASSES = [ArgcountChecker, ConstantArguments, OptionalArguments,
                              ArgumentReferences, BeforeAfterCall, ReturnArguments, GILRelease]
    def __init__(self, source, destination=None, plugins=[], default_plugins=True):
        # Reads *source*, wraps its declarations and writes the result to
        # *destination* (defaults to *source* with .cwrap -> .cpp).
        # NOTE(review): the mutable default ``plugins=[]`` is only read and
        # rebound here, never mutated in place, so it is safe — but fragile.
        if destination is None:
            destination = source.replace('.cwrap', '.cpp')
        self.plugins = plugins
        if default_plugins:
            defaults = [cls() for cls in self.DEFAULT_PLUGIN_CLASSES]
            self.plugins = defaults + self.plugins
        for plugin in self.plugins:
            plugin.initialize(self)
        self.base_path = os.path.dirname(os.path.abspath(source))
        with open(source, 'r') as f:
            declarations = f.read()
        # wrap_declarations returns the complete output file contents.
        wrapper = self.wrap_declarations(declarations)
        for plugin in self.plugins:
            wrapper = plugin.process_full_file(wrapper)
        with open(destination, 'w') as f:
            f.write(wrapper)
    def wrap_declarations(self, declarations):
        """Scan the input line by line, replacing each ``[[ ... ]]`` YAML
        block with generated wrapper code; ``!!inc <path>`` lines splice in
        another file, and every other line passes through unchanged.
        """
        lines = declarations.split('\n')
        declaration_lines = []
        output = []
        in_declaration = False
        i = 0
        while i < len(lines):
            line = lines[i]
            if line == '[[':
                declaration_lines = []
                in_declaration = True
            elif line == ']]':
                in_declaration = False
                # NOTE(review): yaml.load without an explicit Loader — fine
                # for trusted build inputs, unsafe on untrusted ones.
                declaration = yaml.load('\n'.join(declaration_lines))
                self.set_declaration_defaults(declaration)
                # Pass declaration in a list - maybe some plugins want to add
                # multiple wrappers
                declarations = [declaration]
                for plugin in self.plugins:
                    declarations = plugin.process_declarations(declarations)
                # Generate wrappers for all declarations and append them to
                # the output
                for declaration in declarations:
                    wrapper = self.generate_wrapper(declaration)
                    for plugin in self.plugins:
                        wrapper = plugin.process_wrapper(wrapper, declaration)
                    output.append(wrapper)
            elif in_declaration:
                declaration_lines.append(line)
            elif '!!inc ' == line[:6]:
                fname = os.path.join(self.base_path, line[6:].strip())
                with open(fname, 'r') as f:
                    included = f.read().split('\n')
                # insert it into lines at position i+1
                lines[i + 1:i + 1] = included
            else:
                output.append(line)
            i += 1
        return '\n'.join(output)
    def set_declaration_defaults(self, declaration):
        """Fill declaration defaults: empty arguments, void return,
        cname == name, and normalize a single-signature declaration into a
        one-entry 'options' list so later code only deals with options.
        """
        declaration.setdefault('arguments', [])
        declaration.setdefault('return', 'void')
        if 'cname' not in declaration:
            declaration['cname'] = declaration['name']
        # Simulate multiple dispatch, even if it's not necessary
        if 'options' not in declaration:
            declaration['options'] = [{'arguments': declaration['arguments']}]
            del declaration['arguments']
        # Parse arguments (some of them can be strings)
        for option in declaration['options']:
            option['arguments'] = self.parse_arguments(option['arguments'])
        # Propagate defaults from declaration to options
        for option in declaration['options']:
            for k, v in declaration.items():
                if k != 'name' and k != 'options':
                    option.setdefault(k, v)
    def parse_arguments(self, args):
        """Normalize each argument into a dict with 'type' and 'name' keys.

        Accepts either a plain "<type> <name>" string or a dict (optionally
        carrying a combined 'arg' entry which is split in place).
        """
        new_args = []
        for arg in args:
            # Simple arg declaration of form "<type> <name>"
            if isinstance(arg, str):
                t, _, name = arg.partition(' ')
                new_args.append({'type': t, 'name': name})
            elif isinstance(arg, dict):
                if 'arg' in arg:
                    arg['type'], _, arg['name'] = arg['arg'].partition(' ')
                    del arg['arg']
                new_args.append(arg)
            else:
                assert False
        return new_args
    def search_plugins(self, fnname, args, fallback):
        """Call *fnname* on each plugin in order; the first non-None result
        wins, otherwise *fallback* is invoked with the same arguments.
        """
        for plugin in self.plugins:
            wrapper = getattr(plugin, fnname)(*args)
            if wrapper is not None:
                return wrapper
        return fallback(*args)
    def get_type_check(self, arg, option):
        # Plugin-provided template testing a Python object against a C type.
        return self.search_plugins('get_type_check', (arg, option), lambda arg, _: None)
    def get_type_unpack(self, arg, option):
        # Plugin-provided template converting a Python object to a C value.
        return self.search_plugins('get_type_unpack', (arg, option), lambda arg, _: None)
    def get_return_wrapper(self, option):
        # Falls back to RETURN_WRAPPERS keyed by the declared return type.
        return self.search_plugins('get_return_wrapper', (option,), lambda _: self.RETURN_WRAPPERS[option['return']])
    def get_wrapper_template(self, declaration):
        # Must be supplied by a plugin; there is no built-in fallback.
        return self.search_plugins('get_wrapper_template', (declaration,), lambda _: None)
    def get_assign_args(self, arguments):
        # Lets plugins rewrite argument dicts before assignment code is built.
        return self.search_plugins('get_assign_args', (arguments,), lambda _: arguments)
    def get_arg_accessor(self, arg, option):
        """Return the C expression fetching *arg* from the args tuple."""
        def wrap_accessor(arg, _):
            # Fallback accessor: positional fetch; requires an assigned idx.
            if arg.get('idx') is None:
                raise RuntimeError("Missing accessor for '{} {}'".format(
                    arg['type'], arg['name']))
            return 'PyTuple_GET_ITEM(args, {})'.format(arg['idx'])
        return self.search_plugins('get_arg_accessor', (arg, option), wrap_accessor)
    def generate_wrapper(self, declaration):
        """Emit the full wrapper for one declaration by concatenating the
        generated code of each of its options into the wrapper template.
        """
        wrapper = ''
        for i, option in enumerate(declaration['options']):
            option_wrapper = self.generate_option(option, is_first=(i == 0))
            for plugin in self.plugins:
                option_wrapper = plugin.process_option_code(option_wrapper, option)
            wrapper += option_wrapper
        return self.get_wrapper_template(declaration).substitute(name=declaration['name'], options=wrapper)
    def map_selected_arguments(self, base_fn_name, plugin_fn_name, option, arguments):
        """Render one template (check or unpack) per argument, then let each
        plugin post-process the rendered snippet.
        """
        result = []
        for arg in arguments:
            accessor = self.get_arg_accessor(arg, option)
            tmpl = getattr(self, base_fn_name)(arg, option)
            if tmpl is None:
                fn = 'check' if base_fn_name == 'get_type_check' else 'unpack'
                raise RuntimeError("Missing type {} for '{} {}'".format(
                    fn, arg['type'], arg['name']))
            res = tmpl.substitute(arg=accessor, idx=arg.get('idx'))
            for plugin in self.plugins:
                res = getattr(plugin, plugin_fn_name)(res, arg, accessor)
            result.append(res)
        return result
    def build_option_args(self, arguments, arg_unpack):
        """Build the C local-variable assignment lines and the call-argument
        list.  CONSTANT arguments are passed through verbatim; every other
        argument gets an ``arg_<name>`` local assigned from its unpack
        expression (deduplicated so a reused name is assigned only once).
        """
        assignement = []
        call_arg = []
        # If types or names needs to be changed
        arguments = self.get_assign_args(arguments)
        for arg, unpack in zip(arguments, arg_unpack):
            if arg['type'] == 'CONSTANT':
                call_arg.append(str(arg['name']))
            else:
                var_name = "arg_" + str(arg.get('assign_name', arg['name']))
                res = self.ARG_ASSIGN_TEMPLATE.substitute(
                    type=arg['type'],
                    name=var_name,
                    unpack=unpack)
                if var_name not in call_arg:
                    assignement.append(res)
                call_arg.append(var_name)
        return assignement, call_arg
    def indent_code(self, code):
        """Re-indent generated C code: 2 spaces per unmatched brace and 4 per
        unbalanced parenthesis, starting from BASE_INDENT_SIZE.
        """
        if code == '':
            return code
        code_lines = map(lambda s: s.strip(), code.split('\n'))
        code = '\n'
        depth = self.BASE_INDENT_SIZE
        for line in code_lines:
            # A closing brace de-indents its own line too.
            depth -= line.count('}') * 2
            code += ' ' * depth + line + '\n'
            depth += line.count('{') * 2
            depth += line.count('(') * 4
            depth -= line.count(')') * 4
        return code[:-1]
    def generate_option(self, option, is_first):
        """Generate the complete if-branch (checks, unpacking, call and
        return) for one overload of the wrapped function.
        """
        checked_args = list(filter(
            lambda arg: 'ignore_check' not in arg or not arg['ignore_check'],
            option['arguments']))
        option['num_checked_args'] = len(checked_args)
        # Only checked args without no_idx get positional tuple indices.
        idx_args = list(filter(
            lambda arg: not arg.get('ignore_check') and not arg.get('no_idx'),
            option['arguments']))
        for i, arg in enumerate(idx_args):
            arg['idx'] = i
        # Generate checks
        arg_checks = self.map_selected_arguments('get_type_check',
                                                 'process_single_check', option, checked_args)
        arg_checks = ' &&\n          '.join(arg_checks)
        for plugin in self.plugins:
            arg_checks = plugin.process_all_checks(arg_checks, option)
        # Generate pre_arg assign
        pre_arg_assign = []
        for plugin in self.plugins:
            pre_arg_assign = plugin.process_pre_arg_assign(pre_arg_assign, option)
        # Generate arg assignment and call arguments
        arg_unpack = self.map_selected_arguments('get_type_unpack',
                                                 'process_single_unpack', option, option['arguments'])
        arg_assign, call_arg = self.build_option_args(option['arguments'], arg_unpack)
        call_arg = ', '.join(call_arg)
        for plugin in self.plugins:
            call_arg = plugin.process_all_call_arg(call_arg, option)
        # Generate call
        try:
            # A void-style wrapper needs no $result; if substitution raises
            # KeyError the wrapper template wants the call result captured.
            return_result = self.get_return_wrapper(option).substitute()
            call = self.FUNCTION_CALL_TEMPLATE.substitute(capture_result='',
                                                          cname=option['cname'], call_arg=call_arg)
        except KeyError:
            return_result = self.get_return_wrapper(option).substitute(result='__result')
            call = self.FUNCTION_CALL_TEMPLATE.substitute(capture_result=(option['return'] + ' __result = '),
                                                          cname=option['cname'], call_arg=call_arg)
        code_template = deepcopy(self.OPTION_CODE_TEMPLATE)
        for plugin in self.plugins:
            code_template = plugin.process_option_code_template(code_template,
                                                                option)
        code_template = Template('\n'.join(code_template))
        code = code_template.substitute(call=call, return_result=return_result)
        code = self.indent_code(code)
        pre_arg_assign = self.indent_code('\n'.join(pre_arg_assign))
        arg_assign = self.indent_code('\n'.join(arg_assign))
        # Put everything together
        return self.OPTION_TEMPLATE.substitute(
            els=('} else ' if not is_first else ''),
            arg_check=arg_checks,
            pre_arg_assign=pre_arg_assign,
            arg_assign=arg_assign,
            code=code,
        )
| |
# Copyright 2014 eBay Software Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet.timeout import Timeout
from oslo_log import log as logging
from trove.common import cfg
from trove.common.exception import PollTimeOut
from trove.common.i18n import _
from trove.common.instance import ServiceStatuses
from trove.common.strategies.cluster import base
from trove.common import utils
from trove.instance import models
from trove.instance.models import DBInstance
from trove.instance.models import Instance
from trove.taskmanager import api as task_api
import trove.taskmanager.models as task_models
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
USAGE_SLEEP_TIME = CONF.usage_sleep_time # seconds.
class MongoDbTaskManagerStrategy(base.BaseTaskManagerStrategy):
    """Task-manager strategy wiring MongoDB's RPC API class, cluster-tasks
    class and extra manager actions into the generic cluster framework.
    """
    @property
    def task_manager_api_class(self):
        # RPC client class used to reach the task-manager service.
        return MongoDbTaskManagerAPI
    @property
    def task_manager_cluster_tasks_class(self):
        # Class implementing the actual cluster orchestration work.
        return MongoDbClusterTasks
    @property
    def task_manager_manager_actions(self):
        # Extra RPC endpoints exposed by the task-manager service.
        return {'add_shard_cluster': self._manager_add_shard}
    def _manager_add_shard(self, context, cluster_id, shard_id,
                           replica_set_name):
        # Handler for the 'add_shard_cluster' RPC: load the cluster-tasks
        # object and delegate the shard wiring to it.
        cluster_tasks = task_models.ClusterTasks.load(
            context,
            cluster_id,
            MongoDbClusterTasks)
        cluster_tasks.add_shard_cluster(context, cluster_id, shard_id,
                                        replica_set_name)
class MongoDbClusterTasks(task_models.ClusterTasks):
    """Task-manager side orchestration of MongoDB cluster operations.

    Each public operation performs its real work inside a nested helper
    guarded by an eventlet Timeout of CONF.cluster_usage_timeout seconds;
    on timeout the affected instances are flagged as failed instead of
    leaving the task hanging.
    """
    def create_cluster(self, context, cluster_id):
        """Wire up a freshly provisioned cluster: configure the query
        routers with the config servers and build one shard (replica set)
        out of the member instances.
        """
        LOG.debug("begin create_cluster for id: %s" % cluster_id)
        def _create_cluster():
            # fetch instances by cluster_id against instances table
            db_instances = DBInstance.find_all(cluster_id=cluster_id).all()
            instance_ids = [db_instance.id for db_instance in db_instances]
            LOG.debug("instances in cluster %s: %s" % (cluster_id,
                                                       instance_ids))
            if not self._all_instances_ready(instance_ids, cluster_id):
                return
            LOG.debug("all instances in cluster %s ready." % cluster_id)
            instances = [Instance.load(context, instance_id) for instance_id
                         in instance_ids]
            # filter query routers in instances into a new list: query_routers
            query_routers = [instance for instance in instances if
                             instance.type == 'query_router']
            LOG.debug("query routers: %s" %
                      [instance.id for instance in query_routers])
            # filter config servers in instances into new list: config_servers
            config_servers = [instance for instance in instances if
                              instance.type == 'config_server']
            LOG.debug("config servers: %s" %
                      [instance.id for instance in config_servers])
            # filter members (non router/configsvr) into a new list: members
            members = [instance for instance in instances if
                       instance.type == 'member']
            LOG.debug("members: %s" %
                      [instance.id for instance in members])
            # for config_server in config_servers, append ip/hostname to
            # "config_server_hosts", then
            # peel off the replica-set name and ip/hostname from 'x'
            config_server_ips = [self.get_ip(instance)
                                 for instance in config_servers]
            LOG.debug("config server ips: %s" % config_server_ips)
            if not self._add_query_routers(query_routers,
                                           config_server_ips):
                return
            if not self._create_shard(query_routers[0], members):
                return
            # call to start checking status
            for instance in instances:
                self.get_guest(instance).cluster_complete()
        cluster_usage_timeout = CONF.cluster_usage_timeout
        timeout = Timeout(cluster_usage_timeout)
        try:
            _create_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("timeout for building cluster."))
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()
        LOG.debug("end create_cluster for id: %s" % cluster_id)
    def add_shard_cluster(self, context, cluster_id, shard_id,
                          replica_set_name):
        """Turn the instances created for *shard_id* into a replica set and
        register it as a new shard on a query router.
        """
        LOG.debug("begin add_shard_cluster for cluster %s shard %s"
                  % (cluster_id, shard_id))
        def _add_shard_cluster():
            db_instances = DBInstance.find_all(cluster_id=cluster_id,
                                               shard_id=shard_id).all()
            instance_ids = [db_instance.id for db_instance in db_instances]
            LOG.debug("instances in shard %s: %s" % (shard_id,
                                                     instance_ids))
            if not self._all_instances_ready(instance_ids, cluster_id,
                                             shard_id):
                return
            members = [Instance.load(context, instance_id)
                       for instance_id in instance_ids]
            db_query_routers = DBInstance.find_all(cluster_id=cluster_id,
                                                   type='query_router',
                                                   deleted=False).all()
            query_routers = [Instance.load(context, db_query_router.id)
                             for db_query_router in db_query_routers]
            if not self._create_shard(query_routers[0], members):
                return
            for member in members:
                self.get_guest(member).cluster_complete()
        cluster_usage_timeout = CONF.cluster_usage_timeout
        timeout = Timeout(cluster_usage_timeout)
        try:
            _add_shard_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("timeout for building shard."))
            self.update_statuses_on_failure(cluster_id, shard_id)
        finally:
            timeout.cancel()
        LOG.debug("end add_shard_cluster for cluster %s shard %s"
                  % (cluster_id, shard_id))
    def grow_cluster(self, context, cluster_id, instance_ids):
        """Add the given new instances to a running cluster: new members are
        grouped by shard_id into new shards, new query routers are attached
        to the existing config servers.
        """
        LOG.debug("begin grow_cluster for MongoDB cluster %s" % cluster_id)
        def _grow_cluster():
            new_instances = [db_instance for db_instance in self.db_instances
                             if db_instance.id in instance_ids]
            new_members = [db_instance for db_instance in new_instances
                           if db_instance.type == 'member']
            new_query_routers = [db_instance for db_instance in new_instances
                                 if db_instance.type == 'query_router']
            instances = []
            if new_members:
                shard_ids = set([db_instance.shard_id for db_instance
                                 in new_members])
                query_router_id = self._get_running_query_router_id()
                if not query_router_id:
                    return
                for shard_id in shard_ids:
                    LOG.debug('growing cluster by adding shard %s on query '
                              'router %s' % (shard_id, query_router_id))
                    member_ids = [db_instance.id for db_instance in new_members
                                  if db_instance.shard_id == shard_id]
                    if not self._all_instances_ready(
                        member_ids, cluster_id, shard_id
                    ):
                        return
                    members = [Instance.load(context, member_id)
                               for member_id in member_ids]
                    query_router = Instance.load(context, query_router_id)
                    if not self._create_shard(query_router, members):
                        return
                    instances.extend(members)
            if new_query_routers:
                query_router_ids = [db_instance.id for db_instance
                                    in new_query_routers]
                config_servers_ids = [db_instance.id for db_instance
                                      in self.db_instances
                                      if db_instance.type == 'config_server']
                LOG.debug('growing cluster by adding query routers %s, '
                          'with config servers %s'
                          % (query_router_ids, config_servers_ids))
                if not self._all_instances_ready(
                    query_router_ids, cluster_id
                ):
                    return
                query_routers = [Instance.load(context, instance_id)
                                 for instance_id in query_router_ids]
                config_servers_ips = [
                    self.get_ip(Instance.load(context, config_server_id))
                    for config_server_id in config_servers_ids
                ]
                # Reuse the existing cluster admin password so the new
                # routers join the same cluster rather than creating a user.
                if not self._add_query_routers(
                        query_routers, config_servers_ips,
                        admin_password=self.get_cluster_admin_password(context)
                ):
                    return
                instances.extend(query_routers)
            # call to start checking status on the successfully added nodes
            for instance in instances:
                self.get_guest(instance).cluster_complete()
        cluster_usage_timeout = CONF.cluster_usage_timeout
        timeout = Timeout(cluster_usage_timeout)
        try:
            _grow_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("timeout for growing cluster."))
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()
        LOG.debug("end grow_cluster for MongoDB cluster %s" % self.id)
    def shrink_cluster(self, context, cluster_id, instance_ids):
        """Wait until the given instances have been marked deleted in the
        database (the actual deletion happens elsewhere).
        """
        LOG.debug("begin shrink_cluster for MongoDB cluster %s" % cluster_id)
        def _shrink_cluster():
            def all_instances_marked_deleted():
                non_deleted_instances = DBInstance.find_all(
                    cluster_id=cluster_id, deleted=False).all()
                non_deleted_ids = [db_instance.id for db_instance
                                   in non_deleted_instances]
                return not bool(
                    set(instance_ids).intersection(set(non_deleted_ids))
                )
            try:
                utils.poll_until(all_instances_marked_deleted,
                                 sleep_time=2,
                                 time_out=CONF.cluster_delete_time_out)
            except PollTimeOut:
                LOG.error(_("timeout for instances to be marked as deleted."))
                return
        cluster_usage_timeout = CONF.cluster_usage_timeout
        timeout = Timeout(cluster_usage_timeout)
        try:
            _shrink_cluster()
            self.reset_task()
        except Timeout as t:
            if t is not timeout:
                raise  # not my timeout
            LOG.exception(_("timeout for shrinking cluster."))
            self.update_statuses_on_failure(cluster_id)
        finally:
            timeout.cancel()
        LOG.debug("end shrink_cluster for MongoDB cluster %s" % self.id)
    def get_cluster_admin_password(self, context):
        """The cluster admin's user credentials are stored on all query
        routers. Find one and get the guest to return the password.
        """
        instance = Instance.load(context, self._get_running_query_router_id())
        return self.get_guest(instance).get_admin_password()
    def _init_replica_set(self, primary_member, other_members):
        """Initialize the replica set by calling the primary member guest's
        add_members.

        Returns True on success; on any error the shard's instances are
        flagged failed and False is returned.
        """
        LOG.debug('initializing replica set on %s' % primary_member.id)
        other_members_ips = []
        try:
            for member in other_members:
                other_members_ips.append(self.get_ip(member))
                self.get_guest(member).restart()
            self.get_guest(primary_member).prep_primary()
            self.get_guest(primary_member).add_members(other_members_ips)
        except Exception:
            LOG.exception(_("error initializing replica set"))
            self.update_statuses_on_failure(self.id,
                                            shard_id=primary_member.shard_id)
            return False
        return True
    def _create_shard(self, query_router, members):
        """Create a replica set out of the given member instances and add it as
        a shard to the cluster.

        Returns True on success, False (with statuses updated) on failure.
        """
        primary_member = members[0]
        other_members = members[1:]
        if not self._init_replica_set(primary_member, other_members):
            return False
        replica_set = self.get_guest(primary_member).get_replica_set_name()
        LOG.debug('adding replica set %s as shard %s to cluster %s'
                  % (replica_set, primary_member.shard_id, self.id))
        try:
            self.get_guest(query_router).add_shard(
                replica_set, self.get_ip(primary_member))
        except Exception:
            LOG.exception(_("error adding shard"))
            self.update_statuses_on_failure(self.id,
                                            shard_id=primary_member.shard_id)
            return False
        return True
    def _get_running_query_router_id(self):
        """Get a query router in this cluster that is in the RUNNING state.

        Returns the instance id, or False (after flagging the cluster
        failed) when no router is RUNNING.
        """
        for instance_id in [db_instance.id for db_instance in self.db_instances
                            if db_instance.type == 'query_router']:
            status = models.InstanceServiceStatus.find_by(
                instance_id=instance_id).get_status()
            if status == ServiceStatuses.RUNNING:
                return instance_id
        LOG.exception(_("no query routers ready to accept requests"))
        self.update_statuses_on_failure(self.id)
        return False
    def _add_query_routers(self, query_routers, config_server_ips,
                           admin_password=None):
        """Configure the given query routers for the cluster.

        If this is a new_cluster an admin user will be created with a randomly
        generated password, else the password needs to be retrieved from
        and existing query router.

        Returns True on success, False (with statuses updated) on failure.
        """
        LOG.debug('adding new query router(s) %s with config server '
                  'ips %s' % ([i.id for i in query_routers],
                              config_server_ips))
        for query_router in query_routers:
            try:
                LOG.debug("calling add_config_servers on query router %s"
                          % query_router.id)
                guest = self.get_guest(query_router)
                guest.add_config_servers(config_server_ips)
                if not admin_password:
                    # First router of a new cluster: create the admin user,
                    # then share its password with any remaining routers.
                    LOG.debug("creating cluster admin user")
                    admin_password = utils.generate_random_password()
                    guest.create_admin_user(admin_password)
                else:
                    guest.store_admin_password(admin_password)
            except Exception:
                LOG.exception(_("error adding config servers"))
                self.update_statuses_on_failure(self.id)
                return False
        return True
class MongoDbTaskManagerAPI(task_api.API):
    """MongoDB-specific additions to the task-manager RPC API."""
    def mongodb_add_shard_cluster(self, cluster_id, shard_id,
                                  replica_set_name):
        """Fire-and-forget RPC asking the task manager to add a shard
        (replica set *replica_set_name*) to the given cluster.
        """
        LOG.debug("Making async call to add shard cluster %s " % cluster_id)
        version = task_api.API.API_BASE_VERSION
        cctxt = self.client.prepare(version=version)
        # cast = asynchronous one-way call; no reply is expected.
        cctxt.cast(self.context,
                   "add_shard_cluster",
                   cluster_id=cluster_id,
                   shard_id=shard_id,
                   replica_set_name=replica_set_name)
| |
''' Copyright 2015 Neokami GmbH. '''
from .Base import Base
from .HttpClients.NeokamiCurl import NeokamiHttpClient
from .NeokamiResponse import NeokamiResponse
from .Exceptions.NeokamiParametersException import NeokamiParametersException
# Shadow the imported NeokamiHttpClient class with a shared module-level
# instance; every request class in this module reuses this single client.
NeokamiHttpClient = NeokamiHttpClient()
class NeokamiRequest(Base):
    '''Base class for concrete Neokami API requests.

    Stores the per-request configuration (API key, polling behaviour,
    output format) and provides the generic helpers for uploading data,
    triggering analyses and fetching results by job id.
    '''

    # If set to 0, analysis is queued and can be retrieved at a later time
    # using the job id; otherwise the endpoint retries up to ``max_retries``
    # times before returning.
    wait = 1

    # Maximum number of retries for results if wait is set to 1.
    max_retries = 5

    # Time in seconds to wait between retries for results.
    sleep = 1

    # Server-side result delivery type; see setOutputType() for the values
    # actually accepted ('memory', 'rabbitmq').
    output_type = None

    # Client-side response format, can be json, array or xml.
    output_format = 'array'

    # The api key, get yours now @ www.neokami.com
    apiKey = None

    # If set to True, exceptions will be suppressed.
    silentFails = False

    def getOutputFormat(self):
        '''
        Get the output format
        :return output_format:
        '''
        return self.output_format

    def setOutputFormat(self, output_format):
        '''
        Set output format
        :param output_format:
        :return self:
        '''
        self.output_format = output_format
        return self

    def getApiKey(self):
        '''
        Get api key
        :return apiKey:
        '''
        return self.apiKey

    def setApiKey(self, apiKey):
        '''
        Set api key
        :param apiKey:
        :return self:
        '''
        self.apiKey = apiKey
        return self

    def getSilentFails(self):
        '''
        Get silent fails
        :return bool silentFails:
        '''
        return self.silentFails

    def setSilentFails(self, silentFails):
        '''
        Set silent fails
        :param bool silentFails:
        :return self:
        '''
        self.silentFails = silentFails
        # Fix: return self for chaining, consistent with the other setters.
        return self

    def checkHasAllParameters(self, required):
        '''
        Check that this request has all the parameters the server expects.
        :param list required: attribute names that must be set and non-None
        :return bool True:
        :raises NeokamiParametersException: if any parameter is missing
        '''
        for req in required:
            # "is None" (identity) rather than "== None" (equality), per PEP 8.
            if not hasattr(self, req) or getattr(self, req) is None:
                raise NeokamiParametersException('Missing parameter: ' + req + '.')
        return True

    def getWait(self):
        '''
        Get wait value
        :return float wait:
        '''
        return self.wait

    def setWait(self, wait):
        '''
        Set wait value
        :param float wait:
        :return self:
        '''
        self.wait = wait
        return self

    def getMaxRetries(self):
        '''
        Get max retries value
        :return int max_retries:
        '''
        return self.max_retries

    def setMaxRetries(self, max_retries):
        '''
        Set max retries value
        :param int max_retries:
        :return self:
        '''
        self.max_retries = max_retries
        return self

    def getSleep(self):
        '''
        Get sleep value
        :return float sleep:
        '''
        return self.sleep

    def setSleep(self, sleep):
        '''
        Set sleep value
        :param float sleep:
        :return self:
        '''
        self.sleep = sleep
        return self

    def getResult(self, jobId):
        '''
        Get results using the job id
        :param string jobId:
        :return object NeokamiResponse:
        '''
        response = NeokamiHttpClient.post(
            self.getUrl('/engine/job/results'),
            self.apiKey,
            {
                'job_id': jobId,
                'sdk_version': self.SDK_VERSION,
                'sdk_lang': self.SDK_LANG
            }
        )
        return NeokamiResponse(response, self.getOutputFormat(), self.getSilentFails())

    def uploader(self, api_url, field_request, data_upload, job_id=None):
        '''
        Upload data to the specified endpoint, optionally reusing the job_id
        of a previous upload.
        :param string api_url:
        :param string field_request: form field under which the data is sent
        :param mixed data_upload:
        :param string job_id:
        :return object NeokamiResponse:
        '''
        data = {
            'wait': self.getWait(),
            'max_retries': self.getMaxRetries(),
            'sleep': self.getSleep(),
            'sdk_version': self.SDK_VERSION,
            'sdk_lang': self.SDK_LANG,
            field_request: data_upload
        }
        if job_id is not None:
            data['job_id'] = job_id
        response = NeokamiHttpClient.post(
            self.getUrl(api_url),
            self.apiKey,
            data
        )
        return NeokamiResponse(response, self.getOutputFormat(), self.getSilentFails())

    def analyseFromUpload(self, api_url, job_id):
        '''
        Analyse uploaded data for the specified job_id
        :param string api_url:
        :param string job_id:
        :return object NeokamiResponse:
        '''
        response = NeokamiHttpClient.post(
            self.getUrl(api_url),
            self.apiKey,
            {
                'job_id': job_id,
                'sdk_version': self.SDK_VERSION,
                'sdk_lang': self.SDK_LANG
            }
        )
        return NeokamiResponse(response, self.getOutputFormat(), self.getSilentFails())

    def getOutputType(self):
        '''
        Get output type value
        :return string output_type:
        '''
        return self.output_type

    def setOutputType(self, output_type):
        '''
        Set output_type
        :param string output_type:
        :return self:
        :raises NeokamiParametersException: if output_type is not supported
        '''
        validTypes = ['memory', 'rabbitmq']
        if output_type not in validTypes:
            raise NeokamiParametersException('Specified output is not valid. Valid types are ' + ', '.join(validTypes))
        self.output_type = output_type
        return self
| |
# -*- coding: utf-8 -*-
#===============================================================================
# from constants import MAX_CONSTRAINT_NAME
# from constants import MAX_INDEX_NAME
# from constants import MAX_TABLE_NAME
# from constants import MAX_SEQNAME
#===============================================================================
from django.db.models.sql import compiler
from itertools import izip
from django.db.utils import DatabaseError
from datetime import datetime
import re
from django.db.models.sql.datastructures import EmptyResultSet
from django.utils.encoding import smart_str, smart_unicode
from django.db.models.sql.constants import (SINGLE, MULTI, ORDER_DIR, GET_ITERATOR_CHUNK_SIZE)
class SQLCompiler(compiler.SQLCompiler):
    # SELECT compiler for a Progress/OpenEdge backend.  OpenEdge caps
    # identifier length and paginates with TOP rather than LIMIT/OFFSET,
    # so this subclass truncates quoted identifiers (formatTableName)
    # and rewrites the limit handling in as_sql().
    def formatTableName(self,data):
        """Truncate every double-quoted identifier in *data* to the
        backend's max_name_length().

        *data* is either a list of SQL fragments (mutated and returned)
        or a single string (a rebuilt copy is returned).  Fragments
        containing 'IN (' or ' LIKE ' are passed through untruncated so
        literal values are not clipped; a 'COLLATE <word> ' marker is
        stripped from such fragments instead.
        """
        #import pdb; pdb.set_trace()
        if isinstance(data,list) is True:
            for i,v in enumerate(data):
                # Split on the quote character so only the quoted name
                # pieces are shortened, not the surrounding SQL text.
                tv=v.split('"')
                for iv,vv in enumerate(tv):
                    tv[iv]=vv[:self.connection.ops.max_name_length()]
                data[i]='"'.join(tv)
            return data
        else :
            tdata=data.split('"')
            for i,v in enumerate(tdata):
                #===============================================================
                # If where clause is IN (val,val...), or LIKE be careful to not substring the IN clause
                #===============================================================
                if 'IN (' in v or ' LIKE ' in v:
                    if 'COLLATE' in v:
                        # NOTE(review): non-raw regex string; r'...' would
                        # be safer, though '\w' happens to work here.
                        tdata[i]=re.sub('COLLATE (\w+) ','',v)
                    else:
                        tdata[i]=v
                else:
                    tdata[i]=v[:self.connection.ops.max_name_length()]
                #===============================================================
                # if 'IN (' not in v:
                #    tdata[i]=v[:MAX_TABLE_NAME]
                # else:
                #    tdata[i]=v
                #===============================================================
            return '"'.join(tdata)
    def as_sql(self, with_limits=True, with_col_aliases=False):
        #import pdb; pdb.set_trace()
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        If 'with_limits' is False, any limit/offset information is not included
        in the query.
        """
        # An empty slice (low_mark == high_mark) can never match rows.
        if with_limits and self.query.low_mark == self.query.high_mark:
            return '', ()
        self.pre_sql_setup()
        # After executing the query, we must get rid of any joins the query
        # setup created. So, take note of alias counts before the query ran.
        # However we do not want to get rid of stuff done in pre_sql_setup(),
        # as the pre_sql_setup will modify query state in a way that forbids
        # another run of it.
        self.refcounts_before = self.query.alias_refcount.copy()
        out_cols = self.get_columns(with_col_aliases)
        ordering, ordering_group_by = self.get_ordering()
        distinct_fields = self.get_distinct()
        # This must come after 'select', 'ordering' and 'distinct' -- see
        # docstring of get_from_clause() for details.
        from_, f_params = self.get_from_clause()
        qn = self.quote_name_unless_alias
        where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
        having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
        params = []
        for val in self.query.extra_select.itervalues():
            params.extend(val[1])
        result = ['SELECT']
        if self.query.distinct:
            # Truncate identifiers inside the DISTINCT column list too.
            distinct_fields=self.formatTableName(distinct_fields)
            result.append(self.connection.ops.distinct_sql(distinct_fields))
        out_cols= self.formatTableName(out_cols)
        result.append(', '.join(out_cols + self.query.ordering_aliases))
        result.append('FROM')
        from_ = self.formatTableName(from_)
        result.extend(from_)
        params.extend(f_params)
        if where:
            where=self.formatTableName(where)
            result.append('WHERE %s' % where)
            params.extend(w_params)
        grouping, gb_params = self.get_grouping(True)
        if grouping:
            if distinct_fields:
                raise NotImplementedError(
                    "annotate() + distinct(fields) not implemented.")
            if ordering:
                # If the backend can't group by PK (i.e., any database
                # other than MySQL), then any fields mentioned in the
                # ordering clause needs to be in the group by clause.
                if not self.connection.features.allows_group_by_pk:
                    for col, col_params in ordering_group_by:
                        if col not in grouping:
                            grouping.append(str(col))
                            gb_params.extend(col_params)
            else:
                ordering = self.connection.ops.force_no_ordering()
            result.append('GROUP BY %s' % ', '.join(grouping))
            params.extend(gb_params)
        if having:
            result.append('HAVING %s' % having)
            params.extend(h_params)
        if ordering:
            result.append('ORDER BY %s' % ', '.join(ordering))
        if with_limits:
            #===================================================================
            # OpenEdge use TOP, not LIMIT
            #===================================================================
            if self.query.high_mark is not None:
                # TOP is spliced into the leading 'SELECT' token.
                result[0]+=' TOP %d' % (self.query.high_mark - self.query.low_mark)
            if self.query.low_mark:
                if self.query.high_mark is None:
                    val = self.connection.ops.no_limit_value()
                    if val:
                        result[0]+=' TOP %d' % val
                # NOTE(review): OFFSET is not emitted, so low_mark alone
                # is effectively ignored by this backend.
                #result.append('OFFSET %d' % self.query.low_mark)
        if self.query.select_for_update and self.connection.features.has_select_for_update:
            # If we've been asked for a NOWAIT query but the backend does not support it,
            # raise a DatabaseError otherwise we could get an unexpected deadlock.
            nowait = self.query.select_for_update_nowait
            if nowait and not self.connection.features.has_select_for_update_nowait:
                raise DatabaseError('NOWAIT is not supported on this database backend.')
            result.append(self.connection.ops.for_update_sql(nowait=nowait))
        # Finally do cleanup - get rid of the joins we created above.
        self.query.reset_refcounts(self.refcounts_before)
        return ' '.join(result), tuple(params)
    def execute_sql(self, result_type=MULTI):
        """
        Run the query against the database and returns the result(s). The
        return value is a single data item if result_type is SINGLE, or an
        iterator over the results if the result_type is MULTI.
        result_type is either MULTI (use fetchmany() to retrieve all rows),
        SINGLE (only retrieve a single row), or None. In this last case, the
        cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery). It's possible, however, that no query
        is needed, as the filters describe an empty set. In that case, None is
        returned, to avoid any unnecessary database interaction.
        """
        try:
            sql, params = self.as_sql()
            #import pdb; pdb.set_trace()
            if not sql:
                raise EmptyResultSet
        except EmptyResultSet:
            if result_type == MULTI:
                return iter([])
            else:
                return
        cursor = self.connection.cursor()
        cursor.execute(sql, params)
        if not result_type:
            return cursor
        if result_type == SINGLE:
            if self.query.ordering_aliases:
                # Drop trailing columns that exist only to satisfy ORDER BY.
                return cursor.fetchone()[:-len(self.query.ordering_aliases)]
            return cursor.fetchone()
        # The MULTI case.
        if self.query.ordering_aliases:
            result = order_modified_iter(cursor, len(self.query.ordering_aliases),
                    self.connection.features.empty_fetchmany_value)
        else:
            result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
                    self.connection.features.empty_fetchmany_value)
        if not self.connection.features.can_use_chunked_reads:
            # If we are using non-chunked reads, we return the same data
            # structure as normally, but ensure it is all read into memory
            # before going any further.
            return list(result)
        return result
def order_modified_iter(cursor, trim, sentinel):
    """
    Yield blocks of rows from *cursor* with the trailing *trim* columns
    removed.  Those extra columns exist only to make the ORDER BY clause
    valid SQL and must not leak to callers; iteration stops when
    fetchmany() returns *sentinel*.
    """
    fetch_block = lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
    for block in iter(fetch_block, sentinel):
        yield [row[:-trim] for row in block]
class SQLInsertCompiler(SQLCompiler):
    # INSERT compiler.  OpenEdge has no autoincrement columns, so when a
    # table carries an emulated 'id' column this compiler fetches the
    # next key value itself (get_autoinc_keyval) and appends it to the
    # row being inserted.
    def placeholder(self, field, val):
        # Return the SQL placeholder text for one value.
        if field is None:
            # A field value of None means the value is raw.
            return val
        elif hasattr(field, 'get_placeholder'):
            # Some fields (e.g. geo fields) need special munging before
            # they can be inserted.
            return field.get_placeholder(val, self.connection)
        else:
            # Return the common case for the placeholder
            return '%s'
    def as_sql(self):
        """Build the INSERT statement(s); returns a list of
        (sql, params) pairs.  Sets self.bulk_load and self.ID as side
        effects for execute_sql() below."""
        # We don't need quote_name_unless_alias() here, since these are all
        # going to be column names (so we can avoid the extra overhead).
        qn = self.connection.ops.quote_name
        opts = self.query.model._meta
        self.ID = None
        #import pdb; pdb.set_trace()
        cursor = self.connection.cursor()
        owner = self.connection.owner
        table_has_col_id = False
        # Table names are truncated to the backend's identifier limit.
        curtable=opts.db_table[:self.connection.ops.max_name_length()]
        #import pdb; pdb.set_trace()
        #=======================================================================
        # Check if table has id col, it's used to emulate autoincrement col
        #=======================================================================
        table_has_col_id = self.connection.ops.has_id_col(curtable,cursor,owner)
        #======================20131102=================================================
        # if len(cursor.execute("select col from sysprogress.syscolumns where tbl = '%s' and owner = '%s' and col = 'id'"%(curtable,owner)).fetchall()) > 0 :
        #    table_has_col_id = True
        #=======================================================================
        result = ['INSERT INTO %s' % qn(curtable)]
        has_fields = bool(self.query.fields)
        fields = self.query.fields if has_fields else [opts.pk]
        lfields='(%s' % ', '.join([qn(f.column) for f in fields])
        #=======================================================================
        # Test if id col is provided , if not, we have to add it (Openedge does not support autoincrement field)
        #=======================================================================
        hasIdCol=True
        if re.search('"id"',lfields) is None and table_has_col_id is True:
            hasIdCol=False
            lfields+=',"id")'
        else:
            #import pdb; pdb.set_trace()
            lfields+=')'
        result.append(lfields)
        if has_fields:
            # One prepared value row per object being inserted.
            params = values = [
                [
                    f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
                    for f in fields
                ]
                for obj in self.query.objs
            ]
        else:
            values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
            params = [[]]
            fields = [None]
        can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
            not self.return_id and self.connection.features.has_bulk_insert)
        if can_bulk:
            placeholders = [["%s"] * len(fields)]
        else:
            placeholders = [
                [self.placeholder(field, v) for field, v in izip(fields, val)]
                for val in values
            ]
        params = self.connection.ops.modify_insert_params(placeholders, params)
        #import pdb; pdb.set_trace()
        # Transcode unicode values to str for the OpenEdge driver.
        params = [
            smart_str(v) if isinstance(v, unicode) else v
            for v in params[0]
        ]
        if hasIdCol is False and table_has_col_id is True and can_bulk is False:
            #import pdb; pdb.set_trace()
            # Emulated autoincrement: fetch the next id and append it.
            self.ID=self.connection.ops.get_autoinc_keyval(opts.db_table, 'id',self.connection.ops.max_name_length(),cursor)
            #===========================20131101========================================
            # cursor.execute('select id_%s.nextval from dual'%opts.db_table[:self.connection.ops.max_name_length()-3])
            # self.ID=cursor.fetchone()[0]
            #===================================================================
            params.append(self.ID)
        if self.return_id and self.connection.features.can_return_id_from_insert:
            #===================================================================
            # Transcode unicode to string (openedge issue)
            #===================================================================
            col = "%s.%s" % (qn(curtable), qn(opts.pk.column))
            result.append("VALUES (%s" % ", ".join(placeholders[0]))
            if hasIdCol is False and table_has_col_id is True:
                result[-1]+=',%'+'s)'
            else:
                result[-1]+=')'
            #import pdb; pdb.set_trace()
            return [(" ".join(result), tuple(params))]
        if can_bulk:
            #import pdb; pdb.set_trace()
            self.bulk_load=True
            tabID=None
            if hasIdCol is False and table_has_col_id is True:
                # Append a generated id to every value row for executemany().
                for i,v in enumerate(values):
                    values[i].append(self.connection.ops.get_autoinc_keyval(opts.db_table, 'id',self.connection.ops.max_name_length(),cursor))
                    #======================20131101=====================================
                    # values[i].append(cursor.execute('select id_%s.nextval from dual'%opts.db_table[:self.connection.ops.max_name_length()-3]).fetchone()[0])
                    #===========================================================
                result.append(self.connection.ops.bulk_insert_sql(fields, len(values),OEid=1))
            else:
                result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
            #return [(" ".join(result),[v for val in values for v in val])]
            return [(" ".join(result),values)]
        else:
            self.bulk_load=False
            result.append("VALUES (%s" % ", ".join(placeholders[0]))
            if hasIdCol is False:
                result[-1]+=',%'+'s)'
            else:
                result[-1]+=')'
            #import pdb; pdb.set_trace()
            return [(" ".join(result), tuple(params))]
    def execute_sql(self, return_id=False):
        # Execute the INSERT(s): executemany() for bulk loads, execute()
        # otherwise; returns the new primary key when return_id is set.
        self.bulk_load=False
        assert not (return_id and len(self.query.objs) != 1)
        self.return_id = return_id
        cursor = self.connection.cursor()
        sql_param=self.as_sql()
        if self.bulk_load is not True:
            for sql, params in sql_param:
                cursor.execute(sql, params)
        else:
            cursor.executemany(sql_param[0][0],sql_param[0][1])
        if not (return_id and cursor):
            return
        if self.ID is not None:
            # Id was generated locally by the autoincrement emulation.
            return self.ID
        if self.connection.features.can_return_id_from_insert:
            return self.connection.ops.fetch_returned_insert_id(cursor)
        return self.connection.ops.last_insert_id(cursor,
            self.query.model._meta.db_table, self.query.model._meta.pk.column)
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, SQLCompiler):
    # Django's stock UPDATE compiler combined (via MRO) with the
    # OpenEdge identifier-truncating SQLCompiler above; no overrides.
    pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, SQLCompiler):
    # DELETE compiler: Django's default behaviour plus the OpenEdge
    # SQLCompiler mixin for identifier truncation elsewhere.
    def as_sql(self):
        """
        Build the DELETE statement for this query and return the SQL
        string together with its parameter tuple.
        """
        assert len(self.query.tables) == 1, \
            "Can only delete from one table at a time."
        qn = self.quote_name_unless_alias
        where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
        sql_parts = ['DELETE FROM %s' % qn(self.query.tables[0])]
        if where:
            sql_parts.append('WHERE %s' % where)
        return ' '.join(sql_parts), tuple(params)
| |
import sqlite3
import pprint
import uuid
# Pretty-printer used for diagnostic dumps below.
ppsetup = pprint.PrettyPrinter(indent=4)
pp = ppsetup.pprint
# Wipes the AOI Data
CLEAR_DESTINATION = False
# Source and Destination DB
source_db = "C:\\Users\\James\\Documents\\Tobii Studio Projects\\Test 1\\database.db3"
dest_db = "C:\\Users\\James\\Documents\\Tobii Studio Projects\\Test 2\\database.db3"
# Backup the destination database ... just in case
from shutil import copyfile
from datetime import datetime
now = datetime.now() # current date and time
# Timestamped copy next to the original, e.g. database.db3.20240101120000
copyfile(dest_db, "{}.{}".format(dest_db,now.strftime("%Y%m%d%H%M%S")))
# Create connections and cursors
source_conn = sqlite3.connect(source_db)
dest_conn = sqlite3.connect(dest_db)
source_cursor_1 = source_conn.cursor()
dest_cursor_1 = dest_conn.cursor()
# Clear the Destination if set
if CLEAR_DESTINATION:
    # Remove all AOI-related rows from the destination before copying;
    # the whole batch is committed once at the end.
    print("- DELETE FROM MovingAoi")
    dest_cursor_1.execute("DELETE FROM MovingAoi")
    print("- DELETE FROM Keyframe")
    dest_cursor_1.execute("DELETE FROM Keyframe")
    print("- DELETE FROM MovingAoiGroup")
    dest_cursor_1.execute("DELETE FROM MovingAoiGroup")
    print("- DELETE FROM MovingAoi_MovingAoiGroup")
    dest_cursor_1.execute("DELETE FROM MovingAoi_MovingAoiGroup")
    dest_conn.commit()
# Build a media lookup table for each database:
#   Name_to_MediaID: media name -> list of MediaIDs sharing that name
#   MediaID_to_Name: MediaID -> media name
def _build_media_name_maps(cursor):
    """Return both name<->id maps for the Media table behind *cursor*.

    Always returns both sub-keys, even when the table is empty — the
    original inline loops omitted them in that case, which made later
    unconditional lookups (e.g. medialookup['dest']['Name_to_MediaID'])
    raise KeyError on an empty Media table.
    """
    maps = {'Name_to_MediaID': {}, 'MediaID_to_Name': {}}
    for media_id, _file_id, _test_id, name in cursor.execute(
            "SELECT MediaID, FileId, TestId, Name FROM Media"):
        # A media name can occur more than once; collect every id for it.
        maps['Name_to_MediaID'].setdefault(name, []).append(media_id)
        maps['MediaID_to_Name'][media_id] = name
    return maps

medialookup = {}
print('Examining source media...')
medialookup['source'] = _build_media_name_maps(source_cursor_1)
print('Examining dest media...')
medialookup['dest'] = _build_media_name_maps(dest_cursor_1)
print('Built the following medialookup')
pp(medialookup)
print('Looking in source media for AOI')
# Create additional cursors (cannot be reused in sqlite/python when nesting in for loops)
source_cursor_2 = source_conn.cursor()
# Holder for old aoi to new aoi, needed for grouping later
old_aoi_uuid_to_new_aoi_uuid = {}
# Copy every source AOI (plus its keyframes) onto each destination media
# row that shares the same media *name*; fresh UUIDs are minted for all
# copied rows so existing destination data is never overwritten.
for row in source_cursor_2.execute("SELECT MovingAoiId, MediaId, TestId, Name, Color, ZOrder, TextExportOrder, VersionNo FROM MovingAoi"):
    # Store names for ease
    aoi_id = row[0]
    source_media_id = row[1]
    try:
        video_name = medialookup['source']['MediaID_to_Name'][row[1]]
    except:
        # NOTE(review): bare except — any failure here, not only a
        # missing MediaID, falls through to the placeholder name.
        video_name = "NULL-VIDEO-NOT-FOUND"
    aoi_name = row[3]
    print("Found AOI {} against video - {}".format(aoi_id, video_name))
    # Check to see if the MediaID's name exists in the destination
    if video_name in medialookup['dest']['Name_to_MediaID']:
        print('Video {} exists in both source and destination'.format(video_name))
        for i, dest_media_id in enumerate(medialookup['dest']['Name_to_MediaID'][video_name]):
            print('- Processing Video {} of {} with media_id {}'.format(i+1, video_name, dest_media_id))
            new_aoi_id = uuid.uuid1()
            print('-- Creating unique AOI id to avoid duplication, Original AOI ID = {}, Media ID = {}, New AOI ID = {}'.format(aoi_id, dest_media_id, new_aoi_id))
            # Remember old->new mapping so group membership can be rebuilt later.
            if aoi_id not in old_aoi_uuid_to_new_aoi_uuid:
                old_aoi_uuid_to_new_aoi_uuid[aoi_id] = []
            old_aoi_uuid_to_new_aoi_uuid[aoi_id].append(new_aoi_id)
            print('--- Checking to see if AOI {} exists for video {} in the destination'.format(new_aoi_id, video_name))
            # Check using the equivalent name from the destination
            dest_cursor_2 = dest_conn.cursor()
            # NOTE(review): SQL is assembled with str.format throughout
            # this script; sqlite3 "?" parameters would be safer if any
            # value can contain a quote character.
            dest_cursor_2.execute("SELECT MovingAoiId, MediaId FROM MovingAoi WHERE MovingAoiID = '{}' AND MediaId = '{}'".format(new_aoi_id, dest_media_id))
            # If we didn't find the area of interest, add it to the table
            if dest_cursor_2.fetchall() == []:
                print('---- AOI {} does not exist on Destination, adding'.format(aoi_name))
                print("----- DEBUG INSERT INTO MovingAoi VALUES('{}','{}','{}','{}','{}','{}','{}','{}')".format(new_aoi_id, dest_media_id, row[2], aoi_name, row[4], row[5], row[6], row[7]))
                dest_cursor_2.execute("INSERT INTO MovingAoi VALUES('{}','{}','{}','{}','{}','{}','{}','{}')".format(new_aoi_id, dest_media_id, row[2], aoi_name, row[4], row[5], row[6], row[7]))
                dest_conn.commit()
                # Find all of the Keyframes that relate to the MovingAoi
                print('------ Checking for Keyframes for AOI {}'.format(aoi_name))
                source_cursor_3 = source_conn.cursor()
                for keyframe_row in source_cursor_3.execute("SELECT KeyFrameId, MovingAoiId, PointInTime, IsCollectingData, VersionNo, Vertices FROM KeyFrame WHERE MovingAoiId = '{}'".format(aoi_id)).fetchall():
                    keyframe_id = keyframe_row[0]
                    print('------- Found Keyframe ID {}'.format(keyframe_id))
                    new_keyframe_id = uuid.uuid1()
                    print('-------- Creating unique KeyFrame ID to avoid duplication, Original KeyFrame ID = {}, Media ID = {}, New KeyFrame ID = {}'.format(keyframe_id, dest_media_id, new_keyframe_id))
                    print('--------- Checking to see if Keyframe ID {} exists on the destination'.format(new_keyframe_id))
                    dest_cursor_3 = dest_conn.cursor()
                    dest_cursor_3.execute("SELECT * FROM KeyFrame WHERE KeyFrameId = '{}'".format(new_keyframe_id))
                    if dest_cursor_3.fetchall() == []:
                        print("---------- KeyFrame {} does not exist on the destination, adding".format(keyframe_id))
                        # Recreate uuid's within verticies
                        # Vertices is a '|'-separated blob whose '*'-separated
                        # entries begin with an 'id:<uuid>' component; each
                        # copy gets a fresh uuid.
                        keyframe_data_entries = keyframe_row[5].split('|')
                        entries = []
                        for entry in keyframe_data_entries[:-1]:
                            components = entry.split('*')
                            components[0] = 'id:{}'.format(uuid.uuid1())
                            s = '*'
                            entries.append(s.join(components))
                        verticies_data = "|".join(entries) + '|'
                        print('----------- Recreated Vertices with new UUID, OLD = {}, New = {}'.format(keyframe_row[5], verticies_data))
                        # NOTE(review): this DEBUG line says "VALUE" while the
                        # executed statement below correctly uses "VALUES".
                        print("------------ DEBUG INSERT INTO KeyFrame VALUE('{}','{}','{}','{}','{}','{}')".format(new_keyframe_id, new_aoi_id, keyframe_row[2], keyframe_row[3], keyframe_row[4], verticies_data))
                        dest_cursor_3.execute("INSERT INTO KeyFrame VALUES('{}','{}','{}','{}','{}','{}')".format(new_keyframe_id, new_aoi_id, keyframe_row[2], keyframe_row[3], keyframe_row[4], verticies_data))
                        dest_conn.commit()
                    else:
                        print("----------- KeyFrame ID {} exists on the destination, ignoring".format(keyframe_id))
            else:
                print('---- AOI {} already exists on Destination, ignoring'.format(aoi_name))
    else:
        print('Found that video {} exist on source but not destination, ignoring'.format(video_name))
print('Finished processing source media')
# Capture Project IDs: each Project table is expected to hold a single
# row, so each loop leaves the variable bound to the last row seen.
for project_row in source_cursor_1.execute("SELECT ProjectId from Project"):
    source_project_id = project_row[0]  # kept for symmetry; unused below
for project_row in dest_cursor_1.execute("SELECT ProjectId from Project"):
    dest_project_id = project_row[0]
print('Processing AOI Groups')
# Process each MovingAOIGroup on the source, if the name exists in the destination then use the dest id, otherwise, recreate with a new id
moving_aoi_group_capture = {'source': {}, 'dest': {}}
# Keep track of group ids: source group UUIDs must be remapped to the
# UUIDs used (or newly created) on the destination.
moving_aoi_source_group_id_to_dest_group_id = {}
for moving_aoi_group_row in source_cursor_1.execute("SELECT * FROM MovingAOIGroup"):
    moving_aoi_group_capture['source'][moving_aoi_group_row[2]] = {'color': moving_aoi_group_row[3], 'version_no': moving_aoi_group_row[4], 'group_uuid': moving_aoi_group_row[0]}
for moving_aoi_group_row in dest_cursor_1.execute("SELECT * FROM MovingAOIGroup"):
    moving_aoi_group_capture['dest'][moving_aoi_group_row[2]] = {'color': moving_aoi_group_row[3], 'version_no': moving_aoi_group_row[4], 'group_uuid': moving_aoi_group_row[0]}
for item in moving_aoi_group_capture['source']:
    if item not in moving_aoi_group_capture['dest']:
        print('- AOI Group {} does not exist on Destination, adding'.format(item))
        item_uuid = uuid.uuid1()
        print('-- Generated AOI Group {} UUID {}'.format(item, item_uuid))
        print("--- DEBUG INSERT INTO MovingAoiGroup VALUES('{}','{}','{}','{}','{}')".format(item_uuid, dest_project_id, item, moving_aoi_group_capture['source'][item]['color'], moving_aoi_group_capture['source'][item]['version_no']))
        dest_cursor_1.execute("INSERT INTO MovingAoiGroup VALUES('{}','{}','{}','{}','{}')".format(item_uuid, dest_project_id, item, moving_aoi_group_capture['source'][item]['color'], moving_aoi_group_capture['source'][item]['version_no']))
        dest_conn.commit()
        moving_aoi_source_group_id_to_dest_group_id[moving_aoi_group_capture['source'][item]['group_uuid']] = item_uuid
    else:
        # BUGFIX: the group name was never interpolated into this message
        # (the original print lacked the .format(item) call).
        print('- AOI Group {} exists on Destination, using existing'.format(item))
        # BUGFIX: the capture dicts store the key 'group_uuid' (see the
        # loops above); looking up 'group_id' raised KeyError for every
        # group that already existed on the destination.
        moving_aoi_source_group_id_to_dest_group_id[moving_aoi_group_capture['source'][item]['group_uuid']] = moving_aoi_group_capture['dest'][item]['group_uuid']
print('Processing AOI Groups - MovingAOI Members')
# Recreate the group-membership join rows for every copied AOI.
for moving_aoi_moving_aoi_group_row in source_cursor_1.execute("SELECT * FROM MovingAOI_MovingAOIGroup"):
    if moving_aoi_moving_aoi_group_row[1] in old_aoi_uuid_to_new_aoi_uuid:
        for new_aoi_uuid in old_aoi_uuid_to_new_aoi_uuid[moving_aoi_moving_aoi_group_row[1]]:
            connection_id = uuid.uuid1()
            print("--- DEBUG INSERT INTO MovingAoi_MovingAoiGroup VALUES('{}','{}','{}')".format(connection_id, new_aoi_uuid ,moving_aoi_source_group_id_to_dest_group_id[moving_aoi_moving_aoi_group_row[2]]))
            dest_cursor_1.execute("INSERT INTO MovingAoi_MovingAoiGroup VALUES('{}','{}','{}')".format(connection_id, new_aoi_uuid ,moving_aoi_source_group_id_to_dest_group_id[moving_aoi_moving_aoi_group_row[2]]))
            dest_conn.commit()
source_conn.close()
dest_conn.close()
print('Finished')
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
from spinnaker.configurator import Configurator
from spinnaker.validate_configuration import ValidateConfig
from spinnaker.yaml_util import YamlBindings
class ValidateConfigurationTest(unittest.TestCase):
def test_is_reference_good(self):
bindings = YamlBindings()
validator = ValidateConfig(
configurator=Configurator(bindings=bindings))
self.assertTrue(validator.is_reference('${a}'))
self.assertTrue(validator.is_reference('${a:value'))
def test_is_reference_bad(self):
bindings = YamlBindings()
validator = ValidateConfig(
configurator=Configurator(bindings=bindings))
self.assertFalse(validator.is_reference('str'))
self.assertFalse(validator.is_reference('true'))
self.assertFalse(validator.is_reference('0'))
self.assertFalse(validator.is_reference('not ${a}'))
def test_true_false_good(self):
bindings = YamlBindings()
bindings.import_dict(
{'t': True, 'f':False, 'indirect':'${t}', 'default': '${x:true}'})
validator = ValidateConfig(
configurator=Configurator(bindings=bindings))
self.assertTrue(validator.verify_true_false('t'))
self.assertTrue(validator.verify_true_false('f'))
self.assertTrue(validator.verify_true_false('indirect'))
self.assertTrue(validator.verify_true_false('default'))
def test_true_false_bad(self):
bindings = YamlBindings()
bindings.import_dict(
{'t': 'true', 'f':'false', 'indirect':'${t}', 'default': '${x:0}'})
validator = ValidateConfig(
configurator=Configurator(bindings=bindings))
self.assertFalse(validator.verify_true_false('t'))
self.assertFalse(validator.verify_true_false('f'))
self.assertFalse(validator.verify_true_false('indirect'))
self.assertFalse(validator.verify_true_false('default'))
self.assertEqual(4, len(validator.errors))
self.assertEqual(0, len(validator.warnings))
self.assertEqual(
["t='true' is not valid. Must be boolean true or false.",
"f='false' is not valid. Must be boolean true or false.",
"indirect='true' is not valid. Must be boolean true or false.",
"default=0 is not valid. Must be boolean true or false."],
validator.errors)
def test_true_false_not_resolved(self):
bindings = YamlBindings()
bindings.import_dict({'indirect': '${t}'})
validator = ValidateConfig(
configurator=Configurator(bindings=bindings))
self.assertFalse(validator.verify_true_false('indirect'))
self.assertEqual('Missing "indirect".', validator.errors[0])
def host_test_helper(self, tests, valid, required=False):
bindings = YamlBindings()
bindings.import_dict(tests)
validator = ValidateConfig(
configurator=Configurator(bindings=bindings))
for key, value in tests.items():
msg = '"{key}" was {valid}'.format(
key=key, valid='invalid' if valid else 'valid')
self.assertEqual(valid, validator.verify_host(key, required), msg)
return validator
def test_verify_host_good(self):
tests = {
'short': 'localhost',
'numeric': '0.0.0.0',
'ipv6-standard': '2607:f8b0:4001:0c20:0000:0000:0000:0066',
'ipv6-short-zero': '2607:f8b0:4001:c20:0:0:0:66',
'dot1': 'my.host',
'dot2': 'my.host.name',
'hyphen': 'this.is-a.host1234',
}
self.host_test_helper(tests, True)
def test_verify_host_bad(self):
tests = {
'upper': 'LOCALHOST',
'under': 'local_host',
'space': 'local host',
'slash': 'localhost/foo',
'colon': 'localhost:80',
'illegal': '-invalid-'
}
validator = self.host_test_helper(tests, False)
self.assertTrue(validator.errors[0].startswith('name="LOCALHOST"'))
def test_verify_host_missing(self):
tests = {
'unresolved': '${whatever}'
}
validator = self.host_test_helper(tests, False, required=True)
self.assertEquals('Missing "unresolved".', validator.errors[0])
self.assertFalse(validator.verify_host('missing', True))
self.assertEquals('No host provided for "missing".',
validator.errors[len(tests)])
def test_verify_host_optional_ok(self):
tests = {
'ok': 'localhost',
'unresolved': '${whatever}',
}
self.host_test_helper(tests, True, required=False)
def baseUrl_test_helper(self, tests, valid, scheme_optional):
bindings = YamlBindings()
bindings.import_dict(tests)
validator = ValidateConfig(
configurator=Configurator(bindings=bindings))
for key, value in tests.items():
msg = '"{key}" was {valid}'.format(
key=key, valid='invalid' if valid else 'valid')
self.assertEqual(
valid,
validator.verify_baseUrl(key, True,
scheme_optional=scheme_optional),
msg)
def test_verify_baseUrl_only_host_ok(self):
tests = {
'localhost': 'localhost',
'ip4': '10.20.30.40',
'ip4-short': '1.2.3.4',
'ip4-long': '255.255.255.255',
'ipv6-standard': '2607:f8b0:4001:0c20:0000:0000:0000:0066',
'ipv6-short-zero': '2607:f8b0:4001:c20:0:0:0:66',
'ipv6-abbrev-mid': '2607:f8b0:4001:c20::66',
'ipv6-abbrev-end': '2607:f8b0:4001:c20::',
'too-generous': '256.300.500.999', # invalid, but accept anyway
'domain': 'foo.bar',
'fulldomain': 'foo.bar.baz.test',
'mixed': 'myhost32.sub-domain23',
'reference': '${ip4}',
# These aren't valid, but are accepted as valid
# to keep the implementation simple.
# They are here for documentation, but are not necessarily
# guaranteed to pass in the future.
'ipv6-badabbrev3': '2607:f8b0:4001:c20:::66',
'ipv6-multi-colon': '2607:f8b0::c20::',
}
self.baseUrl_test_helper(tests, True, scheme_optional=True)
def test_verify_baseUrl_only_host_bad(self):
tests = {
'leading_int': '32my',
'too-few': '10.20.30',
'too-many': '10.20.30.40.50',
'trailing-dot': '10.20.30.40.',
'undef': '${unknown}',
'capital': 'myHost',
'trailing-dot-again': 'myhost.'
}
self.baseUrl_test_helper(tests, False, scheme_optional=True)
def test_verify_baseUrl_only_host_port_ok(self):
tests = {
'localhost': 'localhost:123',
'ip4': '10.20.30.40:456',
'domain': 'foo.bar:789',
'fulldomain': 'foo.bar.baz.test:980',
'mixed': 'myhost32-test.sub-domain23:32'
}
self.baseUrl_test_helper(tests, True, scheme_optional=True)
def test_verify_baseUrl_only_host_port_bad(self):
tests = {
'letters': 'test:abc',
'mixed': 'test:123a',
'empty': 'test:'
}
self.baseUrl_test_helper(tests, False, scheme_optional=True)
def test_verify_baseUrl_only_host_port_path_ok(self):
tests = {
'simple': 'localhost:123/simple',
'noport': 'localhost/simple',
'ip4': '10.20.30.40:456/simple',
'deep': 'localhost/parent/child/leaf',
'dir': 'foo.bar.baz.test:980/dir/',
'numeric': 'host/012345',
'root': 'host/',
'mixed': 'myhost32-test.sub-domain23:123/root-path',
'escaped': 'host/spaced%32path',
'escapedhex': 'host/spaced%afpath',
'escapednumeric': 'host/spaced%321',
'jumple': 'host/%32path+-._',
'ref': '${root}${root}'
}
self.baseUrl_test_helper(tests, True, scheme_optional=True)
def test_verify_baseUrl_only_host_port_path_bad(self):
tests = {
'onlypath': '/bad',
'undef': 'localhost/${undef}',
'badescape0': 'host/bad%',
'badescape1': 'host/bad%1',
'badescapeX': 'host/bad%gg',
'space': 'host/bad space',
'query': 'host/path?name',
'frag': 'host/path#frag',
}
self.baseUrl_test_helper(tests, False, scheme_optional=True)
def test_verify_baseUrl_scheme_ok(self):
tests = {
'host': 'http://localhost',
'port': 'https://localhost:123',
'path': 'http://localhost/path',
}
self.baseUrl_test_helper(tests, True, scheme_optional=True)
def test_verify_baseUrl_scheme_bad(self):
tests = {
'nocolon': 'https//localhost:123',
'nonetloc': 'http:///path',
}
self.baseUrl_test_helper(tests, False, scheme_optional=True)
def test_verify_baseUrl_scheme_required_ok(self):
tests = {
'host': 'http://host',
'host_port': 'http://host:80',
'host_path': 'http://host/path',
'host_port_path': 'http://host:80/path'
}
self.baseUrl_test_helper(tests, True, scheme_optional=False)
def test_verify_baseUrl_scheme_required_bad(self):
tests = {
'scheme': 'http',
'scheme_colon': 'http://',
'host': 'localhost',
'host_port': 'host:80',
'host_port_path': 'host:80/path',
'nohost': 'http://'
}
self.baseUrl_test_helper(tests, False, scheme_optional=False)
def test_verify_user_access_only_good(self):
    """Owner-only permission modes (0o400, 0o600) are accepted.

    BUG FIX: the original used Python-2-only octal literals (0400); the
    0o... form is valid on Python 2.6+ and Python 3 with the same value.
    """
    bindings = YamlBindings()
    validator = ValidateConfig(
        configurator=Configurator(bindings=bindings))
    fd, temp = tempfile.mkstemp()
    os.close(fd)
    try:
        os.chmod(temp, 0o400)
        self.assertTrue(validator.verify_user_access_only(temp))
        os.chmod(temp, 0o600)
        self.assertTrue(validator.verify_user_access_only(temp))
    finally:
        # always remove the scratch file, even if an assertion fails
        os.remove(temp)
def test_verify_user_access_only_bad(self):
    """Any group/other permission bit must be rejected and reported.

    BUG FIX: the original used Python-2-only octal literals (0410 etc.);
    the 0o... form is valid on Python 2.6+ and Python 3 with the same value.
    """
    bindings = YamlBindings()
    validator = ValidateConfig(
        configurator=Configurator(bindings=bindings))
    fd, temp = tempfile.mkstemp()
    os.close(fd)
    try:
        os.chmod(temp, 0o410)
        self.assertFalse(validator.verify_user_access_only(temp))
        self.assertEqual(
            '"{temp}" should not have non-owner access. Mode is 410.'
            .format(temp=temp),
            validator.errors[0])
        os.chmod(temp, 0o420)
        self.assertFalse(validator.verify_user_access_only(temp))
        os.chmod(temp, 0o440)
        self.assertFalse(validator.verify_user_access_only(temp))
        os.chmod(temp, 0o401)
        self.assertFalse(validator.verify_user_access_only(temp))
        os.chmod(temp, 0o402)
        self.assertFalse(validator.verify_user_access_only(temp))
        os.chmod(temp, 0o404)
        self.assertFalse(validator.verify_user_access_only(temp))
    finally:
        os.remove(temp)
def test_verify_at_least_one_provider_enabled_good(self):
    """Validation passes when at least one provider is enabled."""
    bindings = YamlBindings()
    bindings.import_dict({
        'providers': {
            'aws': {'enabled': False},
            'google': {'enabled': False},
            'another': {'enabled': True},
        },
    })
    validator = ValidateConfig(
        configurator=Configurator(bindings=bindings))
    self.assertTrue(validator.verify_at_least_one_provider_enabled())
def test_verify_at_least_one_provider_enabled_bad(self):
    """Validation fails with an error message when no provider is enabled."""
    bindings = YamlBindings()
    bindings.import_dict({
        'providers': {
            'aws': {'enabled': False},
            'google': {'enabled': False},
        },
        # an enabled *service* must not count as an enabled provider
        'services': {'test': {'enabled': True}},
    })
    validator = ValidateConfig(
        configurator=Configurator(bindings=bindings))
    self.assertFalse(validator.verify_at_least_one_provider_enabled())
    self.assertEqual('None of the providers are enabled.',
                     validator.errors[0])
if __name__ == '__main__':
    # Run only this module's test case, with verbose per-test output.
    test_loader = unittest.TestLoader()
    test_suite = test_loader.loadTestsFromTestCase(ValidateConfigurationTest)
    unittest.TextTestRunner(verbosity=2).run(test_suite)
| |
# The MIT License (MIT)
#
# Copyright (c) 2014, Sam Bayless
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute,
# sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or
# substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
# NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
# OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import monosat.monosat_c
import sys
from monosat.bvtheory import BitVector
from monosat.logic import *
from monosat.manager import Manager
# Module-level debug switch; not referenced in this module itself —
# presumably read by external callers. TODO confirm before removing.
debug = False
# Collects a set of graphs to encode together into a formula
class GraphManager(metaclass=Manager):
    """Registry collecting every Graph created, to encode them into one formula."""

    def __init__(self):
        self.graphs = []

    def addGraph(self, g):
        """Register a newly constructed Graph."""
        self.graphs.append(g)

    def clear(self):
        """Forget all registered graphs."""
        self.graphs = []
class Graph:
    """A symbolic graph whose edges are SAT literals, backed by MonoSAT."""

    class GraphType:
        # Weight domain for edges; must agree with the weights passed to addEdge.
        int = 1
        float = 2
        rational = 3
def __init__(self, graph_type=1):
    """Create a new native MonoSAT graph and register it with the GraphManager.

    graph_type is one of Graph.GraphType (int/float/rational) and fixes the
    weight domain of all edges later added to this graph.
    """
    self._monosat = monosat.monosat_c.Monosat()
    manager = GraphManager()
    manager.addGraph(self)
    self.graph = self._monosat.newGraph()
    # 1-based id: this graph's position in the manager's registry
    self.id = len(manager.graphs)
    # a graph may use BitVector weights or plain weights, but never both
    # (enforced by asserts in addEdge / addUndirectedEdge)
    self.has_any_bv_edges = False
    self.has_any_non_bv_edges = False
    self.nodes = 0
    self.numedges = 0
    self.names = dict()  # node id -> node name (see addNode)
    self.nodemap = dict()  # maps node names to node objects
    self.out_edges = []  # per-node list of (v, w, var, weight) tuples
    self.in_edges = []
    self.out_edge_map = []  # per-node dict: target node -> list of edges
    self.in_edge_map = []  # per-node dict: source node -> list of edges
    self.queries = []
    self.queryLookup = dict()  # memoized reachability queries (see reaches)
    self.alledges = []
    self.distance_rational_queries = []  # deferred rational distance queries
    self.distance_float_queries = []
    self.dbg_reaches = []
    self.graph_type = graph_type
    self.edge_priorities = []
    self.all_undirectededges = []
    self.edgemap = dict()  # map from variables to edges
    self.acyclic_querries = []
def __getitem__(self, key):
if isinstance(key, str):
return self.nodemap[key]
if isinstance(key, tuple) or isinstance(obj, list):
if len(key) == 2:
# interpret the key as an edge
return self.getEdge(
self.nodemap[str(key[0])], self.nodemap[str(key[1])]
)
return self.nodemap[str(key)]
def __contains__(self, item):
return str(item) in self.nodemap.keys()
def assignWeightsTo(self, w):
    """Forward to MonoSAT's assignWeightsTo for this graph.

    Semantics are defined by the native API — presumably assigns weight `w`
    across the graph's edges; confirm against the MonoSAT C bindings.
    """
    self._monosat.assignWeightsTo(self.graph, w)
def enforceRouting(self, source, destination, nets, maxflowlit):
    """Pass a multi-net routing constraint down to the native solver.

    nets is an iterable of (edge_lits, reach_lits, disabled_edge) triples;
    every Var is lowered to its raw literal before the native call.
    """
    netlits = []
    for edge_lits, reach_lits, disabled_edge in nets:
        netlits.append(
            (
                [x.getLit() for x in edge_lits],
                [x.getLit() for x in reach_lits],
                disabled_edge.getLit(),
            )
        )
    self._monosat.enforceRouting(
        self.graph, source, destination, netlits, maxflowlit.getLit()
    )
def writeDot(self, out=sys.stdout, writeModel=True):
    """Write the graph in GraphViz dot format to `out`.

    When writeModel is True, edges are colored by their model value:
    red = enabled, blue = disabled, black = unassigned.
    NOTE(review): weight.value() below assumes BitVector-style weights —
    confirm behavior for graphs with plain constant weights.
    """
    print("digraph{", file=out)
    for n in range(self.nodes):
        print("n%d" % (n), file=out)
    for (v, w, var, weight) in self.getEdges():
        if not writeModel:
            if weight:
                print(
                    'n%d -> n%d [label="v%s, w%s"]' % (v, w, str(var), str(weight)),
                    file=out,
                )
            else:
                print('n%d -> n%d [label="v%s"]' % (v, w, str(var)), file=out)
        else:
            edgeVal = var.value()
            # color encodes the edge's assignment in the current model
            if edgeVal is None:
                edgecol = "black"
            elif edgeVal:
                edgecol = "red"
            else:
                edgecol = "blue"
            if weight:
                if edgeVal is not None:
                    weightVal = weight.value()
                    print(
                        'n%d -> n%d [label="v%s, w%s=%s", color=%s]'
                        % (v, w, str(var), str(weight), str(weightVal), edgecol),
                        file=out,
                    )
                else:
                    print(
                        'n%d -> n%d [label="v%s, w%s"]'
                        % (v, w, str(var), str(weight)),
                        file=out,
                    )
            else:
                print(
                    'n%d -> n%d [label="v%s", color=%s]'
                    % (v, w, str(var), edgecol),
                    file=out,
                )
    print("}", file=out)
def addNode(self, name=None):
n = self._monosat.newNode(self.graph)
self.nodes = n + 1
self.out_edges.append([])
self.in_edges.append([])
if name is None:
name = str(n)
else:
name = str(name)
if name is not None and name in self.names:
raise ValueError("Node %s already exists" % (str(name)))
self.names[n] = name
self.nodemap[name] = n
self.out_edge_map.append(dict())
self.in_edge_map.append(dict())
return n
def getSymbol(self, node):
    """Return the name registered for node id `node`."""
    return self.names[node]

def getMaxFlow(self, flowlit):
    """Read the max-flow value witnessing `flowlit` from the solved model."""
    return self._monosat.getModel_MaxFlow(self.graph, flowlit.getLit())

def getEdgeFlow(self, flowlit, edgelit, force_acyclic_flow=False):
    """Read the flow through one edge in the model's max-flow for `flowlit`.

    force_acyclic_flow selects the acyclic-flow variant of the query.
    NOTE(review): reads the model, so presumably only valid after solve() —
    confirm.
    """
    if force_acyclic_flow:
        return self._monosat.getModel_AcyclicEdgeFlow(
            self.graph, flowlit.getLit(), edgelit.getLit()
        )
    else:
        return self._monosat.getModel_EdgeFlow(
            self.graph, flowlit.getLit(), edgelit.getLit()
        )
"""
Get a path in the graph that satisfies the reachability or shortest path lit, if the shortest path lit is true in the model
If 'return_edge_lits' is True, then return the path as a list of edge literals. Otherwise, returns the path as a list of nodes.
Must not be caled before solve().
"""
def getPath(self, reach_or_shortest_path_lit, return_edge_lits=False):
if not return_edge_lits:
return self._monosat.getModel_Path_Nodes(
self.graph, reach_or_shortest_path_lit.getLit()
)
else:
lits = self._monosat.getModel_Path_EdgeLits(
self.graph, reach_or_shortest_path_lit.getLit()
)
if lits is None:
return None
lit_list = []
for lit in lits:
lit_list.append(Var(lit))
return lit_list
# Returns either a list containing all the edges from f to t, or, if there is only 1 edge from f to t, returns that edge.
# throws a ValueError if there are no edges from f to t.
def getEdge(self, f, t):
if t in self.out_edge_map[f]:
edges = self.out_edge_map[f][t]
assert len(edges) > 0
if len(edges) == 1:
return edges[
0
] # the common case is that the graph has no multiedges, so handle that specially
else:
return edges
raise ValueError("No edge " + str(f) + "->" + str(t))
def getAllEdges(self, undirected=False):
if undirected:
return self.all_undirectededges
else:
return self.alledges
# returns the variable corresponding to the backward (directed) edge of the edge
# corresponding to this variable, if such an edge exists. Returns None otherwise.
def backEdgeVar(self, v):
    """Return the Var of the reverse edge w->v for edge-var `v`, or None."""
    # NOTE: `v` is rebound here from a Var to the edge's source node id.
    (v, w, var, weight) = self.getEdgeFromVar(v)
    if v in self.out_edge_map[w]:
        edges = self.out_edge_map[w][v]
        assert len(edges) > 0
        # with parallel edges, the first reverse edge's Var is returned
        return edges[0][2]
    return None
def hasEdge(self, f, t):
return t in self.out_edge_map[f] and len(self.out_edge_map[f][t]) > 0
# for (v,u,var,weight) in self.out_edges[f]:
# if(u==t):
# return True;
# return False;
def newEdgeSet(self, edges, enforceEdgeAssignments=True):
    """Declare an edge set over `edges` (edge Vars of this graph) in the solver.

    Every Var must correspond to an edge previously added to this graph
    (checked against edgemap).
    """
    for v in edges:
        assert v.getLit() in self.edgemap
    edgelits = [v.getLit() for v in edges]
    self._monosat.newEdgeSet(self.graph, edgelits, enforceEdgeAssignments)
# add edge from v to w
def addEdge(self, v, w, weight=1):
    """Add a directed edge v->w with `weight`; return its edge Var.

    Missing endpoint nodes are created on demand. The weight's Python type
    must agree with this graph's graph_type; a graph may use BitVector
    weights or plain weights, but never both (enforced by the asserts).
    NOTE(review): for GraphType.rational with a non-BitVector weight no
    branch below assigns `var`, which would raise NameError — confirm
    whether rational non-BV edges are meant to be supported.
    """
    while v >= self.numNodes() or w >= self.numNodes():
        self.addNode()
    if weight and isinstance(weight, float):
        assert (
            self.graph_type == Graph.GraphType.float
            or self.graph_type == Graph.GraphType.rational
        )
    elif weight and isinstance(weight, tuple):
        assert self.graph_type == Graph.GraphType.rational
    if weight and isinstance(weight, BitVector):
        assert self.graph_type == Graph.GraphType.int
        self.has_any_bv_edges = True
        assert not self.has_any_non_bv_edges
        var = Var(self._monosat.newEdge_bv(self.graph, v, w, weight.getID()))
    else:
        self.has_any_non_bv_edges = True
        assert not self.has_any_bv_edges
        if self.graph_type == Graph.GraphType.int:
            var = Var(self._monosat.newEdge(self.graph, v, w, weight))
        elif self.graph_type == Graph.GraphType.float:
            var = Var(self._monosat.newEdge_double(self.graph, v, w, weight))
    e = (v, w, var, weight)
    self.alledges.append(e)
    self.numedges = self.numedges + 1
    self.out_edges[v].append(e)
    self.in_edges[w].append(e)
    self.edgemap[e[2].getLit()] = e
    # index the edge by its endpoints in both adjacency maps
    if w not in self.out_edge_map[v].keys():
        self.out_edge_map[v][w] = list()
    if v not in self.in_edge_map[w].keys():
        self.in_edge_map[w][v] = list()
    self.out_edge_map[v][w].append(e)
    self.in_edge_map[w][v].append(e)
    return e[2]
def addUndirectedEdge(self, v, w, weight=1):
    """Add an undirected edge v<->w; return the Var of the v->w direction.

    Encoded as two directed edges (v->w and w->v) whose Vars are asserted
    equal (AssertEq below), so enabling one enables both. Both directions
    count toward numedges; only e1 is recorded in all_undirectededges.
    NOTE(review): as in addEdge, rational non-BitVector weights leave
    v1/v2 unassigned — confirm.
    """
    while v >= self.numNodes() or w >= self.numNodes():
        self.addNode()
    if weight and isinstance(weight, float):
        assert (
            self.graph_type == Graph.GraphType.float
            or self.graph_type == Graph.GraphType.rational
        )
    elif weight and isinstance(weight, tuple):
        assert self.graph_type == Graph.GraphType.rational
    if weight and isinstance(weight, BitVector):
        assert self.graph_type == Graph.GraphType.int
        self.has_any_bv_edges = True
        assert not self.has_any_non_bv_edges
        v1 = Var(self._monosat.newEdge_bv(self.graph, v, w, weight.getID()))
        v2 = Var(self._monosat.newEdge_bv(self.graph, w, v, weight.getID()))
    else:
        self.has_any_non_bv_edges = True
        assert not self.has_any_bv_edges
        if self.graph_type == Graph.GraphType.int:
            v1 = Var(self._monosat.newEdge(self.graph, v, w, weight))
            v2 = Var(self._monosat.newEdge(self.graph, w, v, weight))
        elif self.graph_type == Graph.GraphType.float:
            v1 = Var(self._monosat.newEdge_double(self.graph, v, w, weight))
            v2 = Var(self._monosat.newEdge_double(self.graph, w, v, weight))
    e1 = (v, w, v1, weight)
    self.alledges.append(e1)
    self.numedges = self.numedges + 1
    self.out_edges[v].append(e1)
    self.in_edges[w].append(e1)
    e2 = (w, v, v2, weight)
    self.alledges.append(e2)
    self.numedges = self.numedges + 1
    self.out_edges[w].append(e2)
    self.in_edges[v].append(e2)
    self.edgemap[v1.getLit()] = e1
    self.edgemap[v2.getLit()] = e2
    # tie both directions together so the edge behaves as undirected
    AssertEq(v1, v2)
    self.all_undirectededges.append(e1)
    if w not in self.out_edge_map[v].keys():
        self.out_edge_map[v][w] = list()
    if v not in self.in_edge_map[w].keys():
        self.in_edge_map[w][v] = list()
    self.out_edge_map[v][w].append(e1)
    self.in_edge_map[w][v].append(e1)
    if v not in self.out_edge_map[w].keys():
        self.out_edge_map[w][v] = list()
    if w not in self.in_edge_map[v].keys():
        self.in_edge_map[v][w] = list()
    # add the edge twice, one for each direction.
    self.out_edge_map[w][v].append(e2)
    self.in_edge_map[v][w].append(e2)
    return v1
def setEdgePriority(self, edgeVar, priority):
    """Record a decision priority for an edge Var (queued, consumed elsewhere)."""
    self.edge_priorities.append((edgeVar, priority))

def numNodes(self):
    """Number of nodes currently in the graph."""
    return self.nodes

def numEdges(self):
    """Number of directed edges added (an undirected edge counts twice)."""
    return self.numedges

def nNodes(self):
    """Alias of numNodes()."""
    return self.nodes

def nEdges(self):
    """Alias of numEdges()."""
    return self.numedges

def getNodes(self):
    """Iterable over the node ids 0..nodes-1."""
    return range(self.nodes)

def getEdgeFromVar(self, var):
    """Return the (v, w, var, weight) edge tuple owning this edge Var."""
    return self.edgemap[var.getLit()]
def getEdges(self, node=-1, undirected=False):
if node >= 0:
for edge in self.out_edges[node]:
yield edge
if undirected:
for edge in self.in_edges[node]:
yield edge
else:
for node in self.out_edges:
for edge in node:
yield edge
def getOutgoingEdges(self, node=-1):
if node >= 0:
for edge in self.out_edges[node]:
yield edge
else:
for node in self.out_edges:
for edge in node:
yield edge
def getIncomingEdges(self, node=-1):
if node >= 0:
for edge in self.in_edges[node]:
yield edge
else:
for node in self.in_edges:
for edge in node:
yield edge
def getEdgeVars(self, node=-1):
if node >= 0:
for edge in self.out_edges[node]:
yield edge[2]
else:
for node in self.out_edges:
for edge in node:
yield edge[2]
def getOutgoingEdgeVars(self, node=-1):
if node >= 0:
for edge in self.out_edges[node]:
yield edge[2]
else:
for node in self.out_edges:
for edge in node:
yield edge[2]
def getIncomingEdgeVars(self, node=-1):
if node >= 0:
for edge in self.in_edges[node]:
yield edge[2]
else:
for node in self.in_edges:
for edge in node:
yield edge[2]
# def createSteinerTree(self):
# s = Graph.SteinerTree(len(self.steiners))
# self.steiners.append(s)
# return s
# Shadow the graph theory with a (slow) implementation in the circuit, for debugging purposes
"""def _reachesAnyCircuit(self,start,n=None):
assert(False)
if(n is None):
n=self.numNodes()
n=int(n)
#bellman-ford:
if(n>self.numNodes()):
n=self.numNodes();
reaches=[Var(False)]*self.numNodes()
reaches[start]=Var(True)
for i in range(0,n):
#For each edge:
for (v,w,var) in self.getEdges():
reaches[w]=(var & reaches[v])| reaches[w]
return reaches"""
def distance_rational_leq(self, start, to, distance_fraction):
    """Fresh Var intended to encode dist(start, to) <= distance_fraction.

    The query is queued in distance_rational_queries for later encoding
    rather than sent to the native solver immediately.
    """
    v = (
        Var()
    )  # "distance_rational_leq(%d,%d,%d,%d,%d)"%(self.id, start,to,distance_numerator,distance_denominator))
    self.distance_rational_queries.append(("leq", start, distance_fraction, to, v))
    return v

def distance_rational_lt(self, start, to, distance_fraction):
    """Fresh Var intended to encode dist(start, to) < distance_fraction (deferred)."""
    v = (
        Var()
    )  # "distance_rational_leq(%d,%d,%d,%d,%d)"%(self.id, start,to,distance_numerator,distance_denominator))
    self.distance_rational_queries.append(("lt", start, distance_fraction, to, v))
    return v
def distance_leq(self, start, to, distance):
    """Var true iff the shortest-path distance from start to `to` is <= distance.

    distance may be a rational tuple (deferred via the rational queue), a
    BitVector, or a plain constant.
    """
    if isinstance(distance, tuple):
        return self.distance_rational_leq(start, to, distance)
    if isinstance(distance, BitVector):
        v = Var(
            self._monosat.shortestPath_leq_bv(
                self.graph, start, to, distance.getID()
            )
        )
    else:
        v = Var(
            self._monosat.shortestPath_leq_const(self.graph, start, to, distance)
        )
    # v = Var() #"distance_float_leq(%d,%d,%d,%d)"%(self.id, start,to,distance))
    # self.distance_float_queries.append(('leq',start,distance,to,v))
    return v

def distance_lt(self, start, to, distance):
    """Var true iff the shortest-path distance from start to `to` is < distance."""
    if isinstance(distance, tuple):
        return self.distance_rational_lt(start, to, distance)
    if isinstance(distance, BitVector):
        v = Var(
            self._monosat.shortestPath_lt_bv(
                self.graph, start, to, distance.getID()
            )
        )
    else:
        v = Var(
            self._monosat.shortestPath_lt_const(self.graph, start, to, distance)
        )
    # v = Var() #"distance_float_leq(%d,%d,%d,%d)"%(self.id, start,to,distance))
    # self.distance_float_queries.append(('lt',start,distance,to,v))
    return v
def reaches(self, start, to, withinSteps=None):
    """Var true iff `to` is reachable from `start`.

    withinSteps bounds the path length in edges; None, a negative value,
    or a bound >= numNodes() all mean unbounded. Queries are memoized in
    queryLookup so repeated calls return the same Var.
    """
    if withinSteps is None or withinSteps < 0:
        withinSteps = None
    else:
        withinSteps = int(withinSteps)
    if withinSteps is not None and withinSteps >= self.numNodes():
        withinSteps = None
    # Check to see if we have already encoded this property
    if (start, to, withinSteps) in self.queryLookup:
        return self.queryLookup[(start, to, withinSteps)]
    if withinSteps is None:
        v = Var(self._monosat.reaches(self.graph, start, to))
    else:
        v = Var(
            self._monosat.shortestPathUnweighted_leq_const(
                self.graph, start, to, withinSteps
            )
        )
    # v = Var("distance_leq(%d,%d,%d,%d)"%(self.id, start,to,withinSteps) if withinSteps is not None else "reaches(%d,%d,%d)"%(self.id, start,to))
    # self.queries.append((start,withinSteps,to,v))
    self.queryLookup[(start, to, withinSteps)] = v
    return v
# Check each node for reachability from v in at most n steps
def reachesAny(self, start, n=None):
    """List of Vars, one per node i: true iff i is reachable from start.

    n bounds the path length in edges; None or negative means unbounded.
    Individual queries are memoized in queryLookup, shared with reaches().
    NOTE(review): the bound is clamped here with n > numNodes(), whereas
    reaches() discards bounds with >= numNodes() — confirm the asymmetry
    is intended.
    """
    if n is None or n < 0:
        n = None
    else:
        n = int(n)
    if n is not None and n > self.numNodes():
        n = self.numNodes()
    reaches = []
    for i in range(self.numNodes()):
        if (start, i, n) in self.queryLookup:
            reaches.append(self.queryLookup[(start, i, n)])
        else:
            if n is None:
                v = Var(self._monosat.reaches(self.graph, start, i))
            else:
                v = Var(
                    self._monosat.shortestPathUnweighted_leq_const(
                        self.graph, start, i, n
                    )
                )
            reaches.append(v)
            self.queryLookup[(start, i, n)] = v
    return reaches
def reachesBackward(self, start, to):
    """
    A reaches query that traverses edges backwards (eg, u reaches v if the edge v->u is in the graph)
    """
    # unlike reaches(), these queries are not memoized in queryLookup
    v = Var(self._monosat.reachesBackward(self.graph, start, to))
    return v

def onPath(self, nodeOnPath, start, to):
    """
    True iff there exists a path from start to nodeOnPath, AND there exists a path from nodeOnPath to 'to'
    """
    v = Var(self._monosat.onPath(self.graph, nodeOnPath, start, to))
    return v
def minimumSpanningTreeLessEq(self, minweight):
    """Var true iff the minimum spanning tree's total weight is <= minweight."""
    v = Var(self._monosat.minimumSpanningTree_leq(self.graph, minweight))
    return v

def acyclic(self, directed=True):
    """Var true iff the graph is acyclic (directed or undirected cycles)."""
    if directed:
        v = Var(self._monosat.acyclic_directed(self.graph))
    else:
        v = Var(self._monosat.acyclic_undirected(self.graph))
    return v

def AssertMinimumSpanningTreeLessEq(self, minweight):
    """Assert the MST weight bound as a hard constraint."""
    Assert(self.minimumSpanningTreeLessEq(minweight))
def edgeInMinimumSpanningTree(self, edgeVar):
    """Formula true iff `edgeVar` is enabled and in the minimum spanning tree.

    BUG FIX: self.mstEdgeQueries is never created in __init__, so the first
    call raised AttributeError; it is now initialized lazily.
    """
    varInTreeOrDisabled = Var()
    if not hasattr(self, 'mstEdgeQueries'):
        self.mstEdgeQueries = []
    self.mstEdgeQueries.append((edgeVar, varInTreeOrDisabled))
    return And(varInTreeOrDisabled, edgeVar)

def AssertEdgeInMinimumSpanningTree(self, minweight):
    """Assert the MST weight bound as a hard constraint.

    BUG FIX: the original called self.minimumSpanningTree, which does not
    exist on this class; the intended predicate is minimumSpanningTreeLessEq.
    """
    Assert(self.minimumSpanningTreeLessEq(minweight))
def maxFlowGreaterOrEqualTo(self, s, t, flow):
    """Var true iff the s-t maximum flow is >= flow (BitVector or constant)."""
    if isinstance(flow, BitVector):
        v = Var(self._monosat.maximumFlow_geq_bv(self.graph, s, t, flow.getID()))
    else:
        v = Var(self._monosat.maximumFlow_geq(self.graph, s, t, flow))
    return v

def AssertMaxFlowGreaterOrEqualTo(self, s, t, flow):
    """Assert max-flow(s, t) >= flow as a hard constraint."""
    v = self.maxFlowGreaterOrEqualTo(s, t, flow)
    Assert(v)

def AssertMaxFlowLessOrEqualTo(self, s, t, flow):
    """Assert max-flow(s, t) <= flow, encoded as not (flow >= flow + 1).

    NOTE(review): the +1 trick assumes integral flow values — confirm for
    float/rational graphs.
    """
    v = self.maxFlowGreaterOrEqualTo(s, t, flow + 1)
    Assert(Not(v))
def connectedComponentsGreaterOrEqualTo(self, components):
    """Var true iff the graph has at least `components` connected components."""
    v = Var(self._monosat.connectedComponents_geq_const(self.graph, components))
    return v

def connectedComponentsLessOrEqualTo(self, components):
    """Var true iff at most `components` components (not >= components + 1)."""
    v = Not(self.connectedComponentsGreaterOrEqualTo(components+1))
    return v

def AssertConnectedComponentsGreaterOrEqualTo(self, min_components):
    Assert(self.connectedComponentsGreaterOrEqualTo(min_components))

def AssertConnectedComponentsEqualTo(self, min_components):
    # equality is asserted as the conjunction of (>= k) and (<= k)
    Assert(self.connectedComponentsGreaterOrEqualTo(min_components))
    Assert(self.connectedComponentsLessOrEqualTo(min_components))

def AssertConnectedComponentsLessOrEqualto(self, min_components):
    # NOTE: the lower-case "to" typo is part of the public API; kept as-is.
    Assert(self.connectedComponentsLessOrEqualTo(min_components))
def draw(self):
    """Print the graph in GraphViz dot format on stdout, ignoring any model.

    NOTE(review): the "%d" formats assume integer vars and weights —
    confirm behavior for float/rational graphs.
    """
    print("digraph{")
    for n in range(self.nodes):
        print("n%d" % (n))
    for (v, w, var, weight) in self.getAllEdges():
        if weight is not None:
            print("""n%d->n%d [label="%d w=%d"]""" % (v, w, var.getVar(), weight))
        else:
            print("""n%d->n%d [label="%d"]""" % (v, w, var.getVar()))
    print("}")
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._top_level_domains_operations import build_get_request, build_list_agreements_request, build_list_request
T = TypeVar('T')
# signature of the optional `cls` response-transform callback accepted by operations
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TopLevelDomainsOperations:
    """TopLevelDomainsOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.web.v2020_09_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        # pipeline client used to send requests; injected by the service client
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
@distributed_trace
def list(
    self,
    **kwargs: Any
) -> AsyncIterable["_models.TopLevelDomainCollection"]:
    """Get all top-level domains supported for registration.

    Description for Get all top-level domains supported for registration.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either TopLevelDomainCollection or the result of
     cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_09_01.models.TopLevelDomainCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.TopLevelDomainCollection"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # first page: build the URL from the operation's template
        if not next_link:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                template_url=self.list.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        # subsequent pages: next_link is already a complete URL
        else:
            request = build_list_request(
                subscription_id=self._config.subscription_id,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # deserialize one page and return (link to next page, async item list)
        deserialized = self._deserialize("TopLevelDomainCollection", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # anything other than 200 is surfaced as an ARM-formatted error
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains'}  # type: ignore
@distributed_trace_async
async def get(
    self,
    name: str,
    **kwargs: Any
) -> "_models.TopLevelDomain":
    """Get details of a top-level domain.

    Description for Get details of a top-level domain.

    :param name: Name of the top-level domain.
    :type name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: TopLevelDomain, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2020_09_01.models.TopLevelDomain
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.TopLevelDomain"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    request = build_get_request(
        name=name,
        subscription_id=self._config.subscription_id,
        template_url=self.get.metadata['url'],
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # anything other than 200 is surfaced as an ARM-formatted error
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('TopLevelDomain', pipeline_response)

    if cls:
        # allow the caller's cls hook to transform the response
        return cls(pipeline_response, deserialized, {})

    return deserialized

get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains/{name}'}  # type: ignore
@distributed_trace
def list_agreements(
    self,
    name: str,
    agreement_option: "_models.TopLevelDomainAgreementOption",
    **kwargs: Any
) -> AsyncIterable["_models.TldLegalAgreementCollection"]:
    """Gets all legal agreements that user needs to accept before purchasing a domain.

    Description for Gets all legal agreements that user needs to accept before purchasing a domain.

    :param name: Name of the top-level domain.
    :type name: str
    :param agreement_option: Domain agreement options.
    :type agreement_option: ~azure.mgmt.web.v2020_09_01.models.TopLevelDomainAgreementOption
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either TldLegalAgreementCollection or the result of
     cls(response)
    :rtype:
     ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_09_01.models.TldLegalAgreementCollection]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.TldLegalAgreementCollection"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))

    def prepare_request(next_link=None):
        # the agreement_option body is serialized and re-sent for every page
        if not next_link:
            _json = self._serialize.body(agreement_option, 'TopLevelDomainAgreementOption')
            request = build_list_agreements_request(
                name=name,
                subscription_id=self._config.subscription_id,
                content_type=content_type,
                json=_json,
                template_url=self.list_agreements.metadata['url'],
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
        # subsequent pages: next_link is already a complete URL
        else:
            _json = self._serialize.body(agreement_option, 'TopLevelDomainAgreementOption')
            request = build_list_agreements_request(
                name=name,
                subscription_id=self._config.subscription_id,
                content_type=content_type,
                json=_json,
                template_url=next_link,
            )
            request = _convert_request(request)
            request.url = self._client.format_url(request.url)
            request.method = "GET"
        return request

    async def extract_data(pipeline_response):
        # deserialize one page and return (link to next page, async item list)
        deserialized = self._deserialize("TldLegalAgreementCollection", pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # anything other than 200 is surfaced as an ARM-formatted error
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_agreements.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DomainRegistration/topLevelDomains/{name}/listAgreements'}  # type: ignore
| |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import ast
import collections
import os
import re
from oslo.config import cfg
from six.moves import configparser
from six.moves.urllib import parse as urlparse
from pycadf import cadftaxonomy as taxonomy
from pycadf import cadftype
from pycadf import credential
from pycadf import endpoint
from pycadf import eventfactory as factory
from pycadf import host
from pycadf import identifier
from pycadf import reason
from pycadf import reporterstep
from pycadf import resource
from pycadf import tag
from pycadf import timestamp
# NOTE(gordc): remove cfg once we move over to this middleware version
CONF = cfg.CONF
# location of the api->CADF mapping file, resolved via oslo.config
opts = [cfg.StrOpt('api_audit_map',
                   default='api_audit_map.conf',
                   help='File containing mapping for api paths and '
                        'service endpoints')]
CONF.register_opts(opts, group='audit')

# parsed contents of the audit map file; built by _configure_audit_map
AuditMap = collections.namedtuple('AuditMap',
                                  ['path_kw',
                                   'custom_actions',
                                   'service_endpoints',
                                   'default_target_endpoint_type'])
def _configure_audit_map(cfg_file):
    """Configure to recognize and map known api paths.

    Parses the ini-style audit map at `cfg_file` and returns an AuditMap
    namedtuple; every section is optional and a missing section simply
    leaves the corresponding mapping empty.

    :raises PycadfAuditApiConfigError: if the file cannot be parsed.
    """
    path_kw = {}
    custom_actions = {}
    service_endpoints = {}
    default_target_endpoint_type = None
    if cfg_file:
        try:
            map_conf = configparser.SafeConfigParser()
            # BUG FIX: the file object handed to readfp() was never closed;
            # a context manager releases it deterministically.
            with open(cfg_file) as fp:
                map_conf.readfp(fp)

            try:
                default_target_endpoint_type = \
                    map_conf.get('DEFAULT', 'target_endpoint_type')
            except configparser.NoOptionError:
                pass

            try:
                custom_actions = dict(map_conf.items('custom_actions'))
            except configparser.Error:
                pass

            try:
                path_kw = dict(map_conf.items('path_keywords'))
            except configparser.Error:
                pass

            try:
                service_endpoints = dict(map_conf.items('service_endpoints'))
            except configparser.Error:
                pass
        except configparser.ParsingError as err:
            raise PycadfAuditApiConfigError(
                'Error parsing audit map file: %s' % err)
    return AuditMap(path_kw=path_kw, custom_actions=custom_actions,
                    service_endpoints=service_endpoints,
                    default_target_endpoint_type=default_target_endpoint_type)
class ClientResource(resource.Resource):
    """CADF resource that optionally carries the originating project id."""

    def __init__(self, project_id=None, **kwargs):
        super(ClientResource, self).__init__(**kwargs)
        if project_id is None:
            return
        self.project_id = project_id
class KeystoneCredential(credential.Credential):
    """CADF credential that optionally records keystone's identity status."""

    def __init__(self, identity_status=None, **kwargs):
        super(KeystoneCredential, self).__init__(**kwargs)
        if identity_status is None:
            return
        self.identity_status = identity_status
# Raised by _configure_audit_map when the audit map file cannot be parsed.
class PycadfAuditApiConfigError(Exception):
    """Error raised when pyCADF fails to configure correctly."""
class OpenStackAuditApi(object):
    """Maps OpenStack API requests onto CADF taxonomy actions and services."""

    # lightweight record for one service entry (see _get_service_info)
    Service = collections.namedtuple('Service',
                                     ['id', 'name', 'type', 'admin_endp',
                                      'public_endp', 'private_endp'])

    def __init__(self, map_file=None):
        """Load the audit map; defaults to the CONF.audit.api_audit_map option.

        NOTE(review): the existence check below inspects the CONF value, not
        the caller-supplied map_file, so an explicit map_file can be silently
        replaced by find_file's result — confirm this is intended.
        """
        if map_file is None:
            map_file = CONF.audit.api_audit_map
        if not os.path.exists(CONF.audit.api_audit_map):
            map_file = cfg.CONF.find_file(CONF.audit.api_audit_map)
        self._MAP = _configure_audit_map(map_file)
@staticmethod
def _clean_path(value):
return value[:-5] if value.endswith('.json') else value
def _get_action(self, req):
"""Take a given Request, parse url path to calculate action type.
Depending on req.method:
if POST: path ends with 'action', read the body and use as action;
path ends with known custom_action, take action from config;
request ends with known path, assume is create action;
request ends with unknown path, assume is update action.
if GET: request ends with known path, assume is list action;
request ends with unknown path, assume is read action.
if PUT, assume update action.
if DELETE, assume delete action.
if HEAD, assume read action.
"""
path = req.path[:-1] if req.path.endswith('/') else req.path
url_ending = self._clean_path(path[path.rfind('/') + 1:])
method = req.method
if url_ending + '/' + method.lower() in self._MAP.custom_actions:
action = self._MAP.custom_actions[url_ending + '/' +
method.lower()]
elif url_ending in self._MAP.custom_actions:
action = self._MAP.custom_actions[url_ending]
elif method == 'POST':
if url_ending == 'action':
try:
if req.json:
body_action = list(req.json.keys())[0]
action = taxonomy.ACTION_UPDATE + '/' + body_action
else:
action = taxonomy.ACTION_CREATE
except ValueError:
action = taxonomy.ACTION_CREATE
elif url_ending not in self._MAP.path_kw:
action = taxonomy.ACTION_UPDATE
else:
action = taxonomy.ACTION_CREATE
elif method == 'GET':
if url_ending in self._MAP.path_kw:
action = taxonomy.ACTION_LIST
else:
action = taxonomy.ACTION_READ
elif method == 'PUT' or method == 'PATCH':
action = taxonomy.ACTION_UPDATE
elif method == 'DELETE':
action = taxonomy.ACTION_DELETE
elif method == 'HEAD':
action = taxonomy.ACTION_READ
else:
action = taxonomy.UNKNOWN
return action
def _get_service_info(self, endp):
endpoint_id = endp['endpoints'][0].get('id', endp['name'])
service = self.Service(
type=self._MAP.service_endpoints.get(
endp['type'],
taxonomy.UNKNOWN),
name=endp['name'],
id=identifier.norm_ns(endpoint_id),
admin_endp=endpoint.Endpoint(
name='admin',
url=endp['endpoints'][0]['adminURL']),
private_endp=endpoint.Endpoint(
name='private',
url=endp['endpoints'][0]['internalURL']),
public_endp=endpoint.Endpoint(
name='public',
url=endp['endpoints'][0]['publicURL']))
return service
def _build_typeURI(self, req, service_type):
type_uri = ''
prev_key = None
for key in re.split('/', req.path):
key = self._clean_path(key)
if key in self._MAP.path_kw:
type_uri += '/' + key
elif prev_key in self._MAP.path_kw:
type_uri += '/' + self._MAP.path_kw[prev_key]
prev_key = key
return service_type + type_uri
def create_event(self, req, correlation_id):
action = self._get_action(req)
initiator_host = host.Host(address=req.client_addr,
agent=req.user_agent)
catalog = ast.literal_eval(req.environ['HTTP_X_SERVICE_CATALOG'])
service_info = self.Service(type=taxonomy.UNKNOWN,
name=taxonomy.UNKNOWN,
id=taxonomy.UNKNOWN,
admin_endp=None,
private_endp=None,
public_endp=None)
default_endpoint = None
for endp in catalog:
admin_urlparse = urlparse.urlparse(
endp['endpoints'][0]['adminURL'])
public_urlparse = urlparse.urlparse(
endp['endpoints'][0]['publicURL'])
req_url = urlparse.urlparse(req.host_url)
if (req_url.netloc == admin_urlparse.netloc
or req_url.netloc == public_urlparse.netloc):
service_info = self._get_service_info(endp)
break
elif (self._MAP.default_target_endpoint_type
and endp['type'] == self._MAP.default_target_endpoint_type):
default_endpoint = endp
else:
if default_endpoint:
service_info = self._get_service_info(default_endpoint)
initiator = ClientResource(
typeURI=taxonomy.ACCOUNT_USER,
id=identifier.norm_ns(str(req.environ['HTTP_X_USER_ID'])),
name=req.environ['HTTP_X_USER_NAME'],
host=initiator_host,
credential=KeystoneCredential(
token=req.environ['HTTP_X_AUTH_TOKEN'],
identity_status=req.environ['HTTP_X_IDENTITY_STATUS']),
project_id=identifier.norm_ns(req.environ['HTTP_X_PROJECT_ID']))
target_typeURI = (self._build_typeURI(req, service_info.type)
if service_info.type != taxonomy.UNKNOWN
else service_info.type)
target = resource.Resource(typeURI=target_typeURI,
id=service_info.id,
name=service_info.name)
if service_info.admin_endp:
target.add_address(service_info.admin_endp)
if service_info.private_endp:
target.add_address(service_info.private_endp)
if service_info.public_endp:
target.add_address(service_info.public_endp)
event = factory.EventFactory().new_event(
eventType=cadftype.EVENTTYPE_ACTIVITY,
outcome=taxonomy.OUTCOME_PENDING,
action=action,
initiator=initiator,
target=target,
observer=resource.Resource(id='target'))
event.requestPath = req.path_qs
event.add_tag(tag.generate_name_value_tag('correlation_id',
correlation_id))
return event
def append_audit_event(self, req):
"""Append a CADF event to req.environ['CADF_EVENT']
Also, stores model in request for future process and includes a
CADF correlation id.
"""
correlation_id = identifier.generate_uuid()
req.environ['CADF_EVENT_CORRELATION_ID'] = correlation_id
event = self.create_event(req, correlation_id)
setattr(req, 'cadf_model', event)
req.environ['CADF_EVENT'] = event.as_dict()
def mod_audit_event(self, req, response):
"""Modifies CADF event in request based on response.
If no event exists, a new event is created.
"""
if response:
if response.status_int >= 200 and response.status_int < 400:
result = taxonomy.OUTCOME_SUCCESS
else:
result = taxonomy.OUTCOME_FAILURE
else:
result = taxonomy.UNKNOWN
if hasattr(req, 'cadf_model'):
req.cadf_model.add_reporterstep(
reporterstep.Reporterstep(
role=cadftype.REPORTER_ROLE_MODIFIER,
reporter=resource.Resource(id='target'),
reporterTime=timestamp.get_utc_now()))
else:
self.append_audit_event(req)
req.cadf_model.outcome = result
if response:
req.cadf_model.reason = \
reason.Reason(reasonType='HTTP',
reasonCode=str(response.status_int))
req.environ['CADF_EVENT'] = req.cadf_model.as_dict()
| |
"""The tests for the Recorder component."""
# pylint: disable=protected-access
from datetime import datetime, timedelta
from unittest.mock import patch
from sqlalchemy.exc import OperationalError
from homeassistant.components.recorder import (
CONFIG_SCHEMA,
DOMAIN,
Recorder,
run_information,
run_information_from_instance,
run_information_with_session,
)
from homeassistant.components.recorder.const import DATA_INSTANCE
from homeassistant.components.recorder.models import Events, RecorderRuns, States
from homeassistant.components.recorder.util import session_scope
from homeassistant.const import MATCH_ALL, STATE_LOCKED, STATE_UNLOCKED
from homeassistant.core import Context, callback
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from .common import wait_recording_done
from tests.common import fire_time_changed, get_test_home_assistant
def test_saving_state(hass, hass_recorder):
    """Test saving and restoring a state."""
    hass = hass_recorder()
    entity_id = "test.recorder"
    state = "restoring_from_db"
    attributes = {"test_attr": 5, "test_attr_10": "nice"}
    hass.states.set(entity_id, state, attributes)
    wait_recording_done(hass)
    with session_scope(hass=hass) as session:
        db_states = list(session.query(States))
        assert len(db_states) == 1
        assert db_states[0].event_id > 0
        state = db_states[0].to_native()
    # Context is not restored from the database, so compare against a
    # context-stripped copy of the live state.
    assert state == _state_empty_context(hass, entity_id)
def test_saving_state_with_exception(hass, hass_recorder, caplog):
    """Test saving and restoring a state."""
    hass = hass_recorder()
    entity_id = "test.recorder"
    state = "restoring_from_db"
    attributes = {"test_attr": 5, "test_attr_10": "nice"}

    def _throw_if_state_in_session(*args, **kwargs):
        # Force a DB error only while a state is pending in the session,
        # so the event-saving path is left untouched.
        for obj in hass.data[DATA_INSTANCE].event_session:
            if isinstance(obj, States):
                raise OperationalError(
                    "insert the state", "fake params", "forced to fail"
                )

    with patch("time.sleep"), patch.object(
        hass.data[DATA_INSTANCE].event_session,
        "flush",
        side_effect=_throw_if_state_in_session,
    ):
        hass.states.set(entity_id, "fail", attributes)
        wait_recording_done(hass)
    assert "Error executing query" in caplog.text
    assert "Error saving events" not in caplog.text
    caplog.clear()
    # With the patch removed, recording must recover on the next state.
    hass.states.set(entity_id, state, attributes)
    wait_recording_done(hass)
    with session_scope(hass=hass) as session:
        db_states = list(session.query(States))
        assert len(db_states) >= 1
    assert "Error executing query" not in caplog.text
    assert "Error saving events" not in caplog.text
def test_saving_event(hass, hass_recorder):
    """Test saving and restoring an event."""
    hass = hass_recorder()
    event_type = "EVENT_TEST"
    event_data = {"test_attr": 5, "test_attr_10": "nice"}
    events = []

    @callback
    def event_listener(event):
        """Record events from eventbus."""
        if event.event_type == event_type:
            events.append(event)

    hass.bus.listen(MATCH_ALL, event_listener)
    hass.bus.fire(event_type, event_data)
    wait_recording_done(hass)
    assert len(events) == 1
    event = events[0]
    hass.data[DATA_INSTANCE].block_till_done()
    with session_scope(hass=hass) as session:
        db_events = list(session.query(Events).filter_by(event_type=event_type))
        assert len(db_events) == 1
        db_event = db_events[0].to_native()
    assert event.event_type == db_event.event_type
    assert event.data == db_event.data
    assert event.origin == db_event.origin
    # Recorder uses SQLite and stores datetimes as integer unix timestamps
    assert event.time_fired.replace(microsecond=0) == db_event.time_fired.replace(
        microsecond=0
    )
def _add_entities(hass, entity_ids):
    """Set a state for each entity and return all recorded native States."""
    attributes = {"test_attr": 5, "test_attr_10": "nice"}
    for idx, entity_id in enumerate(entity_ids):
        # Each entity gets a distinct state value 'state<idx>'.
        hass.states.set(entity_id, f"state{idx}", attributes)
    wait_recording_done(hass)
    with session_scope(hass=hass) as session:
        return [st.to_native() for st in session.query(States)]
def _add_events(hass, events):
    """Fire the given event types and return all recorded native Events."""
    with session_scope(hass=hass) as session:
        # Start from an empty table so only the fired events come back.
        session.query(Events).delete(synchronize_session=False)
    for event_type in events:
        hass.bus.fire(event_type)
    wait_recording_done(hass)
    with session_scope(hass=hass) as session:
        return [ev.to_native() for ev in session.query(Events)]
def _state_empty_context(hass, entity_id):
    """Return the current state with its context stripped for comparison."""
    # We don't restore context unless we need it by joining the
    # events table on the event_id for state_changed events
    state = hass.states.get(entity_id)
    state.context = Context(id=None)
    return state
# pylint: disable=redefined-outer-name,invalid-name
def test_saving_state_include_domains(hass_recorder):
    """Test saving and restoring a state."""
    hass = hass_recorder({"include": {"domains": "test2"}})
    states = _add_entities(hass, ["test.recorder", "test2.recorder"])
    # Only the included domain's entity should have been recorded.
    assert len(states) == 1
    assert _state_empty_context(hass, "test2.recorder") == states[0]
def test_saving_state_include_domains_globs(hass_recorder):
    """Test saving and restoring a state."""
    hass = hass_recorder(
        {"include": {"domains": "test2", "entity_globs": "*.included_*"}}
    )
    states = _add_entities(
        hass, ["test.recorder", "test2.recorder", "test3.included_entity"]
    )
    # The included domain AND the glob match are recorded; test.* is not.
    assert len(states) == 2
    assert _state_empty_context(hass, "test2.recorder") == states[0]
    assert _state_empty_context(hass, "test3.included_entity") == states[1]
def test_saving_state_incl_entities(hass_recorder):
    """Test saving and restoring a state."""
    hass = hass_recorder({"include": {"entities": "test2.recorder"}})
    states = _add_entities(hass, ["test.recorder", "test2.recorder"])
    # Only the explicitly included entity is recorded.
    assert len(states) == 1
    assert _state_empty_context(hass, "test2.recorder") == states[0]
def test_saving_event_exclude_event_type(hass_recorder):
    """Test saving and restoring an event."""
    # The exclusion list also covers built-in lifecycle events so only
    # the fired test events are under consideration.
    hass = hass_recorder(
        {
            "exclude": {
                "event_types": [
                    "service_registered",
                    "homeassistant_start",
                    "component_loaded",
                    "core_config_updated",
                    "homeassistant_started",
                    "test",
                ]
            }
        }
    )
    events = _add_events(hass, ["test", "test2"])
    assert len(events) == 1
    assert events[0].event_type == "test2"
def test_saving_state_exclude_domains(hass_recorder):
    """Test saving and restoring a state."""
    hass = hass_recorder({"exclude": {"domains": "test"}})
    states = _add_entities(hass, ["test.recorder", "test2.recorder"])
    # The excluded domain's entity must not be recorded.
    assert len(states) == 1
    assert _state_empty_context(hass, "test2.recorder") == states[0]
def test_saving_state_exclude_domains_globs(hass_recorder):
    """Test saving and restoring a state."""
    hass = hass_recorder(
        {"exclude": {"domains": "test", "entity_globs": "*.excluded_*"}}
    )
    states = _add_entities(
        hass, ["test.recorder", "test2.recorder", "test2.excluded_entity"]
    )
    # Both the excluded domain and the glob match are filtered out.
    assert len(states) == 1
    assert _state_empty_context(hass, "test2.recorder") == states[0]
def test_saving_state_exclude_entities(hass_recorder):
    """Test saving and restoring a state."""
    hass = hass_recorder({"exclude": {"entities": "test.recorder"}})
    states = _add_entities(hass, ["test.recorder", "test2.recorder"])
    # The explicitly excluded entity must not be recorded.
    assert len(states) == 1
    assert _state_empty_context(hass, "test2.recorder") == states[0]
def test_saving_state_exclude_domain_include_entity(hass_recorder):
    """Test saving and restoring a state."""
    hass = hass_recorder(
        {"include": {"entities": "test.recorder"}, "exclude": {"domains": "test"}}
    )
    states = _add_entities(hass, ["test.recorder", "test2.recorder"])
    # Entity-level include wins over domain-level exclude, so both states
    # end up recorded.
    assert len(states) == 2
def test_saving_state_exclude_domain_glob_include_entity(hass_recorder):
    """Test saving and restoring a state."""
    hass = hass_recorder(
        {
            "include": {"entities": ["test.recorder", "test.excluded_entity"]},
            "exclude": {"domains": "test", "entity_globs": "*._excluded_*"},
        }
    )
    states = _add_entities(
        hass, ["test.recorder", "test2.recorder", "test.excluded_entity"]
    )
    # Entity-level includes override both the domain and glob excludes.
    assert len(states) == 3
def test_saving_state_include_domain_exclude_entity(hass_recorder):
    """Test saving and restoring a state."""
    hass = hass_recorder(
        {"exclude": {"entities": "test.recorder"}, "include": {"domains": "test"}}
    )
    states = _add_entities(hass, ["test.recorder", "test2.recorder", "test.ok"])
    # Only test.ok survives: test.recorder is excluded by entity and
    # test2.recorder is outside the included domain.
    assert len(states) == 1
    assert _state_empty_context(hass, "test.ok") == states[0]
    assert _state_empty_context(hass, "test.ok").state == "state2"
def test_saving_state_include_domain_glob_exclude_entity(hass_recorder):
    """Test saving and restoring a state."""
    hass = hass_recorder(
        {
            "exclude": {"entities": ["test.recorder", "test2.included_entity"]},
            "include": {"domains": "test", "entity_globs": "*._included_*"},
        }
    )
    states = _add_entities(
        hass, ["test.recorder", "test2.recorder", "test.ok", "test2.included_entity"]
    )
    # Entity-level excludes override the domain/glob includes, leaving
    # only test.ok recorded.
    assert len(states) == 1
    assert _state_empty_context(hass, "test.ok") == states[0]
    assert _state_empty_context(hass, "test.ok").state == "state2"
def test_saving_state_and_removing_entity(hass, hass_recorder):
    """Test saving the state of a removed entity."""
    hass = hass_recorder()
    entity_id = "lock.mine"
    hass.states.set(entity_id, STATE_LOCKED)
    hass.states.set(entity_id, STATE_UNLOCKED)
    hass.states.async_remove(entity_id)
    wait_recording_done(hass)
    with session_scope(hass=hass) as session:
        states = list(session.query(States))
        assert len(states) == 3
        assert states[0].entity_id == entity_id
        assert states[0].state == STATE_LOCKED
        assert states[1].entity_id == entity_id
        assert states[1].state == STATE_UNLOCKED
        assert states[2].entity_id == entity_id
        # Removal is recorded as a final row with a NULL state.
        assert states[2].state is None
def test_recorder_setup_failure():
    """Test some exceptions."""
    hass = get_test_home_assistant()
    # Simulate a missing DB driver; the recorder thread must exit
    # cleanly (join returns) instead of hanging or crashing the test.
    with patch.object(Recorder, "_setup_connection") as setup, patch(
        "homeassistant.components.recorder.time.sleep"
    ):
        setup.side_effect = ImportError("driver not found")
        rec = Recorder(
            hass,
            auto_purge=True,
            keep_days=7,
            commit_interval=1,
            uri="sqlite://",
            db_max_retries=10,
            db_retry_wait=3,
            entity_filter=CONFIG_SCHEMA({DOMAIN: {}}),
            exclude_t=[],
            db_integrity_check=False,
        )
        rec.start()
        rec.join()
    hass.stop()
async def test_defaults_set(hass):
    """Test the config defaults are set."""
    recorder_config = None

    async def mock_setup(hass, config):
        """Mock setup."""
        nonlocal recorder_config
        recorder_config = config["recorder"]
        return True

    with patch("homeassistant.components.recorder.async_setup", side_effect=mock_setup):
        # Setting up 'history' pulls in recorder with its default config.
        assert await async_setup_component(hass, "history", {})
    assert recorder_config is not None
    # pylint: disable=unsubscriptable-object
    assert recorder_config["auto_purge"]
    assert recorder_config["purge_keep_days"] == 10
def run_tasks_at_time(hass, test_time):
    """Advance the clock and wait for any callbacks to finish."""
    fire_time_changed(hass, test_time)
    hass.block_till_done()
    # Also drain the recorder's own queue so DB work is finished.
    hass.data[DATA_INSTANCE].block_till_done()
def test_auto_purge(hass_recorder):
    """Test periodic purge alarm scheduling."""
    hass = hass_recorder()
    original_tz = dt_util.DEFAULT_TIME_ZONE
    tz = dt_util.get_time_zone("Europe/Copenhagen")
    dt_util.set_default_time_zone(tz)
    # Purging is schedule to happen at 4:12am every day. Exercise this behavior
    # by firing alarms and advancing the clock around this time. Pick an arbitrary
    # year in the future to avoid boundary conditions relative to the current date.
    #
    # The clock is started at 4:15am then advanced forward below
    now = dt_util.utcnow()
    test_time = tz.localize(datetime(now.year + 2, 1, 1, 4, 15, 0))
    run_tasks_at_time(hass, test_time)
    with patch(
        "homeassistant.components.recorder.purge.purge_old_data", return_value=True
    ) as purge_old_data:
        # Advance one day, and the purge task should run
        test_time = test_time + timedelta(days=1)
        run_tasks_at_time(hass, test_time)
        assert len(purge_old_data.mock_calls) == 1
        purge_old_data.reset_mock()
        # Advance one day, and the purge task should run again
        test_time = test_time + timedelta(days=1)
        run_tasks_at_time(hass, test_time)
        assert len(purge_old_data.mock_calls) == 1
        purge_old_data.reset_mock()
        # Advance less than one full day.  The alarm should not yet fire.
        test_time = test_time + timedelta(hours=23)
        run_tasks_at_time(hass, test_time)
        assert len(purge_old_data.mock_calls) == 0
        # Advance to the next day and fire the alarm again
        test_time = test_time + timedelta(hours=1)
        run_tasks_at_time(hass, test_time)
        assert len(purge_old_data.mock_calls) == 1
    # Restore the global default timezone for subsequent tests.
    dt_util.set_default_time_zone(original_tz)
def test_saving_sets_old_state(hass_recorder):
    """Test saving sets old state."""
    hass = hass_recorder()
    hass.states.set("test.one", "on", {})
    hass.states.set("test.two", "on", {})
    wait_recording_done(hass)
    hass.states.set("test.one", "off", {})
    hass.states.set("test.two", "off", {})
    wait_recording_done(hass)
    with session_scope(hass=hass) as session:
        states = list(session.query(States))
        assert len(states) == 4
        assert states[0].entity_id == "test.one"
        assert states[1].entity_id == "test.two"
        assert states[2].entity_id == "test.one"
        assert states[3].entity_id == "test.two"
        # First writes have no predecessor; later writes link back to
        # the row for the previous state of the same entity.
        assert states[0].old_state_id is None
        assert states[1].old_state_id is None
        assert states[2].old_state_id == states[0].state_id
        assert states[3].old_state_id == states[1].state_id
def test_saving_state_with_serializable_data(hass_recorder, caplog):
    """Test that saving NON-serializable data does not crash the recorder."""
    hass = hass_recorder()
    # The first state carries an attribute that cannot be JSON encoded;
    # it is dropped with a log message, and recording continues.
    hass.states.set("test.one", "on", {"fail": CannotSerializeMe()})
    wait_recording_done(hass)
    hass.states.set("test.two", "on", {})
    wait_recording_done(hass)
    hass.states.set("test.two", "off", {})
    wait_recording_done(hass)
    with session_scope(hass=hass) as session:
        states = list(session.query(States))
        assert len(states) == 2
        assert states[0].entity_id == "test.two"
        assert states[1].entity_id == "test.two"
        assert states[0].old_state_id is None
        assert states[1].old_state_id == states[0].state_id
    assert "State is not JSON serializable" in caplog.text
def test_run_information(hass_recorder):
    """Ensure run_information returns expected data."""
    before_start_recording = dt_util.utcnow()
    hass = hass_recorder()
    run_info = run_information_from_instance(hass)
    assert isinstance(run_info, RecorderRuns)
    assert run_info.closed_incorrect is False
    with session_scope(hass=hass) as session:
        run_info = run_information_with_session(session)
        assert isinstance(run_info, RecorderRuns)
        assert run_info.closed_incorrect is False
    run_info = run_information(hass)
    assert isinstance(run_info, RecorderRuns)
    assert run_info.closed_incorrect is False
    hass.states.set("test.two", "on", {})
    wait_recording_done(hass)
    run_info = run_information(hass)
    assert isinstance(run_info, RecorderRuns)
    assert run_info.closed_incorrect is False
    # A point in time before the recorder started has no matching run.
    run_info = run_information(hass, before_start_recording)
    assert run_info is None
    run_info = run_information(hass, dt_util.utcnow())
    assert isinstance(run_info, RecorderRuns)
    assert run_info.closed_incorrect is False
class CannotSerializeMe:
    """A class that the JSONEncoder cannot serialize.

    Used as a state attribute value to exercise the recorder's handling
    of non-serializable data.
    """
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
import boto
import logging
import os
import random
import shutil
import subprocess
import sys
import tempfile
import time
from optparse import OptionParser
from sys import stderr
from boto.ec2.blockdevicemapping import BlockDeviceMapping, EBSBlockDeviceType
# Configure and parse our command-line arguments.
# Returns (opts, action, cluster_name); exits with an error message when
# required arguments or AWS credential environment variables are missing.
# NOTE: this file is Python 2 (print statements, string-exception style).
def parse_args():
    parser = OptionParser(usage="mesos-ec2 [options] <action> <cluster_name>"
        + "\n\n<action> can be: launch, destroy, login, stop, start, get-master",
        add_help_option=False)
    parser.add_option("-h", "--help", action="help",
                      help="Show this help message and exit")
    parser.add_option("-s", "--slaves", type="int", default=1,
        help="Number of slaves to launch (default: 1)")
    parser.add_option("-w", "--wait", type="int", default=60,
        help="Number of seconds to wait for cluster nodes to start (default: 60)")
    parser.add_option("-k", "--key-pair",
        help="Key pair to use on instances")
    parser.add_option("-i", "--identity-file",
        help="SSH private key file to use for logging into instances")
    parser.add_option("-t", "--instance-type", default="m1.large",
        help="Type of instance to launch (default: m1.large). " +
             "WARNING: must be 64 bit, thus small instances won't work")
    parser.add_option("-m", "--master-instance-type", default="",
        help="Master instance type (leave empty for same as instance-type)")
    parser.add_option("-z", "--zone", default="us-east-1b",
        help="Availability zone to launch instances in")
    parser.add_option("-a", "--ami", default="ami-4521e52c",
        help="Amazon Machine Image ID to use")
    parser.add_option("-o", "--os", default="amazon64",
        help="OS on the Amazon Machine Image (default: amazon64)")
    parser.add_option("-d", "--download", metavar="SOURCE", default="none",
        help="Where to download latest code from: set to 'git' to check out " +
             "from git, or 'none' to use the Mesos on the AMI (default)")
    parser.add_option("-b", "--branch", default="master",
        help="If using git, which branch to check out. Default is 'master'")
    parser.add_option("-D", metavar="[ADDRESS:]PORT", dest="proxy_port",
        help="Use SSH dynamic port forwarding to create a SOCKS proxy at " +
             "the given local address (for use with login)")
    parser.add_option("--resume", action="store_true", default=False,
        help="Resume installation on a previously launched cluster " +
             "(for debugging)")
    parser.add_option("-f", "--ft", metavar="NUM_MASTERS", default="1",
        help="Number of masters to run. Default is 1. Greater values " +
             "make Mesos run in fault-tolerant mode with ZooKeeper.")
    parser.add_option("--ebs-vol-size", metavar="SIZE", type="int", default=0,
        help="Attach a new EBS volume of size SIZE (in GB) to each node as " +
             "/vol. The volumes will be deleted when the instances terminate. " +
             "Only possible on EBS-backed AMIs.")
    parser.add_option("--swap", metavar="SWAP", type="int", default=1024,
        help="Swap space to set up per node, in MB (default: 1024)")
    parser.add_option("--spot-price", metavar="PRICE", type="float",
        help="If specified, launch slaves as spot instances with the given " +
             "maximum price (in dollars)")
    (opts, args) = parser.parse_args()
    # --ft is declared as a string option; normalize it to int here.
    opts.ft = int(opts.ft)
    if len(args) != 2:
        parser.print_help()
        sys.exit(1)
    (action, cluster_name) = args
    # An identity file is required for any action that SSHes into nodes.
    if opts.identity_file == None and action in ['launch', 'login']:
        print >> stderr, ("ERROR: The -i or --identity-file argument is " +
                          "required for " + action)
        sys.exit(1)
    if os.getenv('AWS_ACCESS_KEY_ID') == None:
        print >> stderr, ("ERROR: The environment variable AWS_ACCESS_KEY_ID " +
                          "must be set")
        sys.exit(1)
    if os.getenv('AWS_SECRET_ACCESS_KEY') == None:
        print >> stderr, ("ERROR: The environment variable AWS_SECRET_ACCESS_KEY " +
                          "must be set")
        sys.exit(1)
    return (opts, action, cluster_name)
# Get the EC2 security group of the given name, creating it if it doesn't exist
def get_or_make_group(conn, name):
    groups = conn.get_all_security_groups()
    group = [g for g in groups if g.name == name]
    if len(group) > 0:
        return group[0]
    else:
        print "Creating security group " + name
        return conn.create_security_group(name, "Mesos EC2 group")
def wait_for_instances(conn, instances):
    """Poll until no instance in *instances* is still 'pending'.

    Each iteration refreshes every instance's state from EC2, then either
    returns (nothing pending: the instances are running or have failed and
    been terminated) or sleeps five seconds before polling again.
    """
    while True:
        for inst in instances:
            inst.update()
        if not any(inst.state == 'pending' for inst in instances):
            return
        time.sleep(5)
def is_active(instance):
    """Return True if *instance* is in a state we consider active.

    Anything not terminating or terminated counts; stopping and stopped
    are active too, since stopped clusters can be restarted.
    """
    active_states = ('pending', 'running', 'stopping', 'stopped')
    return instance.state in active_states
# Launch a cluster of the given name, by setting up its security groups,
# and then starting new instances in them.
# Returns a tuple of EC2 reservation objects for the master, slave
# and zookeeper instances (in that order).
# Fails if there already instances running in the cluster's groups.
def launch_cluster(conn, opts, cluster_name):
    print "Setting up security groups..."
    master_group = get_or_make_group(conn, cluster_name + "-master")
    slave_group = get_or_make_group(conn, cluster_name + "-slaves")
    zoo_group = get_or_make_group(conn, cluster_name + "-zoo")
    # An empty rule list means the group was just created; open the ports
    # each role needs (SSH, web UIs, HDFS/MapReduce, Mesos, ZooKeeper).
    if master_group.rules == []: # Group was just now created
        master_group.authorize(src_group=master_group)
        master_group.authorize(src_group=slave_group)
        master_group.authorize(src_group=zoo_group)
        master_group.authorize('tcp', 22, 22, '0.0.0.0/0')
        master_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
        master_group.authorize('tcp', 50030, 50030, '0.0.0.0/0')
        master_group.authorize('tcp', 50070, 50070, '0.0.0.0/0')
        master_group.authorize('tcp', 60070, 60070, '0.0.0.0/0')
        master_group.authorize('tcp', 38090, 38090, '0.0.0.0/0')
    if slave_group.rules == []: # Group was just now created
        slave_group.authorize(src_group=master_group)
        slave_group.authorize(src_group=slave_group)
        slave_group.authorize(src_group=zoo_group)
        slave_group.authorize('tcp', 22, 22, '0.0.0.0/0')
        slave_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
        slave_group.authorize('tcp', 50060, 50060, '0.0.0.0/0')
        slave_group.authorize('tcp', 50075, 50075, '0.0.0.0/0')
        slave_group.authorize('tcp', 60060, 60060, '0.0.0.0/0')
        slave_group.authorize('tcp', 60075, 60075, '0.0.0.0/0')
    if zoo_group.rules == []: # Group was just now created
        zoo_group.authorize(src_group=master_group)
        zoo_group.authorize(src_group=slave_group)
        zoo_group.authorize(src_group=zoo_group)
        zoo_group.authorize('tcp', 22, 22, '0.0.0.0/0')
        zoo_group.authorize('tcp', 2181, 2181, '0.0.0.0/0')
        zoo_group.authorize('tcp', 2888, 2888, '0.0.0.0/0')
        zoo_group.authorize('tcp', 3888, 3888, '0.0.0.0/0')
    # Check if instances are already running in our groups
    print "Checking for running cluster..."
    reservations = conn.get_all_instances()
    for res in reservations:
        group_names = [g.id for g in res.groups]
        if master_group.name in group_names or slave_group.name in group_names or zoo_group.name in group_names:
            active = [i for i in res.instances if is_active(i)]
            if len(active) > 0:
                print >> stderr, ("ERROR: There are already instances running in " +
                    "group %s, %s or %s" % (master_group.name, slave_group.name, zoo_group.name))
                sys.exit(1)
    print "Launching instances..."
    try:
        image = conn.get_all_images(image_ids=[opts.ami])[0]
    except:
        print >> stderr, "Could not find AMI " + opts.ami
        sys.exit(1)
    # Create block device mapping so that we can add an EBS volume if asked to
    block_map = BlockDeviceMapping()
    if opts.ebs_vol_size > 0:
        device = EBSBlockDeviceType()
        device.size = opts.ebs_vol_size
        device.delete_on_termination = True
        block_map["/dev/sdv"] = device
    # Launch slaves
    if opts.spot_price != None:
        # Launch spot instances with the requested price
        print ("Requesting %d slaves as spot instances with price $%.3f" %
               (opts.slaves, opts.spot_price))
        slave_reqs = conn.request_spot_instances(
            price = opts.spot_price,
            image_id = opts.ami,
            launch_group = "launch-group-%s" % cluster_name,
            placement = opts.zone,
            count = opts.slaves,
            key_name = opts.key_pair,
            security_groups = [slave_group],
            instance_type = opts.instance_type,
            block_device_map = block_map)
        my_req_ids = [req.id for req in slave_reqs]
        print "Waiting for spot instances to be granted..."
        # Poll every 10s until all of our spot requests are 'active',
        # then resolve the granted instance ids into instance objects.
        while True:
            time.sleep(10)
            reqs = conn.get_all_spot_instance_requests()
            id_to_req = {}
            for r in reqs:
                id_to_req[r.id] = r
            active = 0
            instance_ids = []
            for i in my_req_ids:
                if id_to_req[i].state == "active":
                    active += 1
                    instance_ids.append(id_to_req[i].instance_id)
            if active == opts.slaves:
                print "All %d slaves granted" % opts.slaves
                reservations = conn.get_all_instances(instance_ids)
                slave_nodes = []
                for r in reservations:
                    slave_nodes += r.instances
                break
            else:
                print "%d of %d slaves granted, waiting longer" % (active, opts.slaves)
    else:
        # Launch non-spot instances
        slave_res = image.run(key_name = opts.key_pair,
                              security_groups = [slave_group],
                              instance_type = opts.instance_type,
                              placement = opts.zone,
                              min_count = opts.slaves,
                              max_count = opts.slaves,
                              block_device_map = block_map)
        slave_nodes = slave_res.instances
        print "Launched slaves, regid = " + slave_res.id
    # Launch masters
    master_type = opts.master_instance_type
    if master_type == "":
        master_type = opts.instance_type
    master_res = image.run(key_name = opts.key_pair,
                           security_groups = [master_group],
                           instance_type = master_type,
                           placement = opts.zone,
                           min_count = opts.ft,
                           max_count = opts.ft,
                           block_device_map = block_map)
    master_nodes = master_res.instances
    print "Launched master, regid = " + master_res.id
    # Launch ZooKeeper nodes if required (fault-tolerant mode always
    # uses a fixed ensemble of 3 ZooKeeper nodes).
    if opts.ft > 1:
        zoo_res = image.run(key_name = opts.key_pair,
                            security_groups = [zoo_group],
                            instance_type = opts.instance_type,
                            placement = opts.zone,
                            min_count = 3,
                            max_count = 3,
                            block_device_map = block_map)
        zoo_nodes = zoo_res.instances
        print "Launched zoo, regid = " + zoo_res.id
    else:
        zoo_nodes = []
    # Return all the instances
    return (master_nodes, slave_nodes, zoo_nodes)
# Get the EC2 instances in an existing cluster if available.
# Returns a tuple of lists of EC2 instance objects for the masters,
# slaves and zookeeper nodes (in that order).
# Exits with an error if the cluster's masters or slaves cannot be found.
def get_existing_cluster(conn, opts, cluster_name):
    print "Searching for existing cluster " + cluster_name + "..."
    reservations = conn.get_all_instances()
    master_nodes = []
    slave_nodes = []
    zoo_nodes = []
    for res in reservations:
        active = [i for i in res.instances if is_active(i)]
        if len(active) > 0:
            # Reservations are classified by an exact match on their
            # single security group name.
            group_names = [g.id for g in res.groups]
            if group_names == [cluster_name + "-master"]:
                master_nodes += res.instances
            elif group_names == [cluster_name + "-slaves"]:
                slave_nodes += res.instances
            elif group_names == [cluster_name + "-zoo"]:
                zoo_nodes += res.instances
    if master_nodes != [] and slave_nodes != []:
        print ("Found %d master(s), %d slaves, %d ZooKeeper nodes" %
               (len(master_nodes), len(slave_nodes), len(zoo_nodes)))
        return (master_nodes, slave_nodes, zoo_nodes)
    else:
        if master_nodes == [] and slave_nodes != []:
            print "ERROR: Could not find master in group " + cluster_name + "-master"
        elif master_nodes != [] and slave_nodes == []:
            print "ERROR: Could not find slaves in group " + cluster_name + "-slaves"
        else:
            print "ERROR: Could not find any existing cluster"
        sys.exit(1)
# Deploy configuration files and run setup scripts on a newly launched
# or started EC2 cluster. Only the first master is contacted directly;
# the setup script on it propagates everything to the other nodes.
def setup_cluster(conn, master_nodes, slave_nodes, zoo_nodes, opts, deploy_ssh_key):
    print "Deploying files to master..."
    deploy_files(conn, "deploy." + opts.os, opts, master_nodes, slave_nodes, zoo_nodes)
    master = master_nodes[0].public_dns_name
    if deploy_ssh_key:
        # Copy our private key to the master so it can SSH to the slaves.
        print "Copying SSH key %s to master..." % opts.identity_file
        ssh(master, opts, 'mkdir -p /root/.ssh')
        scp(master, opts, opts.identity_file, '/root/.ssh/id_rsa')
    print "Running setup on master..."
    ssh(master, opts, "chmod u+x mesos-ec2/setup")
    ssh(master, opts, "mesos-ec2/setup %s %s %s %s" %
        (opts.os, opts.download, opts.branch, opts.swap))
    print "Done!"
# Wait for a whole cluster (masters, slaves and ZooKeeper) to start up.
# After all instances leave 'pending', waits wait_secs more seconds to
# give the OS on each node time to boot and accept SSH connections.
def wait_for_cluster(conn, wait_secs, master_nodes, slave_nodes, zoo_nodes):
    print "Waiting for instances to start up..."
    time.sleep(5)
    wait_for_instances(conn, master_nodes)
    wait_for_instances(conn, slave_nodes)
    if zoo_nodes != []:
        wait_for_instances(conn, zoo_nodes)
    print "Waiting %d more seconds..." % wait_secs
    time.sleep(wait_secs)
# Get number of local disks available for a given EC2 instance type.
def get_num_disks(instance_type):
    # Instance types with a non-default ephemeral disk count; anything
    # not listed here is assumed to have two local disks.
    four_disk_types = ("m1.xlarge", "c1.xlarge", "m2.xlarge", "cc1.4xlarge")
    one_disk_types = ("m1.small", "c1.medium")
    if instance_type in four_disk_types:
        return 4
    if instance_type in one_disk_types:
        return 1
    return 2
# Deploy the configuration file templates in a given local directory to
# a cluster, filling in any template parameters with information about the
# cluster (e.g. lists of masters and slaves). Files are only deployed to
# the first master instance in the cluster, and we expect the setup
# script to be run on that instance to copy them to other nodes.
def deploy_files(conn, root_dir, opts, master_nodes, slave_nodes, zoo_nodes):
    active_master = master_nodes[0].public_dns_name
    num_disks = get_num_disks(opts.instance_type)
    # Build comma-separated per-disk directory lists: the first disk is
    # mounted at /mnt, additional disks at /mnt2, /mnt3, ...
    hdfs_data_dirs = "/mnt/ephemeral-hdfs/data"
    mapred_local_dirs = "/mnt/hadoop/mrlocal"
    if num_disks > 1:
        for i in range(2, num_disks + 1):
            hdfs_data_dirs += ",/mnt%d/ephemeral-hdfs/data" % i
            mapred_local_dirs += ",/mnt%d/hadoop/mrlocal" % i
    # With ZooKeeper nodes, the Mesos master URL is a zoo:// quorum URL;
    # otherwise it points directly at the single master.
    if zoo_nodes != []:
        zoo_list = '\n'.join([i.public_dns_name for i in zoo_nodes])
        cluster_url = "zoo://" + ",".join(
            ["%s:2181/mesos" % i.public_dns_name for i in zoo_nodes])
    else:
        zoo_list = "NONE"
        # TODO: temporary code to support older versions of Mesos with 1@ URLs
        if opts.os == "amazon64-new":
            cluster_url = "master@%s:5050" % active_master
        else:
            cluster_url = "1@%s:5050" % active_master
    # Values substituted for the {{key}} placeholders in template files.
    template_vars = {
        "master_list": '\n'.join([i.public_dns_name for i in master_nodes]),
        "active_master": active_master,
        "slave_list": '\n'.join([i.public_dns_name for i in slave_nodes]),
        "zoo_list": zoo_list,
        "cluster_url": cluster_url,
        "hdfs_data_dirs": hdfs_data_dirs,
        "mapred_local_dirs": mapred_local_dirs
    }
    # Create a temp directory in which we will place all the files to be
    # deployed after we substitute template parameters in them
    tmp_dir = tempfile.mkdtemp()
    for path, dirs, files in os.walk(root_dir):
        # Map the template tree onto an absolute path rooted at '/'.
        dest_dir = os.path.join('/', path[len(root_dir):])
        local_dir = tmp_dir + dest_dir
        if not os.path.exists(local_dir):
            os.makedirs(local_dir)
        for filename in files:
            # Skip hidden and editor backup files (#foo, .foo, ~foo, foo~).
            if filename[0] not in '#.~' and filename[-1] != '~':
                dest_file = os.path.join(dest_dir, filename)
                local_file = tmp_dir + dest_file
                with open(os.path.join(path, filename)) as src:
                    with open(local_file, "w") as dest:
                        text = src.read()
                        for key in template_vars:
                            text = text.replace("{{" + key + "}}", template_vars[key])
                        dest.write(text)
                        dest.close()
    # rsync the whole directory over to the master machine
    command = (("rsync -rv -e 'ssh -o StrictHostKeyChecking=no -i %s' " +
                "'%s/' 'root@%s:/'") % (opts.identity_file, tmp_dir, active_master))
    subprocess.check_call(command, shell=True)
    # Remove the temp directory we created above
    shutil.rmtree(tmp_dir)
# Copy a file to a given host through scp, throwing an exception if scp fails
def scp(host, opts, local_file, dest_file):
    scp_command = ("scp -q -o StrictHostKeyChecking=no -i %s '%s' 'root@%s:%s'" %
                   (opts.identity_file, local_file, host, dest_file))
    subprocess.check_call(scp_command, shell=True)
# Run a command on a host through ssh, throwing an exception if ssh fails
def ssh(host, opts, command):
    ssh_command = ("ssh -t -o StrictHostKeyChecking=no -i %s root@%s '%s'" %
                   (opts.identity_file, host, command))
    subprocess.check_call(ssh_command, shell=True)
def main():
    """Parse the command line and dispatch the requested cluster action."""
    (opts, action, cluster_name) = parse_args()
    conn = boto.connect_ec2()

    # Select an AZ at random if it was not specified.
    if opts.zone == "":
        opts.zone = random.choice(conn.get_all_zones()).name

    if action == "launch":
        # Launch a new cluster (or resume a half-launched one), then wait
        # for the instances and run setup on the master.
        if opts.resume:
            (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
                conn, opts, cluster_name)
        else:
            (master_nodes, slave_nodes, zoo_nodes) = launch_cluster(
                conn, opts, cluster_name)
        wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes, zoo_nodes)
        setup_cluster(conn, master_nodes, slave_nodes, zoo_nodes, opts, True)

    elif action == "destroy":
        # Terminate every node, but only after explicit confirmation.
        response = raw_input("Are you sure you want to destroy the cluster " +
            cluster_name + "?\nALL DATA ON ALL NODES WILL BE LOST!!\n" +
            "Destroy cluster " + cluster_name + " (y/N): ")
        if response == "y":
            (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
                conn, opts, cluster_name)
            print "Terminating master..."
            for inst in master_nodes:
                inst.terminate()
            print "Terminating slaves..."
            for inst in slave_nodes:
                inst.terminate()
            if zoo_nodes != []:
                print "Terminating zoo..."
                for inst in zoo_nodes:
                    inst.terminate()

    elif action == "login":
        # Open an interactive SSH session on the master; an optional -D
        # SOCKS proxy port makes the cluster web UIs reachable.
        (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
            conn, opts, cluster_name)
        master = master_nodes[0].public_dns_name
        print "Logging into master " + master + "..."
        proxy_opt = ""
        if opts.proxy_port != None:
            proxy_opt = "-D " + opts.proxy_port
        subprocess.check_call("ssh -o StrictHostKeyChecking=no -i %s %s root@%s" %
            (opts.identity_file, proxy_opt, master), shell=True)

    elif action == "get-master":
        # Print the master's public DNS name (for scripting).
        (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(conn, opts, cluster_name)
        print master_nodes[0].public_dns_name

    elif action == "stop":
        # Stop (not terminate) instances; ephemeral-disk data is lost but
        # EBS-backed clusters can later be restarted with "start".
        response = raw_input("Are you sure you want to stop the cluster " +
            cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
            "BUT THE CLUSTER WILL KEEP USING SPACE ON\n" +
            "AMAZON EBS IF IT IS EBS-BACKED!!\n" +
            "Stop cluster " + cluster_name + " (y/N): ")
        if response == "y":
            (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
                conn, opts, cluster_name)
            print "Stopping master..."
            for inst in master_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    inst.stop()
            print "Stopping slaves..."
            for inst in slave_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    inst.stop()
            if zoo_nodes != []:
                print "Stopping zoo..."
                for inst in zoo_nodes:
                    if inst.state not in ["shutting-down", "terminated"]:
                        inst.stop()

    elif action == "start":
        # Restart previously stopped instances, then re-run setup.
        (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
            conn, opts, cluster_name)
        print "Starting slaves..."
        for inst in slave_nodes:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        print "Starting master..."
        for inst in master_nodes:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        if zoo_nodes != []:
            print "Starting zoo..."
            for inst in zoo_nodes:
                if inst.state not in ["shutting-down", "terminated"]:
                    inst.start()
        wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes, zoo_nodes)
        setup_cluster(conn, master_nodes, slave_nodes, zoo_nodes, opts, False)

    elif action == "shutdown":
        # Removed action kept only to print a migration hint.
        print >> stderr, ("The shutdown action is no longer available.\n" +
            "Use either 'destroy' to delete a cluster and all data on it,\n" +
            "or 'stop' to shut down the machines but have them persist if\n" +
            "you launched an EBS-backed cluster.")
        sys.exit(1)

    else:
        print >> stderr, "Invalid action: %s" % action
        sys.exit(1)
if __name__ == "__main__":
    # Configure the root logger with defaults so library warnings
    # (e.g. from boto) are visible on the console.
    logging.basicConfig()
    main()
| |
#
# Copyright 2010-2015
#
import copy
from PySide import QtGui, QtCore
from pyflowgraph.graph_view import GraphView
from pyflowgraph.graph_view import MANIP_MODE_NONE, MANIP_MODE_SELECT, MANIP_MODE_PAN, MANIP_MODE_MOVE, MANIP_MODE_ZOOM
from pyflowgraph.connection import Connection
from pyflowgraph.selection_rect import SelectionRect
from knode import KNode
from kbackdrop import KBackdrop
from edit_index_widget import EditIndexWidget
from kraken.core.maths import Vec2
from kraken.core.kraken_system import KrakenSystem
from kraken.core.configs.config import Config
class KGraphView(GraphView):
    """Graph view for Kraken rigs: displays components as nodes, supports
    drag & drop creation, context menus and copy/paste of component
    settings."""

    # Signals emitted before/after clipboard operations so observers can
    # bracket the data changes.
    beginCopyData = QtCore.Signal()
    endCopyData = QtCore.Signal()
    beginPasteData = QtCore.Signal()
    endPasteData = QtCore.Signal()

    # Class-level clipboard: copy/paste data is shared by all views.
    _clipboardData = None

    def __init__(self, parent=None):
        super(KGraphView, self).__init__(parent)
        self.__rig = None
        # Accept drops so components can be dragged in from outside.
        self.setAcceptDrops(True)

    def getRig(self):
        """Returns the rig currently displayed (None until displayGraph)."""
        return self.__rig

    # ======
    # Graph
    # ======
    def displayGraph(self, rig):
        """Rebuilds the view to show `rig`: one KNode per component, the
        connections between their ports, and any backdrops stored in the
        rig's meta data."""
        self.reset()
        self.__rig = rig

        guideComponents = self.__rig.getChildrenByType('Component')

        for component in guideComponents:
            node = KNode(self, component)
            self.addNode(node)

        # Recreate the visual connections from the rig's port wiring.
        for component in guideComponents:
            for i in range(component.getNumInputs()):
                componentInput = component.getInputByIndex(i)
                if componentInput.isConnected():
                    componentOutput = componentInput.getConnection()
                    self.connectPorts(
                        srcNode=componentOutput.getParent().getDecoratedName(), outputName=componentOutput.getName(),
                        tgtNode=component.getDecoratedName(), inputName=componentInput.getName()
                    )

        # Get backdrops from meta data
        metaData = self.__rig.getMetaData()
        if 'backdrops' in metaData:
            for backdrop in metaData['backdrops']:
                backdropNode = KBackdrop(self, backdrop.get('name', 'Backdrop'))
                self.addNode(backdropNode)
                backdropNode.setData(backdrop)

        self.frameAllNodes()

    def addConnection(self, connection, emitSignal=True):
        """Adds a connection; array-output to element-input connections
        (src type = dst type + '[]') are drawn dashed to flag that an
        index is involved."""
        # NOTE(review): the superclass result is captured but the original
        # connection object is what gets returned.
        result = super(KGraphView, self).addConnection(connection, emitSignal=emitSignal)

        # Indicate that this is an indexed connection.
        outPort = connection.getSrcPortCircle().getPort()
        inPort = connection.getDstPortCircle().getPort()
        if outPort is not None and inPort is not None and outPort.getDataType() != inPort.getDataType():
            if outPort.getDataType().startswith(inPort.getDataType()) and outPort.getDataType().endswith('[]'):
                connection.setPenStyle(QtCore.Qt.DashDotLine)
                connection.setPenWidth(2.5)

        return connection

    def getNodesOfType(self, nodeType):
        """Gets all the nodes of the specified type.

        Arguments:
        nodeType -- String, class name to search nodes for.

        Return:
        list, nodes that are of the specified type.

        """
        graphNodes = self.getNodes()
        return [graphNodes[x] for x in graphNodes if type(graphNodes[x]).__name__ == nodeType]

    # =======
    # Events
    # =======
    def mousePressEvent(self, event):
        """Routes mouse presses: right button shows context menus (or
        alt+RMB zoom), left button starts rubber-band selection on empty
        canvas, middle button pans."""
        modifiers = QtGui.QApplication.keyboardModifiers()
        if event.button() == QtCore.Qt.MouseButton.RightButton:

            # Optional alt+RMB camera zoom (preference-controlled).
            zoom_with_alt_rmb = self.window().preferences.getPreferenceValue('zoom_with_alt_rmb')
            if zoom_with_alt_rmb and modifiers == QtCore.Qt.AltModifier:
                self._manipulationMode = MANIP_MODE_ZOOM
                self.setCursor(QtCore.Qt.SizeHorCursor)
                # Snapshot the current view state; the zoom drag is applied
                # relative to these values in the move handler.
                self._lastMousePos = event.pos()
                self._lastTransform = QtGui.QTransform(self.transform())
                self._lastSceneRect = self.sceneRect()
                self._lastSceneCenter = self._lastSceneRect.center()
                self._lastScenePos = self.mapToScene(event.pos())
                self._lastOffsetFromSceneCenter = self._lastScenePos - self._lastSceneCenter
                return

            def graphItemAt(item):
                # Walk up the graphics-item parent chain to find the owning
                # node or connection for whatever item was clicked.
                if isinstance(item, KNode):
                    return item
                if isinstance(item, Connection):
                    return item
                elif item is not None:
                    return graphItemAt(item.parentItem())
                return None

            graphicItem = graphItemAt(self.itemAt(event.pos()))
            pos = self.mapToScene(event.pos())

            if graphicItem is None:
                # Empty canvas: offer paste and backdrop creation.
                contextMenu = QtGui.QMenu(self.getGraphViewWidget())
                contextMenu.setObjectName('rightClickContextMenu')
                contextMenu.setMinimumWidth(150)

                if self.getClipboardData() is not None:

                    def pasteSettings():
                        self.pasteSettings(pos)

                    def pasteSettingsMirrored():
                        self.pasteSettings(pos, mirrored=True)

                    contextMenu.addAction("Paste").triggered.connect(pasteSettings)
                    contextMenu.addAction("Paste Mirrored").triggered.connect(pasteSettingsMirrored)
                    contextMenu.addSeparator()

                graphViewWidget = self.getGraphViewWidget()
                contextMenu.addAction("Add Backdrop").triggered.connect(graphViewWidget.addBackdrop)
                contextMenu.popup(event.globalPos())

            if isinstance(graphicItem, KNode):
                # Component node: select it and offer copy / paste-data.
                self.selectNode(graphicItem, clearSelection=True, emitSignal=True)

                contextMenu = QtGui.QMenu(self.getGraphViewWidget())
                contextMenu.setObjectName('rightClickContextMenu')
                contextMenu.setMinimumWidth(150)

                def copySettings():
                    self.copySettings(pos)

                contextMenu.addAction("Copy").triggered.connect(copySettings)

                if self.getClipboardData() is not None:

                    def pasteSettings():
                        # Paste the settings, not modifying the location, because that will be used to determine symmetry.
                        pasteData = self.getClipboardData()['components'][0]
                        pasteData.pop('graphPos', None)
                        graphicItem.getComponent().pasteData(pasteData, setLocation=False)

                    contextMenu.addSeparator()
                    contextMenu.addAction("Paste Data").triggered.connect(pasteSettings)

                contextMenu.popup(event.globalPos())

            elif isinstance(graphicItem, Connection):
                # Connection: for array-output -> element-input connections
                # offer editing of the index used.
                outPort = graphicItem.getSrcPortCircle().getPort()
                inPort = graphicItem.getDstPortCircle().getPort()
                if outPort.getDataType() != inPort.getDataType():
                    if outPort.getDataType().startswith(inPort.getDataType()) and outPort.getDataType().endswith('[]'):
                        globalPos = event.globalPos()

                        contextMenu = QtGui.QMenu(self.getGraphViewWidget())
                        contextMenu.setObjectName('rightClickContextMenu')
                        contextMenu.setMinimumWidth(150)

                        def editIndex():
                            componentInput = graphicItem.getDstPortCircle().getPort().getComponentInput()
                            EditIndexWidget(componentInput, pos=globalPos, parent=self.getGraphViewWidget())

                        contextMenu.addAction("EditIndex").triggered.connect(editIndex)
                        contextMenu.popup(globalPos)

        elif event.button() is QtCore.Qt.MouseButton.LeftButton and self.itemAt(event.pos()) is None:
            # Left-click on empty canvas: start rubber-band selection.
            self.beginNodeSelection.emit()
            self._manipulationMode = MANIP_MODE_SELECT
            self._mouseDownSelection = copy.copy(self.getSelectedNodes())
            self._selectionRect = SelectionRect(graph=self, mouseDownPos=self.mapToScene(event.pos()))
        elif event.button() is QtCore.Qt.MouseButton.MiddleButton:
            # Middle-drag pans; a preference may require Alt to be held.
            pan_with_alt = self.window().preferences.getPreferenceValue('pan_with_alt')
            if pan_with_alt is True and modifiers != QtCore.Qt.AltModifier:
                return

            self.setCursor(QtCore.Qt.OpenHandCursor)
            self._manipulationMode = MANIP_MODE_PAN
            self._lastPanPoint = self.mapToScene(event.pos())
        else:
            super(GraphView, self).mousePressEvent(event)

    def dragEnterEvent(self, event):
        # Accept only component drags (mime text 'KrakenComponent:<class>').
        textParts = event.mimeData().text().split(':')
        if textParts[0] == 'KrakenComponent':
            event.accept()
        else:
            event.setDropAction(QtCore.Qt.IgnoreAction)
            super(GraphView, self).dragEnterEvent(event)

    def dragMoveEvent(self, event):
        super(GraphView, self).dragMoveEvent(event)
        event.accept()

    def dropEvent(self, event):
        """Instantiates the dragged component class at the drop position
        and adds a node for it."""
        textParts = event.mimeData().text().split(':')
        if textParts[0] == 'KrakenComponent':
            componentClassName = textParts[1]

            # Add a component to the rig placed at the given position.
            dropPosition = self.mapToScene(event.pos())

            # construct the node and add it to the graph.
            krakenSystem = KrakenSystem.getInstance()
            componentClass = krakenSystem.getComponentClass( componentClassName )
            component = componentClass(parent=self.getRig())
            component.setGraphPos(Vec2(dropPosition.x(), dropPosition.y()))

            node = KNode(self, component)
            self.addNode(node)

            self.selectNode(node, clearSelection=True, emitSignal=False)

            event.acceptProposedAction()
        else:
            super(GraphView, self).dropEvent(event)

    def wheelEvent(self, event):
        # Scroll-wheel zoom is preference-controlled; when disabled the
        # wheel event is simply swallowed.
        zoom_mouse_scroll = self.window().preferences.getPreferenceValue('zoom_mouse_scroll')
        if zoom_mouse_scroll is True:
            super(KGraphView, self).wheelEvent(event)

    # =============
    # Copy / Paste
    # =============
    def getClipboardData(self):
        """Returns the shared clipboard data (None if nothing was copied)."""
        return self.__class__._clipboardData

    def copySettings(self, pos):
        """Copies the selected components, and the connections between
        them, to the shared clipboard. `pos` is recorded so a later paste
        can offset the new nodes relative to the copy point."""
        clipboardData = {}

        copiedComponents = []
        nodes = self.getSelectedNodes()
        for node in nodes:
            copiedComponents.append(node.getComponent())

        componentsJson = []
        connectionsJson = []
        for component in copiedComponents:
            componentsJson.append(component.copyData())

            # Record incoming connections as "source.port" -> "target.port".
            for i in range(component.getNumInputs()):
                componentInput = component.getInputByIndex(i)
                if componentInput.isConnected():
                    componentOutput = componentInput.getConnection()
                    connectionJson = {
                        'source': componentOutput.getParent().getDecoratedName() + '.' + componentOutput.getName(),
                        'target': component.getDecoratedName() + '.' + componentInput.getName()
                    }
                    connectionsJson.append(connectionJson)

        clipboardData = {
            'components': componentsJson,
            'connections': connectionsJson,
            'copyPos': pos
        }

        self.__class__._clipboardData = clipboardData

    def pasteSettings(self, pos, mirrored=False, createConnectionsToExistingNodes=True):
        """Pastes the clipboard components at `pos`, offset by the delta
        from the original copy position, then recreates their connections.

        mirrored -- paste with locations swapped via the config mirror map.
        createConnectionsToExistingNodes -- also reconnect to source nodes
        that were not part of the copied set.
        """
        clipboardData = self.__class__._clipboardData

        krakenSystem = KrakenSystem.getInstance()
        delta = pos - clipboardData['copyPos']
        self.clearSelection()
        pastedComponents = {}
        # Maps original decorated names to the (possibly mirrored) names
        # the pasted components end up with.
        nameMapping = {}

        for componentData in clipboardData['components']:
            componentClass = krakenSystem.getComponentClass(componentData['class'])
            component = componentClass(parent=self.__rig)
            decoratedName = componentData['name'] + component.getNameDecoration()
            nameMapping[decoratedName] = decoratedName
            if mirrored:
                config = Config.getInstance()
                mirrorMap = config.getNameTemplate()['mirrorMap']
                component.setLocation(mirrorMap[componentData['location']])
                nameMapping[decoratedName] = componentData['name'] + component.getNameDecoration()
                component.pasteData(componentData, setLocation=False)
            else:
                component.pasteData(componentData, setLocation=True)
            graphPos = component.getGraphPos()
            component.setGraphPos(Vec2(graphPos.x + delta.x(), graphPos.y + delta.y()))
            node = KNode(self, component)
            self.addNode(node)
            self.selectNode(node, False)

            # save a dict of the nodes using the original names
            pastedComponents[nameMapping[decoratedName]] = component

        # Create Connections
        for connectionData in clipboardData['connections']:
            sourceComponentDecoratedName, outputName = connectionData['source'].split('.')
            targetComponentDecoratedName, inputName = connectionData['target'].split('.')
            sourceComponent = None

            # The connection is either between nodes that were pasted, or from pasted nodes
            # to unpasted nodes. We first check that the source component is in the pasted group
            # else use the node in the graph.
            if sourceComponentDecoratedName in nameMapping:
                sourceComponent = pastedComponents[nameMapping[sourceComponentDecoratedName]]
            else:
                if not createConnectionsToExistingNodes:
                    continue

                # When we support copying/pasting between rigs, then we may not find the source
                # node in the target rig.
                if not self.hasNode(sourceComponentDecoratedName):
                    continue
                node = self.getNode(sourceComponentDecoratedName)
                sourceComponent = node.getComponent()

            targetComponentDecoratedName = nameMapping[targetComponentDecoratedName]
            targetComponent = pastedComponents[targetComponentDecoratedName]

            outputPort = sourceComponent.getOutputByName(outputName)
            inputPort = targetComponent.getInputByName(inputName)

            inputPort.setConnection(outputPort)
            self.connectPorts(
                srcNode=sourceComponent.getDecoratedName(), outputName=outputPort.getName(),
                tgtNode=targetComponent.getDecoratedName(), inputName=inputPort.getName()
            )
| |
# ex:ts=4:sw=4:sts=4:et
# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-
import datetime
import hashlib
import logging
import random
import re
from urllib.parse import urlparse
from svtplay_dl.error import ServiceError
from svtplay_dl.fetcher.hls import hlsparse
from svtplay_dl.service import Service
from svtplay_dl.subtitle import subtitle
# Subtitle language code -> TLD fragment of the matching discovery+ site;
# used to keep only the site's local-language subtitle by default.
country = {"sv": ".se", "da": ".dk", "no": ".no"}
# Site domain -> "realm" query parameter for the disco-api token endpoint.
REALMS = {"discoveryplus.se": "dplayse", "discoveryplus.no": "dplayno", "discoveryplus.dk": "dplaydk"}
class Dplay(Service):
    """Service for the Scandinavian discovery+ (formerly Dplay) sites."""

    supported_domains = ["discoveryplus.se", "discoveryplus.no", "discoveryplus.dk"]
    # Account subscription packages, filled by _getpackages().
    # NOTE(review): class-level list, so it is shared (and appended to)
    # across instances — confirm this is intentional.
    packages = []

    def get(self):
        """Yield streams (and matching subtitles) for a channel, program
        or single-video URL."""
        parse = urlparse(self.url)
        self.domain = re.search(r"(discoveryplus\.\w\w)", parse.netloc).group(1)

        if not self._token():
            logging.error("Something went wrong getting token for requests")

        if not self._login():
            yield ServiceError("You need the 'st' cookie from your web brower for the site to make it work")
            return

        channel = False
        if "kanaler" in parse.path:
            # Live channel URL.
            match = re.search("kanaler/([^/]+)$", parse.path)
            if not match:
                yield ServiceError("Can't detect 'kanaler'")
                return
            path = "/channels/{}".format(match.group(1))
            url = "https://disco-api.{}/content{}".format(self.domain, path)
            channel = True
            self.config.set("live", True)
        elif "program" in parse.path:
            # Program page: resolve to the show's latest episode.
            match = re.search("(programmer|program)/([^/]+)$", parse.path)
            if not match:
                yield ServiceError("Can't find program url")
                return
            path = "/shows/{}".format(match.group(2))
            url = "https://disco-api.{}/content{}".format(self.domain, path)
            res = self.http.get(url, headers={"x-disco-client": "WEB:UNKNOWN:dplay-client:0.0.1"})
            programid = res.json()["data"]["id"]
            qyerystring = (
                "include=primaryChannel,show&filter[videoType]=EPISODE&filter[show.id]={}&"
                "page[size]=100&sort=seasonNumber,episodeNumber,-earliestPlayableStart".format(programid)
            )
            res = self.http.get("https://disco-api.{}/content/videos?{}".format(self.domain, qyerystring))
            janson = res.json()
            vid = 0
            slug = None
            # Treat the entry with the highest numeric id as the latest video.
            for i in janson["data"]:
                if int(i["id"]) > vid:
                    vid = int(i["id"])
                    slug = i["attributes"]["path"]
            if slug:
                url = "https://disco-api.{}/content/videos/{}".format(self.domain, slug)
            else:
                yield ServiceError("Cant find latest video on program url")
                return
        else:
            # Direct video URL.
            match = re.search("(videos|videoer)/(.*)$", parse.path)
            url = "https://disco-api.{}/content/videos/{}".format(self.domain, match.group(2))

        res = self.http.get(url, headers={"x-disco-client": "WEB:UNKNOWN:dplay-client:0.0.1"})
        janson = res.json()
        if "errors" in janson:
            yield ServiceError("Cant find any videos on this url")
            return
        if channel:
            name = janson["data"]["attributes"]["name"]
            self.output["title"] = name
        else:
            name = self._autoname(janson)
        if name is None:
            yield ServiceError("Cant find vid id for autonaming")
            return
        self.output["id"] = janson["data"]["id"]

        api = "https://disco-api.{}/playback/videoPlaybackInfo/{}?usePreAuth=true".format(self.domain, janson["data"]["id"])
        res = self.http.get(api)
        if res.status_code > 400:
            yield ServiceError("You dont have permission to watch this")
            return
        streams = hlsparse(
            self.config,
            self.http.request("get", res.json()["data"]["attributes"]["streaming"]["hls"]["url"]),
            res.json()["data"]["attributes"]["streaming"]["hls"]["url"],
            httpobject=self.http,
            output=self.output,
        )
        for n in list(streams.keys()):
            if isinstance(streams[n], subtitle):  # we get the subtitles from the hls playlist.
                if self.config.get("get_all_subtitles"):
                    yield streams[n]
                else:
                    # Default: only the subtitle matching the site's language.
                    if streams[n].subfix in country and country[streams[n].subfix] in self.domain:
                        yield streams[n]
            else:
                yield streams[n]

    def _autoname(self, jsondata):
        """Fill output metadata (title, season, episode, episodename) from
        the video JSON and return the title."""
        match = re.search("^([^/]+)/", jsondata["data"]["attributes"]["path"])
        self.output["title"] = match.group(1)
        self.output["season"] = int(jsondata["data"]["attributes"]["seasonNumber"])
        self.output["episode"] = int(jsondata["data"]["attributes"]["episodeNumber"])
        self.output["episodename"] = jsondata["data"]["attributes"]["name"]
        return self.output["title"]

    def find_all_episodes(self, config):
        """Return URLs of all playable episodes for a program URL."""
        parse = urlparse(self.url)
        self.domain = re.search(r"(discoveryplus\.\w\w)", parse.netloc).group(1)
        programid = None
        seasons = []
        episodes = []
        match = re.search("^/(program|programmer|videos|videoer)/([^/]+)", parse.path)
        if not match:
            logging.error("Can't find show name")
            return None
        if not self._login():
            logging.error("Need the 'st' cookie to work")
            return None
        if not self._token():
            logging.error("Something went wrong getting token for requests")
        self._getpackages()

        urllocal = ""
        # NOTE(review): these are the old dplay domains; with the
        # discoveryplus.* supported_domains this branch never matches.
        if self.domain in ["dplay.dk", "dplay.no"]:
            urllocal = "mer"

        url = "http://disco-api.{}/cms/routes/program{}/{}?decorators=viewingHistory&include=default".format(self.domain, urllocal, match.group(2))
        res = self.http.get(url)
        if res.status_code > 400:
            logging.error("Cant find any videos. wrong url?")
            return episodes

        showid = None
        # Find the season-filter component to learn the collection id,
        # the list of season numbers and any mandatory query params.
        for what in res.json()["included"]:
            if "attributes" in what and "alias" in what["attributes"] and "season" in what["attributes"]["alias"]:
                programid = what["id"]
                for ses in what["attributes"]["component"]["filters"]:
                    if ses["id"] == "seasonNumber":
                        for opt in ses["options"]:
                            seasons.append(opt["value"])
                if "mandatoryParams" in what["attributes"]["component"]:
                    showid = what["attributes"]["component"]["mandatoryParams"]

        if programid:
            # Page through every season, collecting playable episodes.
            for season in seasons:
                page = 1
                totalpages = 1
                while page <= totalpages:
                    querystring = "decorators=viewingHistory&include=default&page[items.number]={}&pf[seasonNumber]={}".format(
                        page,
                        season,
                    )
                    if showid:
                        querystring += "&{}".format(showid)
                    res = self.http.get("https://disco-api.{}/cms/collections/{}?{}".format(self.domain, programid, querystring))
                    janson = res.json()
                    totalpages = janson["data"]["meta"]["itemsTotalPages"]
                    for i in janson["included"]:
                        if i["type"] != "video":
                            continue
                        if i["attributes"]["videoType"] == "EPISODE":
                            if not self._playablefile(i["attributes"]["availabilityWindows"]):
                                continue
                            episodes.append("https://www.{}/videos/{}".format(self.domain, i["attributes"]["path"]))
                    page += 1
        if not episodes:
            logging.error("Cant find any playable files")
        if config.get("all_last") > 0:
            return episodes[: config.get("all_last")]
        return episodes

    def _login(self):
        """Return True when the 'st' cookie belongs to a non-anonymous user."""
        res = self.http.get("https://disco-api.{}/users/me".format(self.domain), headers={"authority": "disco-api.{}".format(self.domain)})
        if res.status_code >= 400:
            return False
        if not res.json()["data"]["attributes"]["anonymous"]:
            return True
        return False

    def _token(self) -> bool:
        """Request an anonymous API token for a freshly generated device id."""
        # random device id for cookietoken
        # NOTE(review): bytes(n) yields n zero bytes, so this hashes a
        # zero-filled buffer of random length (at most 1000 distinct ids).
        # It works as an opaque device id, but probably isn't the intent.
        deviceid = hashlib.sha256(bytes(int(random.random() * 1000))).hexdigest()
        url = "https://disco-api.{}/token?realm={}&deviceId={}&shortlived=true".format(self.domain, REALMS[self.domain], deviceid)
        res = self.http.get(url)
        if res.status_code >= 400:
            return False
        return True

    def _getpackages(self):
        """Append the logged-in account's subscription packages to self.packages."""
        res = self.http.get("https://disco-api.{}/users/me".format(self.domain), headers={"authority": "disco-api.{}".format(self.domain)})
        if res.status_code < 400:
            self.packages.extend(res.json()["data"]["attributes"]["packages"])

    def _playablefile(self, needs):
        """Return True if any availability window for one of our packages
        is currently open (started, and not yet ended)."""
        playable = False
        now = datetime.datetime.utcnow()
        for package in self.packages:
            for need in needs:
                if package != need["package"]:
                    continue
                start = datetime.datetime.strptime(need["playableStart"], "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=None)
                if now > start:
                    if "playableEnd" in need:
                        end = datetime.datetime.strptime(need["playableEnd"], "%Y-%m-%dT%H:%M:%SZ").replace(tzinfo=None)
                        if now < end:
                            playable = True
                    else:
                        # No end time: window is open-ended.
                        playable = True
        return playable
| |
"""Webroot plugin."""
import argparse
import collections
import json
import logging
from typing import Any
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Type
from typing import Union
from acme import challenges
from certbot import crypto_util
from certbot import errors
from certbot import interfaces
from certbot._internal import cli
from certbot.achallenges import AnnotatedChallenge
from certbot.compat import filesystem
from certbot.compat import os
from certbot.display import ops
from certbot.display import util as display_util
from certbot.plugins import common
from certbot.plugins import util
from certbot.util import safe_open
logger = logging.getLogger(__name__)
_WEB_CONFIG_CONTENT = """\
<?xml version="1.0" encoding="UTF-8" ?>
<!--Generated by Certbot-->
<configuration>
<system.webServer>
<staticContent>
<remove fileExtension="."/>
<mimeMap fileExtension="." mimeType="text/plain" />
</staticContent>
</system.webServer>
</configuration>
"""
# This list references the hashes of all versions of the web.config files that Certbot could
# have generated during an HTTP-01 challenge. If you modify _WEB_CONFIG_CONTENT, you MUST add
# the new hash in this list.
_WEB_CONFIG_SHA256SUMS = [
    # SHA-256 hex digests of known web.config revisions.
    "20c5ca1bd58fa8ad5f07a2f1be8b7cbb707c20fcb607a8fc8db9393952846a97",
    "8d31383d3a079d2098a9d0c0921f4ab87e708b9868dc3f314d54094c2fe70336"
]
class Authenticator(common.Plugin, interfaces.Authenticator):
    """Webroot Authenticator."""

    # Short description shown in plugin listings.
    description = "Place files in webroot directory"

    MORE_INFO = """\
Authenticator plugin that performs http-01 challenge by saving
necessary validation resources to appropriate paths on the file
system. It expects that there is some other HTTP server configured
to serve all files under specified web root ({0})."""
    def more_info(self) -> str:
        """Return the plugin description with the configured webroot path(s) filled in."""
        return self.MORE_INFO.format(self.conf("path"))
    @classmethod
    def add_parser_arguments(cls, add: Callable[..., None]) -> None:
        """Register the -w/--webroot-path and --webroot-map CLI arguments."""
        add("path", "-w", default=[], action=_WebrootPathAction,
            help="public_html / webroot path. This can be specified multiple "
                 "times to handle different domains; each domain will have "
                 "the webroot path that preceded it. For instance: `-w "
                 "/var/www/example -d example.com -d www.example.com -w "
                 "/var/www/thing -d thing.net -d m.thing.net` (default: Ask)")
        # The map implies -d for each key and takes precedence over -w/-d.
        add("map", default={}, action=_WebrootMapAction,
            help="JSON dictionary mapping domains to webroot paths; this "
                 "implies -d for each entry. You may need to escape this from "
                 "your shell. E.g.: --webroot-map "
                 '\'{"eg1.is,m.eg1.is":"/www/eg1/", "eg2.is":"/www/eg2"}\' '
                 "This option is merged with, but takes precedence over, -w / "
                 "-d entries. At present, if you put webroot-map in a config "
                 "file, it needs to be on a single line, like: webroot-map = "
                 '{"example.com":"/var/www"}.')
    def auth_hint(self, failed_achalls: List[AnnotatedChallenge]) -> str:  # pragma: no cover
        """Hint shown to the user when HTTP-01 challenges fail."""
        return ("The Certificate Authority failed to download the temporary challenge files "
                "created by Certbot. Ensure that the listed domains serve their content from "
                "the provided --webroot-path/-w and that files created there can be downloaded "
                "from the internet.")
    def get_chall_pref(self, domain: str) -> Iterable[Type[challenges.Challenge]]:
        # pylint: disable=unused-argument
        """Return the challenge types this plugin supports (HTTP-01 only)."""
        return [challenges.HTTP01]
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Initialize per-run bookkeeping; no filesystem work happens here."""
        super().__init__(*args, **kwargs)
        # domain -> full challenge directory path (set in _create_challenge_dirs)
        self.full_roots: Dict[str, str] = {}
        # challenge root -> challenges written there; presumably consumed
        # by cleanup (not visible in this excerpt) — TODO confirm
        self.performed: DefaultDict[str, Set[AnnotatedChallenge]] = collections.defaultdict(set)
        # stack of dirs successfully created by this authenticator
        self._created_dirs: List[str] = []
    def prepare(self) -> None:
        """No preparation is necessary for the webroot plugin."""
        pass
def perform(self, achalls: List[AnnotatedChallenge]) -> List[challenges.ChallengeResponse]: # pylint: disable=missing-function-docstring
self._set_webroots(achalls)
self._create_challenge_dirs()
return [self._perform_single(achall) for achall in achalls]
    def _set_webroots(self, achalls: Iterable[AnnotatedChallenge]) -> None:
        """Ensure every challenge domain has an entry in the webroot map.

        Unmatched domains fall back to the last -w/--webroot-path value
        when one was given; otherwise the user is prompted per domain.
        """
        if self.conf("path"):
            webroot_path = self.conf("path")[-1]
            logger.info("Using the webroot path %s for all unmatched domains.",
                        webroot_path)
            for achall in achalls:
                self.conf("map").setdefault(achall.domain, webroot_path)
        else:
            known_webroots = list(set(self.conf("map").values()))
            for achall in achalls:
                if achall.domain not in self.conf("map"):
                    new_webroot = self._prompt_for_webroot(achall.domain,
                                                           known_webroots)
                    # Put the most recently input
                    # webroot first for easy selection
                    try:
                        known_webroots.remove(new_webroot)
                    except ValueError:
                        pass
                    known_webroots.insert(0, new_webroot)
                    self.conf("map")[achall.domain] = new_webroot
def _prompt_for_webroot(self, domain: str, known_webroots: List[str]) -> Optional[str]:
webroot = None
while webroot is None:
if known_webroots:
# Only show the menu if we have options for it
webroot = self._prompt_with_webroot_list(domain, known_webroots)
if webroot is None:
webroot = self._prompt_for_new_webroot(domain)
else:
# Allow prompt to raise PluginError instead of looping forever
webroot = self._prompt_for_new_webroot(domain, True)
return webroot
    def _prompt_with_webroot_list(self, domain: str,
                                  known_webroots: List[str]) -> Optional[str]:
        """Offer the known webroots in a menu for ``domain``.

        Returns the selected webroot, or None when the user chooses to
        enter a new one. Raises PluginError if the dialog is cancelled.
        """
        path_flag = "--" + self.option_name("path")

        while True:
            code, index = display_util.menu(
                "Select the webroot for {0}:".format(domain),
                ["Enter a new webroot"] + known_webroots,
                cli_flag=path_flag, force_interactive=True)
            if code == display_util.CANCEL:
                raise errors.PluginError(
                    "Every requested domain must have a "
                    "webroot when using the webroot plugin.")
            # Index 0 is the "Enter a new webroot" sentinel entry.
            return None if index == 0 else known_webroots[index - 1]  # code == display_util.OK
    def _prompt_for_new_webroot(self, domain: str, allowraise: bool = False) -> Optional[str]:
        """Prompt for a new webroot directory for ``domain``.

        Returns the validated webroot, or None when cancelled and
        ``allowraise`` is False; otherwise cancellation raises PluginError.
        """
        code, webroot = ops.validated_directory(
            _validate_webroot,
            "Input the webroot for {0}:".format(domain),
            force_interactive=True)
        if code == display_util.CANCEL:
            if not allowraise:
                return None
            raise errors.PluginError(
                "Every requested domain must have a "
                "webroot when using the webroot plugin.")
        return _validate_webroot(webroot)  # code == display_util.OK
    def _create_challenge_dirs(self) -> None:
        """Create the ``.well-known/acme-challenge`` directory under every
        configured webroot.

        Fills ``self.full_roots`` (domain -> challenge dir) and records each
        directory actually created in ``self._created_dirs`` so ``cleanup``
        can remove them later. Raises :class:`errors.PluginError` when the
        configuration is empty or a directory cannot be created.
        """
        path_map = self.conf("map")
        if not path_map:
            raise errors.PluginError(
                "Missing parts of webroot configuration; please set either "
                "--webroot-path and --domains, or --webroot-map. Run with "
                " --help webroot for examples.")
        for name, path in path_map.items():
            self.full_roots[name] = os.path.join(path, os.path.normcase(
                challenges.HTTP01.URI_ROOT_PATH))
            logger.debug("Creating root challenges validation dir at %s",
                         self.full_roots[name])
            # Change the permissions to be writable (GH #1389)
            # Umask is used instead of chmod to ensure the client can also
            # run as non-root (GH #1795)
            old_umask = filesystem.umask(0o022)
            try:
                # We ignore the last prefix in the next iteration,
                # as it does not correspond to a folder path ('/' or 'C:')
                for prefix in sorted(util.get_prefixes(self.full_roots[name])[:-1], key=len):
                    if os.path.isdir(prefix):
                        # Don't try to create directory if it already exists, as some filesystems
                        # won't reliably raise EEXIST or EISDIR if directory exists.
                        continue
                    try:
                        # Set owner as parent directory if possible, apply mode for Linux/Windows.
                        # For Linux, this is coupled with the "umask" call above because
                        # os.mkdir's "mode" parameter may not always work:
                        # https://docs.python.org/3/library/os.html#os.mkdir
                        filesystem.mkdir(prefix, 0o755)
                        self._created_dirs.append(prefix)
                        try:
                            filesystem.copy_ownership_and_apply_mode(
                                path, prefix, 0o755, copy_user=True, copy_group=True)
                        except (OSError, AttributeError) as exception:
                            # Best effort only: ownership copy may fail on
                            # some platforms/filesystems; keep the directory.
                            logger.warning("Unable to change owner and uid of webroot directory")
                            logger.debug("Error was: %s", exception)
                    except OSError as exception:
                        raise errors.PluginError(
                            "Couldn't create root for {0} http-01 "
                            "challenge responses: {1}".format(name, exception))
            finally:
                # Always restore the previous umask.
                filesystem.umask(old_umask)
            # On Windows, generate a local web.config file that allows IIS to serve expose
            # challenge files despite the fact they do not have a file extension.
            if not filesystem.POSIX_MODE:
                web_config_path = os.path.join(self.full_roots[name], "web.config")
                if os.path.exists(web_config_path):
                    logger.info("A web.config file has not been created in "
                                "%s because another one already exists.", self.full_roots[name])
                    continue
                logger.info("Creating a web.config file in %s to allow IIS "
                            "to serve challenge files.", self.full_roots[name])
                with safe_open(web_config_path, mode="w", chmod=0o644) as web_config:
                    web_config.write(_WEB_CONFIG_CONTENT)
def _get_validation_path(self, root_path: str, achall: AnnotatedChallenge) -> str:
return os.path.join(root_path, achall.chall.encode("token"))
def _perform_single(self, achall: AnnotatedChallenge) -> challenges.ChallengeResponse:
response, validation = achall.response_and_validation()
root_path = self.full_roots[achall.domain]
validation_path = self._get_validation_path(root_path, achall)
logger.debug("Attempting to save validation to %s", validation_path)
# Change permissions to be world-readable, owner-writable (GH #1795)
old_umask = filesystem.umask(0o022)
try:
with safe_open(validation_path, mode="wb", chmod=0o644) as validation_file:
validation_file.write(validation.encode())
finally:
filesystem.umask(old_umask)
self.performed[root_path].add(achall)
return response
    def cleanup(self, achalls: List[AnnotatedChallenge]) -> None:
        """Remove validation files and any directories this plugin created.

        For each challenge, deletes its validation file and, on Windows,
        removes the generated ``web.config`` only when its checksum matches
        one we wrote. Directories recorded in ``self._created_dirs`` are
        removed deepest-first; non-empty ones are kept for a later attempt.
        """
        for achall in achalls:
            root_path = self.full_roots.get(achall.domain, None)
            if root_path is not None:
                validation_path = self._get_validation_path(root_path, achall)
                logger.debug("Removing %s", validation_path)
                os.remove(validation_path)
                self.performed[root_path].remove(achall)
                if not filesystem.POSIX_MODE:
                    web_config_path = os.path.join(root_path, "web.config")
                    if os.path.exists(web_config_path):
                        # Only delete web.config files Certbot generated;
                        # a user-provided one is left untouched.
                        sha256sum = crypto_util.sha256sum(web_config_path)
                        if sha256sum in _WEB_CONFIG_SHA256SUMS:
                            logger.info("Cleaning web.config file generated by Certbot in %s.",
                                        root_path)
                            os.remove(web_config_path)
                        else:
                            logger.info("Not cleaning up the web.config file in %s "
                                        "because it is not generated by Certbot.", root_path)
        not_removed: List[str] = []
        # _created_dirs was appended shallow-to-deep, so popping removes
        # deepest directories first.
        while self._created_dirs:
            path = self._created_dirs.pop()
            try:
                os.rmdir(path)
            except OSError as exc:
                # Keep non-empty dirs (in original order) for future cleanup.
                not_removed.insert(0, path)
                logger.info("Challenge directory %s was not empty, didn't remove", path)
                logger.debug("Error was: %s", exc)
        self._created_dirs = not_removed
        logger.debug("All challenges cleaned up")
class _WebrootMapAction(argparse.Action):
    """Action class for parsing webroot_map."""

    def __call__(self, parser: argparse.ArgumentParser, namespace: argparse.Namespace,
                 webroot_map: Union[str, Sequence[Any], None],
                 option_string: Optional[str] = None) -> None:
        """Merge a JSON ``domains -> webroot`` mapping into the namespace."""
        if webroot_map is None:
            return
        parsed = json.loads(str(webroot_map))
        for domains, raw_path in parsed.items():
            checked_path = _validate_webroot(raw_path)
            for domain in cli.add_domains(namespace, domains):
                namespace.webroot_map[domain] = checked_path
class _WebrootPathAction(argparse.Action):
    """Action class for parsing webroot_path.

    Each webroot path seen on the command line is applied to the domains
    that follow it; when a second path appears, the previous one is mapped
    to all domains parsed so far. A domain given before any webroot path
    makes multiple paths ambiguous, which is rejected.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        super().__init__(*args, **kwargs)
        # Set when a domain was parsed before the first webroot path.
        self._domain_before_webroot = False

    def __call__(self, parser: argparse.ArgumentParser, namespace: argparse.Namespace,
                 webroot_path: Union[str, Sequence[Any], None],
                 option_string: Optional[str] = None) -> None:
        if webroot_path is None:
            return
        if self._domain_before_webroot:
            raise errors.PluginError(
                "If you specify multiple webroot paths, "
                "one of them must precede all domain flags")
        if namespace.webroot_path:
            # Apply previous webroot to all matched
            # domains before setting the new webroot path
            prev_webroot = namespace.webroot_path[-1]
            for domain in namespace.domains:
                namespace.webroot_map.setdefault(domain, prev_webroot)
        elif namespace.domains:
            # First webroot path, but domains already exist: flag the
            # ambiguity in case another path follows.
            self._domain_before_webroot = True
        namespace.webroot_path.append(_validate_webroot(str(webroot_path)))
def _validate_webroot(webroot_path: str) -> str:
"""Validates and returns the absolute path of webroot_path.
:param str webroot_path: path to the webroot directory
:returns: absolute path of webroot_path
:rtype: str
"""
if not os.path.isdir(webroot_path):
raise errors.PluginError(webroot_path + " does not exist or is not a directory")
return os.path.abspath(webroot_path)
| |
from django import forms
from django.conf import settings
from django.core.validators import MinValueValidator
from django.urls import reverse, reverse_lazy
from django.utils.translation import npgettext_lazy, pgettext_lazy
from ...account.i18n import (
AddressForm as StorefrontAddressForm, PossiblePhoneNumberFormField,
clean_phone_for_country)
from ...account.models import User
from ...checkout.forms import QuantityField
from ...core.exceptions import InsufficientStock
from ...core.utils.taxes import ZERO_TAXED_MONEY
from ...discount.models import Voucher
from ...discount.utils import decrease_voucher_usage, increase_voucher_usage
from ...order import OrderStatus
from ...order.models import Fulfillment, FulfillmentLine, Order, OrderLine
from ...order.utils import (
add_variant_to_order, cancel_fulfillment, cancel_order,
change_order_line_quantity, delete_order_line, fulfill_order_line,
recalculate_order)
from ...payment import ChargeStatus, CustomPaymentChoices, PaymentError
from ...payment.utils import (
clean_mark_order_as_paid, gateway_capture, gateway_refund, gateway_void,
mark_order_as_paid)
from ...product.models import Product, ProductVariant
from ...product.utils import allocate_stock, deallocate_stock
from ...shipping.models import ShippingMethod
from ..forms import AjaxSelect2ChoiceField
from ..widgets import PhonePrefixWidget
from .utils import remove_customer_from_order, update_order_with_user_addresses
class CreateOrderFromDraftForm(forms.ModelForm):
    """Mark draft order as ready to fulfill."""
    # Optional checkbox; removed in __init__ when the order has no
    # customer email to notify.
    notify_customer = forms.BooleanField(
        label=pgettext_lazy(
            'Send email to customer about order created by staff users',
            'Send email with order confirmation to the customer'),
        required=False, initial=True)

    class Meta:
        model = Order
        fields = []

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.instance.get_user_current_email():
            self.fields.pop('notify_customer')

    def clean(self):
        """Validate that the draft can be turned into a real order."""
        super().clean()
        errors = []
        if self.instance.get_total_quantity() == 0:
            errors.append(forms.ValidationError(pgettext_lazy(
                'Create draft order form error',
                'Could not create order without any products')))
        if self.instance.is_shipping_required():
            method = self.instance.shipping_method
            shipping_address = self.instance.shipping_address
            # The chosen method's shipping zone must cover the address country.
            shipping_not_valid = (
                method and shipping_address and
                shipping_address.country.code not in method.shipping_zone.countries)  # noqa
            if shipping_not_valid:
                errors.append(forms.ValidationError(pgettext_lazy(
                    'Create draft order form error',
                    'Shipping method is not valid for chosen shipping '
                    'address')))
        if errors:
            raise forms.ValidationError(errors)
        return self.cleaned_data

    def save(self):
        """Promote the draft to an unfulfilled order."""
        self.instance.status = OrderStatus.UNFULFILLED
        if self.instance.user:
            self.instance.user_email = self.instance.user.email
        remove_shipping_address = False
        if not self.instance.is_shipping_required():
            self.instance.shipping_method_name = None
            self.instance.shipping_price = ZERO_TAXED_MONEY
            if self.instance.shipping_address:
                # Delete the now-unneeded address only after the order is saved.
                remove_shipping_address = True
        super().save()
        if remove_shipping_address:
            self.instance.shipping_address.delete()
        return self.instance
class OrderCustomerForm(forms.ModelForm):
    """Set customer details in an order."""
    # Whether to copy the customer's default addresses onto the order
    # after saving.
    update_addresses = forms.BooleanField(
        label=pgettext_lazy(
            'Update an order with user default addresses',
            'Set billing and shipping address in order to customer defaults'),
        initial=True, required=False)
    user = AjaxSelect2ChoiceField(
        queryset=User.objects.all(),
        fetch_data_url=reverse_lazy('dashboard:ajax-users-list'),
        required=False,
        label=pgettext_lazy(
            'Order form: editing customer details - selecting a customer',
            'Customer'))

    class Meta:
        model = Order
        fields = ['user', 'user_email']
        labels = {
            'user_email': pgettext_lazy(
                'Order customer email',
                'Email')}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        user = self.instance.user
        if user:
            # Preselect the current customer in the ajax widget.
            self.fields['user'].set_initial(user, label=user.get_ajax_label())

    def clean(self):
        """Allow either a user account or a bare email, not both."""
        cleaned_data = super().clean()
        user_email = cleaned_data.get('user_email')
        user = cleaned_data.get('user')
        if user and user_email:
            raise forms.ValidationError(pgettext_lazy(
                'Edit customer details in order form error',
                'An order can be related either with an email or an existing '
                'user account'))
        return self.cleaned_data

    def save(self):
        super().save()
        if self.cleaned_data.get('update_addresses'):
            update_order_with_user_addresses(self.instance)
        return self.instance
class OrderRemoveCustomerForm(forms.ModelForm):
    """Remove customer data from an order."""

    class Meta:
        model = Order
        fields = []

    def save(self):
        # Delegates entirely to the helper; intentionally skips super().save().
        remove_customer_from_order(self.instance)
        return self.instance
class OrderShippingForm(forms.ModelForm):
    """Set shipping name and shipping price in an order."""
    shipping_method = AjaxSelect2ChoiceField(
        queryset=ShippingMethod.objects.all(), min_input=0,
        label=pgettext_lazy(
            'Shipping method form field label', 'Shipping method'))

    class Meta:
        model = Order
        fields = ['shipping_method']

    def __init__(self, *args, **kwargs):
        # ``taxes`` is a required kwarg used to price the chosen method.
        self.taxes = kwargs.pop('taxes')
        super().__init__(*args, **kwargs)
        method_field = self.fields['shipping_method']
        fetch_data_url = reverse(
            'dashboard:ajax-order-shipping-methods',
            kwargs={'order_pk': self.instance.id})
        method_field.set_fetch_data_url(fetch_data_url)
        method = self.instance.shipping_method
        if method:
            method_field.set_initial(method, label=method.get_ajax_label())
        if self.instance.shipping_address:
            # Offer only methods whose zone covers the shipping country.
            country_code = self.instance.shipping_address.country.code
            queryset = method_field.queryset.filter(
                shipping_zone__countries__contains=country_code)
            method_field.queryset = queryset

    def save(self, commit=True):
        """Copy the method's name and price onto the order, then recalc."""
        method = self.instance.shipping_method
        self.instance.shipping_method_name = method.name
        self.instance.shipping_price = method.get_total(self.taxes)
        recalculate_order(self.instance)
        return super().save(commit)
class OrderRemoveShippingForm(forms.ModelForm):
    """Remove shipping name and shipping price from an order."""

    class Meta:
        model = Order
        fields = []

    def save(self, commit=True):
        """Clear method, name and price, then recalculate order totals."""
        self.instance.shipping_method = None
        self.instance.shipping_method_name = None
        self.instance.shipping_price = ZERO_TAXED_MONEY
        recalculate_order(self.instance)
        return super().save(commit)
class OrderEditDiscountForm(forms.ModelForm):
    """Edit discount amount in an order."""

    class Meta:
        model = Order
        fields = ['discount_amount']
        labels = {
            'discount_amount': pgettext_lazy(
                'Order discount amount fixed value',
                'Discount amount')}

    def save(self, commit=True):
        # Keep the manually entered amount: don't let the voucher recompute it.
        recalculate_order(self.instance, update_voucher_discount=False)
        return super().save(commit)
class OrderEditVoucherForm(forms.ModelForm):
    """Edit the voucher assigned to an order, adjusting usage counters."""
    voucher = AjaxSelect2ChoiceField(
        queryset=Voucher.objects.all(),
        fetch_data_url=reverse_lazy('dashboard:ajax-vouchers'), min_input=0,
        label=pgettext_lazy('Order voucher', 'Voucher'))

    class Meta:
        model = Order
        fields = ['voucher']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Remember the current voucher so its usage can be released on change.
        self.old_voucher = self.instance.voucher
        if self.instance.voucher:
            self.fields['voucher'].set_initial(self.instance.voucher)

    def save(self, commit=True):
        voucher = self.instance.voucher
        if self.old_voucher != voucher:
            if self.old_voucher:
                decrease_voucher_usage(self.old_voucher)
            increase_voucher_usage(voucher)
        self.instance.discount_name = voucher.name or ''
        # Store the translated name only when it differs from the base name.
        self.instance.translated_discount_name = (
            voucher.translated.name
            if voucher.translated.name != voucher.name else '')
        recalculate_order(self.instance)
        return super().save(commit)
class OrderNoteForm(forms.Form):
    """Attach a free-text staff note to an order."""
    message = forms.CharField(
        label=pgettext_lazy('Order note', 'Note'), widget=forms.Textarea())
class BasePaymentForm(forms.Form):
    """Base form for payment actions that operate on a money amount."""
    amount = forms.DecimalField(
        label=pgettext_lazy(
            'Payment management form (capture, refund, void)', 'Amount'),
        max_digits=settings.DEFAULT_MAX_DIGITS,
        decimal_places=settings.DEFAULT_DECIMAL_PLACES)
    # Generic failure message; subclasses override with a specific one.
    clean_error = pgettext_lazy(
        'Payment form error',
        'This payment action can not be performed.')

    def __init__(self, *args, **kwargs):
        # ``payment`` is a required kwarg holding the payment to act on.
        self.payment = kwargs.pop('payment')
        super().__init__(*args, **kwargs)

    def payment_error(self, message):
        """Attach a gateway error message as a non-field form error."""
        self.add_error(
            None, pgettext_lazy(
                'Payment form error', 'Payment gateway error: %s') % message)

    def try_payment_action(self, action):
        """Run ``action(payment, amount)``; return True on success.

        Gateway/validation failures are reported via ``payment_error``.
        """
        amount = self.cleaned_data['amount']
        try:
            action(self.payment, amount)
        except (PaymentError, ValueError) as e:
            self.payment_error(str(e))
            return False
        return True
class CapturePaymentForm(BasePaymentForm):
    """Capture a pre-authorized payment."""
    clean_error = pgettext_lazy(
        'Payment form error',
        'Only pre-authorized payments can be captured')

    def clean(self):
        if not self.payment.can_capture():
            raise forms.ValidationError(self.clean_error)

    def capture(self):
        """Attempt the capture; return True on success."""
        return self.try_payment_action(gateway_capture)
class RefundPaymentForm(BasePaymentForm):
    """Refund a confirmed, gateway-processed payment."""
    clean_error = pgettext_lazy(
        'Payment form error',
        'Only confirmed payments can be refunded')

    def clean(self):
        if not self.payment.can_refund():
            raise forms.ValidationError(self.clean_error)
        # Manual payments never went through a gateway, nothing to refund.
        if self.payment.gateway == CustomPaymentChoices.MANUAL:
            raise forms.ValidationError(
                pgettext_lazy(
                    'Payment form error',
                    'Manual payments can not be refunded'))

    def refund(self):
        """Attempt the refund; return True on success."""
        return self.try_payment_action(gateway_refund)
class VoidPaymentForm(BasePaymentForm):
    """Void a pre-authorized payment.

    Unlike capture/refund, a void takes no amount, so the inherited
    ``amount`` field is removed.
    """
    clean_error = pgettext_lazy(
        'Payment form error',
        'Only pre-authorized payments can be voided')

    def __init__(self, *args, **kwargs):
        # BasePaymentForm.__init__ already pops the ``payment`` kwarg and
        # assigns ``self.payment``; the previous re-pop here was redundant.
        super().__init__(*args, **kwargs)
        # The amount field is popped out
        # since there is no amount argument for void operation
        self.fields.pop('amount')

    def clean(self):
        if not self.payment.can_void():
            raise forms.ValidationError(self.clean_error)

    def void(self):
        """Attempt the void; return True on success.

        Cannot use ``try_payment_action`` because void takes no amount.
        """
        try:
            gateway_void(self.payment)
        except (PaymentError, ValueError) as e:
            self.payment_error(str(e))
            return False
        return True
class OrderMarkAsPaidForm(forms.Form):
    """Mark order as manually paid."""

    def __init__(self, *args, **kwargs):
        # Both ``order`` and acting ``user`` are required kwargs.
        self.order = kwargs.pop('order')
        self.user = kwargs.pop('user')
        super().__init__(*args, **kwargs)

    def clean(self):
        super().clean()
        try:
            clean_mark_order_as_paid(self.order)
        except PaymentError as e:
            # Surface payment-layer objections as form errors.
            raise forms.ValidationError(str(e))

    def save(self):
        mark_order_as_paid(self.order, self.user)
class CancelOrderLineForm(forms.Form):
    """Remove a single line from an order, releasing its stock allocation."""

    def __init__(self, *args, **kwargs):
        # ``line`` is a required kwarg: the OrderLine to cancel.
        self.line = kwargs.pop('line')
        super().__init__(*args, **kwargs)

    def cancel_line(self):
        if self.line.variant and self.line.variant.track_inventory:
            deallocate_stock(self.line.variant, self.line.quantity)
        # Capture the parent order before the line row is deleted.
        order = self.line.order
        delete_order_line(self.line)
        recalculate_order(order)
class ChangeQuantityForm(forms.ModelForm):
    """Change the quantity of an existing order line."""
    quantity = QuantityField(
        validators=[MinValueValidator(1)],
        label=pgettext_lazy('Integer number', 'Quantity'))

    class Meta:
        model = OrderLine
        fields = ['quantity']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Remember the starting quantity so stock can be adjusted by delta.
        self.initial_quantity = self.instance.quantity
        self.fields['quantity'].initial = self.initial_quantity

    def clean_quantity(self):
        """Reject increases that exceed the variant's available stock."""
        quantity = self.cleaned_data['quantity']
        delta = quantity - self.initial_quantity
        variant = self.instance.variant
        if variant and delta > variant.quantity_available:
            raise forms.ValidationError(
                npgettext_lazy(
                    'Change quantity form error',
                    'Only %(remaining)d remaining in stock.',
                    'Only %(remaining)d remaining in stock.',
                    number='remaining') % {
                        'remaining': (
                            self.initial_quantity + variant.quantity_available)})  # noqa
        return quantity

    def save(self):
        quantity = self.cleaned_data['quantity']
        variant = self.instance.variant
        if variant and variant.track_inventory:
            # update stock allocation
            delta = quantity - self.initial_quantity
            allocate_stock(variant, delta)
        change_order_line_quantity(self.instance, quantity)
        recalculate_order(self.instance.order)
        return self.instance
class CancelOrderForm(forms.Form):
    """Allow canceling an entire order.

    Deallocate or increase corresponding stocks for each order line.
    """
    restock = forms.BooleanField(initial=True, required=False)

    def __init__(self, *args, **kwargs):
        # ``order`` is a required kwarg: the order to cancel.
        self.order = kwargs.pop('order')
        super().__init__(*args, **kwargs)
        # Pluralized label showing how many items would be restocked.
        self.fields['restock'].label = npgettext_lazy(
            'Cancel order form action',
            'Restock %(quantity)d item',
            'Restock %(quantity)d items',
            number='quantity') % {'quantity': self.order.get_total_quantity()}

    def clean(self):
        data = super().clean()
        if not self.order.can_cancel():
            raise forms.ValidationError(
                pgettext_lazy(
                    'Cancel order form error',
                    "This order can't be canceled"))
        return data

    def cancel_order(self):
        cancel_order(self.order, self.cleaned_data.get('restock'))
class CancelFulfillmentForm(forms.Form):
    """Allow canceling an entire fulfillment.

    Increase corresponding stocks for each fulfillment line.
    """
    restock = forms.BooleanField(initial=True, required=False)

    def __init__(self, *args, **kwargs):
        # ``fulfillment`` is a required kwarg: the fulfillment to cancel.
        self.fulfillment = kwargs.pop('fulfillment')
        super().__init__(*args, **kwargs)
        # Pluralized label showing how many items would be restocked.
        self.fields['restock'].label = npgettext_lazy(
            'Cancel fulfillment form action',
            'Restock %(quantity)d item',
            'Restock %(quantity)d items',
            number='quantity') % {'quantity': self.fulfillment.get_total_quantity()}

    def clean(self):
        data = super().clean()
        if not self.fulfillment.can_edit():
            raise forms.ValidationError(
                pgettext_lazy(
                    'Cancel fulfillment form error',
                    'This fulfillment can\'t be canceled'))
        return data

    def cancel_fulfillment(self):
        cancel_fulfillment(self.fulfillment, self.cleaned_data.get('restock'))
class FulfillmentTrackingNumberForm(forms.ModelForm):
    """Update tracking number in fulfillment group."""
    send_mail = forms.BooleanField(
        initial=True, required=False, label=pgettext_lazy(
            'Send mail to customer',
            'Send notification email to customer'))

    class Meta:
        model = Fulfillment
        fields = ['tracking_number']
        labels = {
            'tracking_number': pgettext_lazy(
                'Fulfillment record', 'Tracking number')}

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.instance.order.get_user_current_email():
            # No customer email to notify; hide the checkbox.
            self.fields.pop('send_mail')
class OrderRemoveVoucherForm(forms.ModelForm):
    """Remove voucher from order. Decrease usage and recalculate order."""

    class Meta:
        model = Order
        fields = []

    def clean(self):
        data = super().clean()
        if not self.instance.voucher:
            raise forms.ValidationError(
                pgettext_lazy(
                    'Remove voucher form error',
                    'This order has no voucher'))
        return data

    def remove_voucher(self):
        """Release voucher usage and clear all discount fields on the order."""
        decrease_voucher_usage(self.instance.voucher)
        self.instance.discount_amount = 0
        self.instance.discount_name = ''
        self.instance.translated_discount_name = ''
        self.instance.voucher = None
        recalculate_order(self.instance)
# Filter choices: an "All" sentinel (empty value) plus every charge status.
PAYMENT_STATUS_CHOICES = (
    [('', pgettext_lazy('Payment status field value', 'All'))] +
    ChargeStatus.CHOICES)


class PaymentFilterForm(forms.Form):
    """Filter payments by charge status in the dashboard."""
    status = forms.ChoiceField(choices=PAYMENT_STATUS_CHOICES)
class AddVariantToOrderForm(forms.Form):
    """Allow adding lines with given quantity to an order."""
    variant = AjaxSelect2ChoiceField(
        queryset=ProductVariant.objects.filter(
            product__in=Product.objects.published()),
        fetch_data_url=reverse_lazy('dashboard:ajax-available-variants'),
        label=pgettext_lazy(
            'Order form: subform to add variant to order form: variant field',
            'Variant'))
    quantity = QuantityField(
        label=pgettext_lazy(
            'Add variant to order form label', 'Quantity'),
        validators=[MinValueValidator(1)])

    def __init__(self, *args, **kwargs):
        # ``order``, ``discounts`` and ``taxes`` are required kwargs used
        # when the line is created in save().
        self.order = kwargs.pop('order')
        self.discounts = kwargs.pop('discounts')
        self.taxes = kwargs.pop('taxes')
        super().__init__(*args, **kwargs)

    def clean(self):
        """Check if given quantity is available in stocks."""
        cleaned_data = super().clean()
        variant = cleaned_data.get('variant')
        quantity = cleaned_data.get('quantity')
        if variant and quantity is not None:
            try:
                variant.check_quantity(quantity)
            except InsufficientStock as e:
                # Bug fix: interpolate AFTER the lazy translation lookup.
                # Formatting the msgid first would prevent it from ever
                # matching the translation catalog.
                error = forms.ValidationError(
                    pgettext_lazy(
                        'Add item form error',
                        'Could not add item. '
                        'Only %(remaining)d remaining in stock.') % {
                            'remaining': e.item.quantity_available})
                self.add_error('quantity', error)
        return cleaned_data

    def save(self):
        """Add variant to order.

        Updates stocks and order.
        """
        variant = self.cleaned_data.get('variant')
        quantity = self.cleaned_data.get('quantity')
        add_variant_to_order(
            self.order, variant, quantity, self.discounts, self.taxes)
        recalculate_order(self.order)
class AddressForm(StorefrontAddressForm):
    """Dashboard address form with country-aware phone validation."""
    phone = PossiblePhoneNumberFormField(
        widget=PhonePrefixWidget, required=False,
        label=pgettext_lazy(
            'Order form: address subform - phone number input field',
            'Phone number'))

    def clean(self):
        data = super().clean()
        phone = data.get('phone')
        country = data.get('country')
        if phone:
            try:
                # Normalize/validate the number against the selected country.
                data['phone'] = clean_phone_for_country(phone, country)
            except forms.ValidationError as error:
                self.add_error('phone', error)
        return data
class FulfillmentForm(forms.ModelForm):
    """Create fulfillment group for a given order."""
    send_mail = forms.BooleanField(
        initial=True, required=False, label=pgettext_lazy(
            'Send mail to customer',
            'Send shipment details to your customer now'))

    class Meta:
        model = Fulfillment
        fields = ['tracking_number']
        labels = {
            'tracking_number': pgettext_lazy(
                'Order tracking number',
                'Tracking number')}

    def __init__(self, *args, **kwargs):
        # ``order`` is a required kwarg; the new fulfillment is bound to it.
        order = kwargs.pop('order')
        super().__init__(*args, **kwargs)
        self.instance.order = order
        if not order.get_user_current_email():
            # No customer email to notify; hide the checkbox.
            self.fields.pop('send_mail')
class BaseFulfillmentLineFormSet(forms.BaseModelFormSet):
    """Formset requiring at least one fulfilled item across all lines."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for form in self.forms:
            # Force validation even for forms the user left untouched.
            form.empty_permitted = False

    def clean(self):
        total_quantity = sum(
            form.cleaned_data.get('quantity', 0) for form in self.forms)
        if total_quantity <= 0:
            raise forms.ValidationError(
                'Total quantity must be larger than 0.')
class FulfillmentLineForm(forms.ModelForm):
    """Fulfill order line with given quantity by decreasing stock."""

    class Meta:
        model = FulfillmentLine
        fields = ['order_line', 'quantity']

    def clean_quantity(self):
        """Cap the quantity at what is still unfulfilled on the line."""
        quantity = self.cleaned_data.get('quantity')
        order_line = self.cleaned_data.get('order_line')
        if quantity > order_line.quantity_unfulfilled:
            raise forms.ValidationError(npgettext_lazy(
                'Fulfill order line form error',
                '%(quantity)d item remaining to fulfill.',
                '%(quantity)d items remaining to fulfill.',
                number='quantity') % {
                    'quantity': order_line.quantity_unfulfilled,
                    'order_line': order_line})
        return quantity

    def save(self, commit=True):
        # Decrease stock before persisting the fulfillment line.
        fulfill_order_line(self.instance.order_line, self.instance.quantity)
        return super().save(commit)
| |
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
import netaddr
from sqlalchemy import func
from sqlalchemy import not_
from sqlalchemy.orm import ColumnProperty
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import object_mapper
import nailgun.rpc as rpc
from nailgun import objects
from nailgun.consts import NODE_STATUSES
from nailgun.db import db
from nailgun.db.sqlalchemy.models import CapacityLog
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import Node
from nailgun.errors import errors
from nailgun.logger import logger
from nailgun.network.checker import NetworkCheck
from nailgun.orchestrator import deployment_serializers
from nailgun.orchestrator import provisioning_serializers
from nailgun.settings import settings
from nailgun.task.fake import FAKE_THREADS
from nailgun.task.helpers import TaskHelper
from nailgun.utils.zabbix import ZabbixManager
def make_astute_message(method, respond_to, args):
    """Build an RPC message envelope for Astute.

    :param method: RPC method name (e.g. 'deploy', 'provision')
    :param respond_to: receiver callback name for the reply
    :param args: method-specific payload dict
    :returns: message dict including the current API version
    """
    return dict(
        api_version=settings.VERSION['api'],
        method=method,
        respond_to=respond_to,
        args=args,
    )
def fake_cast(queue, messages, **kwargs):
    """Fake replacement for ``rpc.cast`` used when FAKE_TASKS is enabled.

    Instead of publishing to AMQP, runs the matching fake task thread from
    FAKE_THREADS locally. ``messages`` may be a single message dict or a
    list; list items are chained so each thread joins the previous one.
    """
    def make_thread(message, join_to=None):
        # Look up the fake thread class by the RPC method name.
        thread = FAKE_THREADS[message['method']](
            data=message,
            params=kwargs,
            join_to=join_to
        )
        logger.debug("Fake thread called: data: %s, params: %s",
                     message, kwargs)
        thread.start()
        # NOTE(review): name is set after start(); early log lines from the
        # thread may still carry the default thread name.
        thread.name = message['method'].upper()
        return thread
    if isinstance(messages, (list,)):
        # Chain the threads: each waits on the previous one.
        thread = None
        for m in messages:
            thread = make_thread(m, join_to=thread)
    else:
        make_thread(messages)
# Route all casts through the fake thread runner when running without a
# real Astute/AMQP backend (development and test configurations).
if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
    rpc.cast = fake_cast
class DeploymentTask(object):
    """Builds the astute 'deploy' message for a cluster deployment."""
    # LOGIC
    # Use cases:
    # 1. Cluster exists, node(s) added
    #   If we add one node to existing OpenStack cluster, other nodes may require
    #   updates (redeployment), but they don't require full system reinstallation.
    #   How to: run deployment for all nodes which system type is target.
    #   Run provisioning first and then deployment for nodes which are in
    #   discover system type.
    #   Q: Should we care about node status (provisioning, error, deploying)?
    #   A: offline - when node doesn't respond (agent doesn't run, not
    #                implemented); let's say user should remove this node from
    #                cluster before deployment.
    #      ready - target OS is loaded and node is Ok, we redeploy
    #              ready nodes only if cluster has pending changes i.e.
    #              network or cluster attrs were changed
    #      discover - in discovery mode, provisioning is required
    #      provisioning - at the time of task execution there should not be such
    #                     case. If there is - previous provisioning has failed.
    #                     Possible solution would be to try again to provision
    #      deploying - the same as provisioning, but stucked in previous deploy,
    #                  solution - try to deploy. May loose some data if reprovis.
    #      error - recognized error in deployment or provisioning... We have to
    #              know where the error was. If in deployment - reprovisioning may
    #              not be a solution (can loose data). If in provisioning - can do
    #              provisioning & deployment again
    # 2. New cluster, just added nodes
    #   Provision first, and run deploy as second
    # 3. Remove some and add some another node
    #   Deletion task will run first and will actually remove nodes, include
    #   removal from DB.. however removal from DB happens when remove_nodes_resp
    #   is ran. It means we have to filter nodes and not to run deployment on
    #   those which are prepared for removal.

    @classmethod
    def message(cls, task, nodes):
        """Serialize ``nodes`` of ``task.cluster`` into a deploy message.

        Promotes pending roles, normalizes stale 'deploying' statuses and
        clears ``pending_addition`` after serialization.
        """
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(
                cluster=task.cluster).order_by(Node.id):
            if n.id in nodes_ids:
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []
                # If reciever for some reasons didn't update
                # node's status to provisioned when deployment
                # started, we should do it in nailgun
                if n.status in (NODE_STATUSES.deploying,):
                    n.status = NODE_STATUSES.provisioned
                n.progress = 0
                db().add(n)
        db().flush()
        # here we replace deployment data if user redefined them
        serialized_cluster = deployment_serializers.serialize(
            task.cluster, nodes)
        # After serialization set pending_addition to False
        for node in nodes:
            node.pending_addition = False
        db().commit()
        return make_astute_message(
            'deploy',
            'deploy_resp',
            {
                'task_uuid': task.uuid,
                'deployment_info': serialized_cluster
            }
        )
class UpdateTask(object):
    """Builds the astute 'deploy' message for a cluster update."""

    @classmethod
    def message(cls, task, nodes):
        """Serialize ``nodes`` of ``task.cluster`` into a deploy message.

        Promotes pending roles, marks nodes provisioned and clears
        ``pending_addition`` after serialization.
        """
        # Bug fix: in a classmethod ``cls`` is already the class object, so
        # ``cls.__class__.__name__`` evaluated to "type"; ``cls.__name__``
        # logs the actual task class name.
        logger.debug("%s.message(task=%s)", cls.__name__, task.uuid)
        for n in nodes:
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []
            n.status = 'provisioned'
            n.progress = 0
        # here we replace deployment data if user redefined them
        serialized_cluster = deployment_serializers.serialize(
            task.cluster, nodes)
        # After serialization set pending_addition to False
        for node in nodes:
            node.pending_addition = False
        db().commit()
        return make_astute_message(
            'deploy',
            'deploy_resp',
            {
                'task_uuid': task.uuid,
                'deployment_info': serialized_cluster
            }
        )
class ProvisionTask(object):
    """Builds the astute 'provision' message for the given nodes."""

    @classmethod
    def message(cls, task, nodes_to_provisioning):
        """Serialize provisioning info and prepare nodes' syslog dirs."""
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        # Re-fetch the task with a row lock to serialize concurrent updates.
        task = objects.Task.get_by_uid(
            task.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        objects.NodeCollection.lock_nodes(nodes_to_provisioning)
        serialized_cluster = provisioning_serializers.serialize(
            task.cluster, nodes_to_provisioning)
        for node in nodes_to_provisioning:
            if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
                # Fake mode: no real syslog directories are needed.
                continue
            admin_net_id = objects.Node.get_network_manager(
                node
            ).get_admin_network_group_id()
            TaskHelper.prepare_syslog_dir(node, admin_net_id)
        db().commit()
        return make_astute_message(
            'provision',
            'provision_resp',
            {
                'task_uuid': task.uuid,
                'provisioning_info': serialized_cluster
            }
        )
class DeletionTask(object):
    @classmethod
    def execute(cls, task, respond_to='remove_nodes_resp'):
        """Delete all pending-deletion nodes of the task's cluster.

        Undeployed ('discover') nodes are removed from the DB directly;
        the rest are handed to astute via a 'remove_nodes' message so the
        slaves can wipe their MBR. Also removes the nodes from Zabbix
        monitoring when the cluster has a Zabbix node.

        :param task: the deletion task
        :param respond_to: astute receiver method name;
            'remove_cluster_resp' when deleting the whole cluster
        """
        logger.debug("DeletionTask.execute(task=%s)" % task.uuid)
        task_uuid = task.uuid
        logger.debug("Nodes deletion task is running")
        nodes_to_delete = []
        nodes_to_restore = []

        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP

        # no need to call astute if there are no nodes in cluster
        if respond_to == 'remove_cluster_resp' and \
                not list(task.cluster.nodes):
            rcvr = rpc.receiver.NailgunReceiver()
            rcvr.remove_cluster_resp(
                task_uuid=task_uuid,
                status='ready',
                progress=100
            )
            return

        for node in task.cluster.nodes:
            if node.pending_deletion:
                nodes_to_delete.append({
                    'id': node.id,
                    'uid': node.id,
                    'roles': node.roles,
                    'slave_name': objects.Node.make_slave_name(node)
                })

                if USE_FAKE:
                    # only fake tasks
                    # Snapshot all plain column attributes (except the
                    # ones listed) so the fake receiver can re-create
                    # the node afterwards.
                    new_node = {}
                    keep_attrs = (
                        'id',
                        'cluster_id',
                        'roles',
                        'pending_deletion',
                        'pending_addition'
                    )
                    for prop in object_mapper(node).iterate_properties:
                        if isinstance(
                            prop, ColumnProperty
                        ) and prop.key not in keep_attrs:
                            new_node[prop.key] = getattr(node, prop.key)
                    nodes_to_restore.append(new_node)
                    # /only fake tasks

        # check if there's a zabbix server in an environment
        # and if there is, remove hosts
        if ZabbixManager.get_zabbix_node(task.cluster):
            zabbix_credentials = ZabbixManager.get_zabbix_credentials(
                task.cluster
            )
            logger.debug("Removing nodes %s from zabbix" % (nodes_to_delete))
            try:
                ZabbixManager.remove_from_zabbix(
                    zabbix_credentials, nodes_to_delete
                )
            except (errors.CannotMakeZabbixRequest,
                    errors.ZabbixRequestError) as e:
                # Zabbix cleanup is best-effort; node deletion proceeds.
                logger.warning("%s, skipping removing nodes from Zabbix", e)

        # this variable is used to iterate over it
        # and be able to delete node from nodes_to_delete safely
        nodes_to_delete_constant = list(nodes_to_delete)

        # locking nodes
        nodes_ids = [node['id'] for node in nodes_to_delete_constant]
        nodes_db = objects.NodeCollection.filter_by_list(
            None,
            'id',
            nodes_ids,
            order_by='id'
        )
        objects.NodeCollection.lock_for_update(nodes_db).all()

        for node in nodes_to_delete_constant:
            node_db = objects.Node.get_by_uid(node['id'], lock_for_update=True)

            slave_name = objects.Node.make_slave_name(node_db)
            logger.debug("Removing node from database and pending it "
                         "to clean its MBR: %s", slave_name)
            if node_db.status == 'discover':
                # Never deployed: nothing to wipe, so drop the node from
                # the DB and from the astute message right away.
                logger.info(
                    "Node is not deployed yet,"
                    " can't clean MBR: %s", slave_name)
                db().delete(node_db)
                db().flush()

                nodes_to_delete.remove(node)

        db().commit()

        msg_delete = make_astute_message(
            'remove_nodes',
            respond_to,
            {
                'task_uuid': task.uuid,
                'nodes': nodes_to_delete,
                'engine': {
                    'url': settings.COBBLER_URL,
                    'username': settings.COBBLER_USER,
                    'password': settings.COBBLER_PASSWORD,
                    'master_ip': settings.MASTER_IP,
                }
            }
        )
        # only fake tasks
        if USE_FAKE and nodes_to_restore:
            msg_delete['args']['nodes_to_restore'] = nodes_to_restore
        # /only fake tasks
        logger.debug("Calling rpc remove_nodes method")
        rpc.cast('naily', msg_delete)
class StopDeploymentTask(object):
    @classmethod
    def message(cls, task, stop_task):
        """Build the astute message that stops *stop_task*.

        Includes every cluster node that has not reached 'ready' yet.

        :param task: the stop-deployment task itself
        :param stop_task: the running deploy/provision task to cancel
        """
        nodes_to_stop = db().query(Node).filter(
            Node.cluster_id == task.cluster.id
        ).filter(
            not_(Node.status == 'ready')
        ).yield_per(100)
        return make_astute_message(
            "stop_deploy_task",
            "stop_deployment_resp",
            {
                "task_uuid": task.uuid,
                "stop_task_uuid": stop_task.uuid,
                "nodes": [
                    {
                        'uid': n.uid,
                        'roles': n.roles,
                        'slave_name': objects.Node.make_slave_name(n),
                        'admin_ip': objects.Node.get_network_manager(
                            n
                        ).get_admin_ip_for_node(n)
                    } for n in nodes_to_stop
                ],
                "engine": {
                    "url": settings.COBBLER_URL,
                    "username": settings.COBBLER_USER,
                    "password": settings.COBBLER_PASSWORD,
                    "master_ip": settings.MASTER_IP,
                }
            }
        )

    @classmethod
    def execute(cls, task, deploy_task, provision_task):
        """Cast a stop message for whichever subtasks are running."""
        if provision_task:
            rpc.cast(
                'naily',
                cls.message(task, provision_task),
                service=True
            )
        if deploy_task:
            rpc.cast(
                'naily',
                cls.message(task, deploy_task),
                service=True
            )
class ResetEnvironmentTask(object):
    """Builds and casts the 'reset_environment' message for a cluster."""

    @classmethod
    def message(cls, task):
        """Compose the astute message resetting every node of the cluster."""
        cluster_nodes = db().query(Node).filter(
            Node.cluster_id == task.cluster.id
        ).yield_per(100)
        node_descriptions = []
        for node in cluster_nodes:
            node_descriptions.append({
                'uid': node.uid,
                'roles': node.roles,
                'slave_name': objects.Node.make_slave_name(node)
            })
        return make_astute_message(
            "reset_environment",
            "reset_environment_resp",
            {
                "task_uuid": task.uuid,
                "nodes": node_descriptions,
                "engine": {
                    "url": settings.COBBLER_URL,
                    "username": settings.COBBLER_USER,
                    "password": settings.COBBLER_PASSWORD,
                    "master_ip": settings.MASTER_IP,
                }
            }
        )

    @classmethod
    def execute(cls, task):
        """Send the reset message to astute over RPC."""
        rpc.cast('naily', cls.message(task))
class ClusterDeletionTask(object):
    """Removes a whole cluster by delegating to DeletionTask."""

    @classmethod
    def execute(cls, task):
        logger.debug("Cluster deletion task is running")
        DeletionTask.execute(task, respond_to='remove_cluster_resp')
class BaseNetworkVerification(object):
    """Common machinery for network verification tasks.

    Subclasses override get_message_body(); self.config holds the
    verification configuration (for the base class: a list of network
    dicts carrying 'name' and 'vlans' keys).
    """

    def __init__(self, task, config):
        self.task = task
        self.config = config

    def get_message_body(self):
        """Build the per-node iface/vlans description sent to net_probe."""
        nodes = []
        for n in self.task.cluster.nodes:
            node_json = {'uid': n.id, 'networks': []}
            for nic in n.nic_interfaces:
                assigned_networks = nic.assigned_networks_list
                # in case of using bond interface - use networks assigned
                # to bond
                if nic.bond:
                    assigned_networks = nic.bond.assigned_networks_list
                vlans = []
                for ng in assigned_networks:
                    # Handle FuelWeb admin network first.
                    if not ng.cluster_id:
                        vlans.append(0)
                        continue
                    # Look up this network group's entry in the supplied
                    # config by name (assumes exactly one match).
                    data_ng = filter(lambda i: i['name'] == ng.name,
                                     self.config)[0]
                    if data_ng['vlans']:
                        vlans.extend(data_ng['vlans'])
                    else:
                        # in case absence of vlans net_probe will
                        # send packages on untagged iface
                        vlans.append(0)
                if not vlans:
                    continue
                node_json['networks'].append(
                    {'iface': nic.name, 'vlans': vlans}
                )
            nodes.append(node_json)
        return nodes

    def get_message(self):
        """Compose the astute message and cache it on the task."""
        nodes = self.get_message_body()
        message = make_astute_message(
            self.task.name,
            '{0}_resp'.format(self.task.name),
            {
                'task_uuid': self.task.uuid,
                'nodes': nodes
            }
        )
        self.task.cache = message
        return message

    def execute(self, task=None):
        """Commit the session and cast the verification message."""
        # task is there for prev compatibility
        message = self.get_message()
        logger.debug("%s method is called with: %s",
                     self.task.name, message)
        db().commit()
        rpc.cast('naily', message)

    @classmethod
    def enabled(cls, cluster):
        """Should be used to verify that subtask is enabled based on
        cluster configuration
        """
        return True
class VerifyNetworksTask(BaseNetworkVerification):
    """Network verification that aggregates messages of added subtasks."""

    def __init__(self, *args):
        super(VerifyNetworksTask, self).__init__(*args)
        self.subtasks = []

    def add_subtask(self, subtask):
        """Queue a subtask's message for inclusion in the final message."""
        self.subtasks.append(subtask.get_message())

    def get_message(self):
        """Extend the base message with the collected subtask messages."""
        msg = super(VerifyNetworksTask, self).get_message()
        msg['subtasks'] = self.subtasks
        return msg
class CheckDhcpTask(BaseNetworkVerification):
    """DHCP verification task.

    Inherits all behaviour from BaseNetworkVerification; only the task
    name (and therefore the astute method/response names) differs.
    """
class MulticastVerificationTask(BaseNetworkVerification):
    """Verifies multicast connectivity on the corosync group/port."""

    def __init__(self, task):
        # Config comes straight from the cluster's editable corosync
        # attributes section.
        corosync = task.cluster.attributes['editable']['corosync']
        group = corosync['group']['value']
        port = corosync['port']['value']
        conf = {'group': group, 'port': port}
        super(MulticastVerificationTask, self).__init__(task, conf)

    def get_message_body(self):
        # multicast verification should be done only for network which
        # corosync uses for communication - management in our case
        all_nics = objects.cluster.Cluster.get_ifaces_for_network_in_cluster(
            self.task.cluster, 'management')
        # assumes each nic tuple is (node_id, iface_name) — TODO confirm
        # against get_ifaces_for_network_in_cluster
        return [dict(self.config, iface=nic[1], uid=str(nic[0]))
                for nic in all_nics]

    @classmethod
    def enabled(cls, cluster):
        """Multicast should be enabled only in case 'corosync' section
        is present in editable attributes, which is not the case if cluster
        was upgraded from 5.0
        """
        #TODO(dshulyak) enable it, when it will be possible to upgrade
        # mcagent and network checker for old envs
        return False
class CheckNetworksTask(object):
    """Runs the network configuration checker for a cluster."""

    @classmethod
    def execute(cls, task, data, check_admin_untagged=False):
        """Validate network config; optionally warn on interface mapping."""
        verifier = NetworkCheck(task, data)
        verifier.check_configuration()
        if check_admin_untagged:
            warnings = verifier.check_interface_mapping()
            if warnings:
                # Persist the warning on the task so the API can report it.
                task.result = {"warning": warnings}
                db().commit()
class CheckBeforeDeploymentTask(object):
    """Pre-deployment sanity checks; each check raises a specific error."""

    @classmethod
    def execute(cls, task):
        """Run every pre-deployment check for the task's cluster."""
        cls._check_nodes_are_online(task)
        cls._check_controllers_count(task)
        cls._check_disks(task)
        cls._check_ceph(task)
        cls._check_volumes(task)
        cls._check_network(task)

    @classmethod
    def _check_nodes_are_online(cls, task):
        """Fail if a non-deleted, not-ready node of the cluster is offline."""
        offline_nodes = db().query(Node).\
            filter(Node.cluster == task.cluster).\
            filter_by(online=False).\
            filter_by(pending_deletion=False).\
            filter(not_(Node.status.in_(['ready'])))
        if offline_nodes.count():
            node_names = ','.join(map(lambda n: n.full_name, offline_nodes))
            raise errors.NodeOffline(
                u'Nodes "{0}" are offline.'
                ' Remove them from environment '
                'and try again.'.format(node_names))

    @classmethod
    def _check_controllers_count(cls, task):
        """Both 'multinode' and 'ha_compact' need at least one controller."""
        controllers_count = len(filter(
            lambda node: 'controller' in node.all_roles,
            task.cluster.nodes)
        )
        cluster_mode = task.cluster.mode
        # The two modes previously had two byte-identical branches; they
        # share the same minimum, so one membership test is enough.
        if cluster_mode in ('multinode', 'ha_compact') and \
                controllers_count < 1:
            raise errors.NotEnoughControllers(
                "Not enough controllers, %s mode requires at least 1 "
                "controller" % (cluster_mode))

    @classmethod
    def _check_disks(cls, task):
        """Fail if a to-be-provisioned node lacks disk space."""
        try:
            for node in task.cluster.nodes:
                if cls._is_disk_checking_required(node):
                    node.volume_manager.check_disk_space_for_deployment()
        except errors.NotEnoughFreeSpace:
            # `node` is the one that failed; re-raise with its name.
            raise errors.NotEnoughFreeSpace(
                u"Node '{0}' has insufficient disk space".format(
                    node.human_readable_name
                )
            )

    @classmethod
    def _check_volumes(cls, task):
        """Fail if a to-be-provisioned node's volumes do not fit."""
        try:
            for node in task.cluster.nodes:
                if cls._is_disk_checking_required(node):
                    node.volume_manager.check_volume_sizes_for_deployment()
        except errors.NotEnoughFreeSpace as e:
            raise errors.NotEnoughFreeSpace(
                u"Node '%s' has insufficient disk space\n%s" % (
                    node.human_readable_name, e.message))

    @classmethod
    def _check_ceph(cls, task):
        """Run the OSD-count check when any *_ceph storage option is on."""
        storage = objects.Attributes.merged_attrs(
            task.cluster.attributes
        )['storage']
        for option in storage:
            if '_ceph' in option and\
               storage[option] and\
               storage[option]['value'] is True:
                cls._check_ceph_osds(task)
                return

    @classmethod
    def _is_disk_checking_required(cls, node):
        """Disk checking required in case if node is not provisioned.
        """
        if node.status in ('ready', 'deploying', 'provisioned') or \
                (node.status == 'error' and node.error_type != 'provision'):
            return False
        return True

    @classmethod
    def _check_ceph_osds(cls, task):
        """OSD node count must cover the configured replication factor."""
        osd_count = len(filter(
            lambda node: 'ceph-osd' in node.all_roles,
            task.cluster.nodes))
        osd_pool_size = int(objects.Attributes.merged_attrs(
            task.cluster.attributes
        )['storage']['osd_pool_size']['value'])
        if osd_count < osd_pool_size:
            raise errors.NotEnoughOsdNodes(
                'Number of OSD nodes (%s) cannot be less than '
                'the Ceph object replication factor (%s). '
                'Please either assign ceph-osd role to more nodes, '
                'or reduce Ceph replication factor in the Settings tab.' %
                (osd_count, osd_pool_size))

    @classmethod
    def _check_network(cls, task):
        """The public network must hold at least one IP per cluster node."""
        nodes_count = len(task.cluster.nodes)
        public_network = filter(
            lambda ng: ng.name == 'public',
            task.cluster.network_groups)[0]
        public_network_size = cls.__network_size(public_network)
        if public_network_size < nodes_count:
            error_message = cls.__format_network_error(nodes_count)
            raise errors.NetworkCheckError(error_message)

    @classmethod
    def __network_size(cls, network):
        # Total number of addresses across all configured IP ranges.
        return sum(len(netaddr.IPRange(ip_range.first, ip_range.last))
                   for ip_range in network.ip_ranges)

    @classmethod
    def __format_network_error(cls, nodes_count):
        return 'Not enough IP addresses. Public network must have at least '\
               '{nodes_count} IP addresses '.format(nodes_count=nodes_count) + \
               'for the current environment.'
class DumpTask(object):
    @classmethod
    def conf(cls):
        """Build the shotgun snapshot config from settings and the DB."""
        logger.debug("Preparing config for snapshot")
        # Only nodes that took part in deployment are worth dumping.
        nodes = db().query(Node).filter(
            Node.status.in_(['ready', 'provisioned', 'deploying', 'error'])
        ).all()

        dump_conf = deepcopy(settings.DUMP)
        dump_conf['dump']['slave']['hosts'] = [
            {
                'address': n.fqdn,
                'ssh-key': settings.SHOTGUN_SSH_KEY,
            } for n in nodes
        ]

        # render postgres connection data in dump settings
        dump_conf['dump']['local']['objects'].append({
            'type': 'postgres',
            'dbhost': settings.DATABASE['host'],
            'dbname': settings.DATABASE['name'],
            'username': settings.DATABASE['user'],
            'password': settings.DATABASE['passwd'],
        })

        # render cobbler connection data in dump settings
        # NOTE: we don't need user/password for cobbler
        dump_conf['dump']['local']['objects'].append({
            'type': 'xmlrpc',
            'server': settings.COBBLER_URL,
            'methods': [
                'get_distros',
                'get_profiles',
                'get_systems',
            ],
            'to_file': 'cobbler.txt',
        })

        # inject master host
        dump_conf['dump']['master']['hosts'] = [{
            'address': settings.MASTER_IP,
            'ssh-key': settings.SHOTGUN_SSH_KEY,
        }]

        logger.debug("Dump conf: %s", str(dump_conf))
        return dump_conf

    @classmethod
    def execute(cls, task):
        """Cast the dump_environment message and cache it on the task."""
        logger.debug("DumpTask: task={0}".format(task.uuid))
        message = make_astute_message(
            'dump_environment',
            'dump_environment_resp',
            {
                'task_uuid': task.uuid,
                'settings': cls.conf()
            }
        )
        task.cache = message
        db().add(task)
        db().commit()
        rpc.cast('naily', message)
class GenerateCapacityLogTask(object):
    """Collects allocation statistics and stores them as a CapacityLog row."""

    @classmethod
    def execute(cls, task):
        """Gather node/role/allocation stats and finish the task.

        Writes a CapacityLog record and marks *task* ready with its id.
        """
        logger.debug("GenerateCapacityLogTask: task=%s" % task.uuid)
        unallocated_nodes = db().query(Node).filter_by(cluster_id=None).count()
        # Use Node.cluster_id != (None) for PEP-8 accordance.
        allocated_nodes = db().query(Node).\
            filter(Node.cluster_id != (None)).count()
        node_allocation = db().query(Cluster, func.count(Node.id)).\
            outerjoin(Node).group_by(Cluster)
        env_stats = []
        for allocation in node_allocation:
            env_stats.append({'cluster': allocation[0].name,
                              'nodes': allocation[1]})
        allocation_stats = {'allocated': allocated_nodes,
                            'unallocated': unallocated_nodes}
        fuel_data = {
            "release": settings.VERSION['release'],
            "uuid": settings.FUEL_KEY
        }
        nodes = db().query(Node).options(
            joinedload('role_list'))
        roles_stat = {}
        for node in nodes:
            if node.roles:
                # Count each distinct (sorted) role combination once per node.
                roles_list = '+'.join(sorted(node.roles))
                roles_stat[roles_list] = roles_stat.get(roles_list, 0) + 1
        capacity_data = {'environment_stats': env_stats,
                         'allocation_stats': allocation_stats,
                         'fuel_data': fuel_data,
                         'roles_stat': roles_stat}

        capacity_log = CapacityLog()
        capacity_log.report = capacity_data
        db().add(capacity_log)
        db().flush()

        task.result = {'log_id': capacity_log.id}
        task.status = 'ready'
        # Progress is an integer everywhere else in this module (e.g.
        # DeletionTask uses progress=100); '100' (str) was inconsistent.
        task.progress = 100
        db().add(task)
        db().commit()
| |
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: gettext.py
"""Internationalization and localization support.
This module provides internationalization (I18N) and localization (L10N)
support for your Python programs by providing an interface to the GNU gettext
message catalog library.
I18N refers to the operation by which a program is made aware of multiple
languages. L10N refers to the adaptation of your program, once
internationalized, to the local language and cultural habits.
"""
import locale
import copy
import os
import re
import struct
import sys
from errno import ENOENT
__all__ = [
'NullTranslations', 'GNUTranslations', 'Catalog',
'find', 'translation', 'install', 'textdomain', 'bindtextdomain',
'dgettext', 'dngettext', 'gettext', 'ngettext']
_default_localedir = os.path.join(sys.prefix, 'share', 'locale')
def test(condition, true, false):
    """
    Implements the C expression:

      condition ? true : false

    Required to correctly interpret plural forms.
    """
    return true if condition else false
def c2py(plural):
    """Gets a C expression as used in PO files for plural forms and returns a
    Python lambda function that implements an equivalent expression.
    """
    try:
        from cStringIO import StringIO
    except ImportError:
        from StringIO import StringIO
    import token
    import tokenize
    tokens = tokenize.generate_tokens(StringIO(plural).readline)
    try:
        # Security check: the expression is eval()'d below, so refuse any
        # identifier other than the plural variable 'n'.
        danger = [ x for x in tokens if x[0] == token.NAME and x[1] != 'n' ]
    except tokenize.TokenError:
        raise ValueError, 'plural forms expression error, maybe unbalanced parenthesis'
    else:
        if danger:
            raise ValueError, 'plural forms expression could be dangerous'
        # Translate C boolean/negation operators to Python equivalents
        # ('!=' must survive, hence the ([^=]) guard).
        plural = plural.replace('&&', ' and ')
        plural = plural.replace('||', ' or ')
        expr = re.compile('\\!([^=])')
        plural = expr.sub(' not \\1', plural)
        # Rewrite C ternaries 'a ? b : c' as calls to test(a, b, c),
        # recursing into the else-branch for chained ternaries.
        expr = re.compile('(.*?)\\?(.*?):(.*)')

        def repl(x):
            return 'test(%s, %s, %s)' % (x.group(1), x.group(2),
             expr.sub(repl, x.group(3)))

        # Process parenthesized sub-expressions innermost-first via a
        # stack of partial strings.
        stack = [
         '']
        for c in plural:
            if c == '(':
                stack.append('')
            elif c == ')':
                if len(stack) == 1:
                    raise ValueError, 'unbalanced parenthesis in plural form'
                s = expr.sub(repl, stack.pop())
                stack[-1] += '(%s)' % s
            else:
                stack[-1] += c

        plural = expr.sub(repl, stack.pop())
        # Safe: only the identifier 'n' can appear (checked above).
        return eval('lambda n: int(%s)' % plural)
def _expand_lang(locale):
    """Expand a locale name into its list of fallback locales.

    'en_US.UTF-8@mod' yields every combination of the language with or
    without territory, codeset and modifier, most specific first.
    """
    from locale import normalize
    locale = normalize(locale)

    COMPONENT_CODESET = 1
    COMPONENT_TERRITORY = 2
    COMPONENT_MODIFIER = 4
    mask = 0

    # Peel off '@modifier', '.codeset' and '_territory' in turn, recording
    # which components are present in the bitmask.
    at = locale.find('@')
    if at >= 0:
        modifier = locale[at:]
        locale = locale[:at]
        mask |= COMPONENT_MODIFIER
    else:
        modifier = ''
    dot = locale.find('.')
    if dot >= 0:
        codeset = locale[dot:]
        locale = locale[:dot]
        mask |= COMPONENT_CODESET
    else:
        codeset = ''
    under = locale.find('_')
    if under >= 0:
        territory = locale[under:]
        locale = locale[:under]
        mask |= COMPONENT_TERRITORY
    else:
        territory = ''
    language = locale

    # Enumerate every subset of the present components, then order the
    # resulting names most specific first.
    variants = []
    for bits in range(mask + 1):
        if bits & ~mask:
            continue
        name = language
        if bits & COMPONENT_TERRITORY:
            name += territory
        if bits & COMPONENT_CODESET:
            name += codeset
        if bits & COMPONENT_MODIFIER:
            name += modifier
        variants.append(name)
    variants.reverse()
    return variants
class NullTranslations:
    """Identity translation class and base class for GNUTranslations.

    Returns messages unchanged unless a fallback translation is chained
    in via add_fallback().
    """

    def __init__(self, fp=None):
        self._info = {}
        self._charset = None
        self._output_charset = None
        self._fallback = None
        if fp is not None:
            self._parse(fp)
        return

    def _parse(self, fp):
        # Subclasses parse the catalog file here.
        pass

    def add_fallback(self, fallback):
        # Fallbacks form a chain: delegate to the current tail.
        if self._fallback:
            self._fallback.add_fallback(fallback)
        else:
            self._fallback = fallback

    def gettext(self, message):
        if self._fallback:
            return self._fallback.gettext(message)
        return message

    def lgettext(self, message):
        if self._fallback:
            return self._fallback.lgettext(message)
        return message

    def ngettext(self, msgid1, msgid2, n):
        if self._fallback:
            return self._fallback.ngettext(msgid1, msgid2, n)
        else:
            if n == 1:
                return msgid1
            return msgid2

    def lngettext(self, msgid1, msgid2, n):
        if self._fallback:
            return self._fallback.lngettext(msgid1, msgid2, n)
        else:
            if n == 1:
                return msgid1
            return msgid2

    def ugettext(self, message):
        # Unicode variant (Python 2 only: uses the builtin `unicode`).
        if self._fallback:
            return self._fallback.ugettext(message)
        return unicode(message)

    def ungettext(self, msgid1, msgid2, n):
        if self._fallback:
            return self._fallback.ungettext(msgid1, msgid2, n)
        else:
            if n == 1:
                return unicode(msgid1)
            return unicode(msgid2)

    def info(self):
        # Catalog metadata headers (lowercased keys).
        return self._info

    def charset(self):
        return self._charset

    def output_charset(self):
        return self._output_charset

    def set_output_charset(self, charset):
        self._output_charset = charset

    def install(self, unicode=False, names=None):
        """Bind _() (and optionally more gettext names) into builtins."""
        import __builtin__
        __builtin__.__dict__['_'] = unicode and self.ugettext or self.gettext
        if hasattr(names, '__contains__'):
            if 'gettext' in names:
                __builtin__.__dict__['gettext'] = __builtin__.__dict__['_']
            if 'ngettext' in names:
                __builtin__.__dict__['ngettext'] = unicode and self.ungettext or self.ngettext
            if 'lgettext' in names:
                __builtin__.__dict__['lgettext'] = self.lgettext
            if 'lngettext' in names:
                __builtin__.__dict__['lngettext'] = self.lngettext
class GNUTranslations(NullTranslations):
    # Magic number of GNU .mo files as read with '<I' / '>I' respectively
    # (little-endian value is 0x950412de).
    LE_MAGIC = 2500072158
    BE_MAGIC = 3725722773

    def _parse(self, fp):
        """Override this method to support alternative .mo formats."""
        unpack = struct.unpack
        filename = getattr(fp, 'name', '')
        # Catalog holds singular entries keyed by msgid and plural entries
        # keyed by (msgid, plural-index).
        self._catalog = catalog = {}
        # Germanic plural by default; overridden by the Plural-Forms header.
        self.plural = lambda n: int(n != 1)
        buf = fp.read()
        buflen = len(buf)
        magic = unpack('<I', buf[:4])[0]
        if magic == self.LE_MAGIC:
            version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20])
            ii = '<II'
        elif magic == self.BE_MAGIC:
            version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20])
            ii = '>II'
        else:
            raise IOError(0, 'Bad magic number', filename)
        for i in xrange(0, msgcount):
            # Each index entry is a (length, offset) pair.
            mlen, moff = unpack(ii, buf[masteridx:masteridx + 8])
            mend = moff + mlen
            tlen, toff = unpack(ii, buf[transidx:transidx + 8])
            tend = toff + tlen
            if mend < buflen and tend < buflen:
                msg = buf[moff:mend]
                tmsg = buf[toff:tend]
            else:
                raise IOError(0, 'File is corrupt', filename)
            if mlen == 0:
                # Empty msgid: its "translation" carries the catalog
                # metadata headers (Content-Type, Plural-Forms, ...).
                lastk = k = None
                for item in tmsg.splitlines():
                    item = item.strip()
                    if not item:
                        continue
                    if ':' in item:
                        k, v = item.split(':', 1)
                        k = k.strip().lower()
                        v = v.strip()
                        self._info[k] = v
                        lastk = k
                    elif lastk:
                        # Continuation line of the previous header.
                        self._info[lastk] += '\n' + item
                    if k == 'content-type':
                        self._charset = v.split('charset=')[1]
                    elif k == 'plural-forms':
                        v = v.split(';')
                        plural = v[1].split('plural=')[1]
                        self.plural = c2py(plural)
            if '\x00' in msg:
                # NUL-separated 'msgid\x00msgid_plural' with NUL-separated
                # translations, one per plural form.
                msgid1, msgid2 = msg.split('\x00')
                tmsg = tmsg.split('\x00')
                if self._charset:
                    msgid1 = unicode(msgid1, self._charset)
                    tmsg = [ unicode(x, self._charset) for x in tmsg ]
                for i in range(len(tmsg)):
                    catalog[msgid1, i] = tmsg[i]
            else:
                if self._charset:
                    msg = unicode(msg, self._charset)
                    tmsg = unicode(tmsg, self._charset)
                catalog[msg] = tmsg
            # Advance to the next (length, offset) pair in each index.
            masteridx += 8
            transidx += 8
        return

    def gettext(self, message):
        """Return the translation of message, encoded per output charset."""
        missing = object()
        tmsg = self._catalog.get(message, missing)
        if tmsg is missing:
            if self._fallback:
                return self._fallback.gettext(message)
            return message
        if self._output_charset:
            return tmsg.encode(self._output_charset)
        if self._charset:
            return tmsg.encode(self._charset)
        return tmsg

    def lgettext(self, message):
        """Like gettext(), but encoded for the preferred locale encoding."""
        missing = object()
        tmsg = self._catalog.get(message, missing)
        if tmsg is missing:
            if self._fallback:
                return self._fallback.lgettext(message)
            return message
        if self._output_charset:
            return tmsg.encode(self._output_charset)
        return tmsg.encode(locale.getpreferredencoding())

    def ngettext(self, msgid1, msgid2, n):
        """Plural-aware gettext(); falls back to msgid1/msgid2 on miss."""
        try:
            tmsg = self._catalog[msgid1, self.plural(n)]
            if self._output_charset:
                return tmsg.encode(self._output_charset)
            if self._charset:
                return tmsg.encode(self._charset)
            return tmsg
        except KeyError:
            if self._fallback:
                return self._fallback.ngettext(msgid1, msgid2, n)
            else:
                if n == 1:
                    return msgid1
                return msgid2

    def lngettext(self, msgid1, msgid2, n):
        """Plural-aware lgettext()."""
        try:
            tmsg = self._catalog[msgid1, self.plural(n)]
            if self._output_charset:
                return tmsg.encode(self._output_charset)
            return tmsg.encode(locale.getpreferredencoding())
        except KeyError:
            if self._fallback:
                return self._fallback.lngettext(msgid1, msgid2, n)
            else:
                if n == 1:
                    return msgid1
                return msgid2

    def ugettext(self, message):
        """Unicode gettext() (Python 2 only)."""
        missing = object()
        tmsg = self._catalog.get(message, missing)
        if tmsg is missing:
            if self._fallback:
                return self._fallback.ugettext(message)
            return unicode(message)
        return tmsg

    def ungettext(self, msgid1, msgid2, n):
        """Unicode ngettext() (Python 2 only)."""
        try:
            tmsg = self._catalog[msgid1, self.plural(n)]
        except KeyError:
            if self._fallback:
                return self._fallback.ungettext(msgid1, msgid2, n)
            if n == 1:
                tmsg = unicode(msgid1)
            else:
                tmsg = unicode(msgid2)

        return tmsg
def find(domain, localedir=None, languages=None, all=0):
    """Locate .mo file(s) for *domain*, mirroring GNU gettext's search.

    Returns a single path (or None) by default, or a list of every match
    when *all* is true.
    """
    if localedir is None:
        localedir = _default_localedir
    if languages is None:
        languages = []
        # First language-selection environment variable that is set wins.
        for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
            val = os.environ.get(envar)
            if val:
                languages = val.split(':')
                break
        if 'C' not in languages:
            languages.append('C')

    # Expand each language into its fallback variants, preserving order
    # and dropping duplicates.
    expanded = []
    for lang in languages:
        for variant in _expand_lang(lang):
            if variant not in expanded:
                expanded.append(variant)

    found = [] if all else None
    for lang in expanded:
        if lang == 'C':
            break
        mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain)
        if os.path.exists(mofile):
            if not all:
                return mofile
            found.append(mofile)
    return found
_translations = {}
def translation(domain, localedir=None, languages=None, class_=None, fallback=False, codeset=None):
    """Return a translation object for *domain*, chaining extra matches
    as fallbacks; raise IOError (or return NullTranslations when
    *fallback* is true) if no catalog is found.
    """
    if class_ is None:
        class_ = GNUTranslations
    mofiles = find(domain, localedir, languages, all=1)
    if not mofiles:
        if fallback:
            return NullTranslations()
        raise IOError(ENOENT, 'No translation file found for domain', domain)
    result = None
    for mofile in mofiles:
        key = (class_, os.path.abspath(mofile))
        cached = _translations.get(key)
        if cached is None:
            with open(mofile, 'rb') as fp:
                cached = _translations.setdefault(key, class_(fp))
        # Shallow-copy so a per-call output charset doesn't leak into the
        # shared cache entry.
        t = copy.copy(cached)
        if codeset:
            t.set_output_charset(codeset)
        if result is None:
            result = t
        else:
            result.add_fallback(t)
    return result
def install(domain, localedir=None, unicode=False, codeset=None, names=None):
    """Install _() into builtins for *domain* (never raises: fallback=True)."""
    translation(domain, localedir, fallback=True, codeset=codeset).install(
        unicode, names)
# Per-domain overrides registered through bindtextdomain() /
# bind_textdomain_codeset(), plus the current default domain.
_localedirs = {}
_localecodesets = {}
_current_domain = 'messages'


def textdomain(domain=None):
    """Get, and optionally set, the default message domain."""
    global _current_domain
    if domain is not None:
        _current_domain = domain
    return _current_domain
def bindtextdomain(domain, localedir=None):
    """Bind *domain* to *localedir*; return the directory now in effect."""
    global _localedirs
    if localedir is None:
        return _localedirs.get(domain, _default_localedir)
    _localedirs[domain] = localedir
    return localedir
def bind_textdomain_codeset(domain, codeset=None):
    """Bind *domain* to *codeset*; return the codeset now in effect."""
    global _localecodesets
    if codeset is None:
        return _localecodesets.get(domain)
    _localecodesets[domain] = codeset
    return codeset
def dgettext(domain, message):
    """Translate *message* in *domain*; return it unchanged on failure."""
    try:
        catalog = translation(domain, _localedirs.get(domain, None),
                              codeset=_localecodesets.get(domain))
    except IOError:
        # No catalog for this domain.
        return message
    return catalog.gettext(message)
def ldgettext(domain, message):
    """Like dgettext(), but encoded for the preferred locale encoding."""
    try:
        catalog = translation(domain, _localedirs.get(domain, None),
                              codeset=_localecodesets.get(domain))
    except IOError:
        return message
    return catalog.lgettext(message)
def dngettext(domain, msgid1, msgid2, n):
    """Plural-aware dgettext(); picks msgid1/msgid2 when no catalog exists."""
    try:
        catalog = translation(domain, _localedirs.get(domain, None),
                              codeset=_localecodesets.get(domain))
    except IOError:
        return msgid1 if n == 1 else msgid2
    return catalog.ngettext(msgid1, msgid2, n)
def ldngettext(domain, msgid1, msgid2, n):
    """Plural-aware ldgettext(); picks msgid1/msgid2 when no catalog exists."""
    try:
        catalog = translation(domain, _localedirs.get(domain, None),
                              codeset=_localecodesets.get(domain))
    except IOError:
        return msgid1 if n == 1 else msgid2
    return catalog.lngettext(msgid1, msgid2, n)
def gettext(message):
    """Translate *message* using the current default domain."""
    return dgettext(_current_domain, message)
def lgettext(message):
    """Locale-encoded gettext() for the current default domain."""
    return ldgettext(_current_domain, message)
def ngettext(msgid1, msgid2, n):
    """Plural-aware gettext() for the current default domain."""
    return dngettext(_current_domain, msgid1, msgid2, n)
def lngettext(msgid1, msgid2, n):
    """Plural-aware, locale-encoded gettext() for the default domain."""
    return ldngettext(_current_domain, msgid1, msgid2, n)
Catalog = translation
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests dealing with HTTP rate-limiting.
"""
from xml.dom import minidom
from lxml import etree
from oslo_serialization import jsonutils
import six
from six.moves import http_client
from six.moves import range
import webob
from cinder.api.v1 import limits
from cinder.api import views
from cinder.api import xmlutil
import cinder.context
from cinder import test
# Rate limits shared by the limit test suites below:
# (HTTP verb, display URI, matching regex, max requests, time unit).
TEST_LIMITS = [
    limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE),
    limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE),
    limits.Limit("POST", "/volumes", "^/volumes", 3, limits.PER_MINUTE),
    limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE),
    limits.Limit("PUT", "/volumes", "^/volumes", 5, limits.PER_MINUTE),
]

# XML namespaces used when inspecting serialized limit documents.
NS = {
    'atom': 'http://www.w3.org/2005/Atom',
    'ns': 'http://docs.openstack.org/common/api/v1.0'
}
class BaseLimitTestSuite(test.TestCase):
    """Base test suite which provides relevant stubs and time abstraction."""

    def setUp(self):
        super(BaseLimitTestSuite, self).setUp()
        # Virtual clock: limits.Limit reads time through _get_time, so
        # tests can advance it deterministically via self.time.
        self.time = 0.0
        self.stubs.Set(limits.Limit, "_get_time", self._get_time)
        # Quota values returned by the stubbed get_project_quotas below.
        self.absolute_limits = {}

        def stub_get_project_quotas(context, project_id, usages=True):
            return {k: dict(limit=v) for k, v in self.absolute_limits.items()}

        # NOTE(review): relies on cinder.quota being importable as an
        # attribute of the already-imported cinder package — confirm.
        self.stubs.Set(cinder.quota.QUOTAS, "get_project_quotas",
                       stub_get_project_quotas)

    def _get_time(self):
        """Return the "time" according to this test suite."""
        return self.time
class LimitsControllerTest(BaseLimitTestSuite):
    """Tests for `limits.LimitsController` class."""

    def setUp(self):
        """Run before each test."""
        super(LimitsControllerTest, self).setUp()
        self.controller = limits.create_resource()

    def _get_index_request(self, accept_header="application/json"):
        """Helper to set routing arguments."""
        request = webob.Request.blank("/")
        request.accept = accept_header
        request.environ["wsgiorg.routing_args"] = (None, {
            "action": "index",
            "controller": "",
        })
        context = cinder.context.RequestContext('testuser', 'testproject')
        request.environ["cinder.context"] = context
        return request

    def _populate_limits(self, request):
        """Put limit info into a request."""
        _limits = [
            limits.Limit("GET", "*", ".*", 10, 60).display(),
            limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
            limits.Limit("GET", "changes-since*", "changes-since",
                         5, 60).display(),
        ]
        request.environ["cinder.limits"] = _limits
        return request

    def test_empty_index_json(self):
        """Test getting empty limit details in JSON."""
        request = self._get_index_request()
        response = request.get_response(self.controller)
        expected = {
            "limits": {
                "rate": [],
                "absolute": {},
            },
        }
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body)

    def test_index_json(self):
        """Test getting limit details in JSON."""
        request = self._get_index_request()
        request = self._populate_limits(request)
        self.absolute_limits = {
            'gigabytes': 512,
            'volumes': 5,
        }
        response = request.get_response(self.controller)
        # Limits sharing the same regex/uri are grouped under one entry.
        expected = {
            "limits": {
                "rate": [
                    {
                        "regex": ".*",
                        "uri": "*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00",
                                "unit": "MINUTE",
                                "value": 10,
                                "remaining": 10,
                            },
                            {
                                "verb": "POST",
                                "next-available": "1970-01-01T00:00:00",
                                "unit": "HOUR",
                                "value": 5,
                                "remaining": 5,
                            },
                        ],
                    },
                    {
                        "regex": "changes-since",
                        "uri": "changes-since*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00",
                                "unit": "MINUTE",
                                "value": 5,
                                "remaining": 5,
                            },
                        ],
                    },
                ],
                # Quota-backed absolute limits are renamed for the view.
                "absolute": {"maxTotalVolumeGigabytes": 512,
                             "maxTotalVolumes": 5, },
            },
        }
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body)

    def _populate_limits_diff_regex(self, request):
        """Put limit info into a request."""
        _limits = [
            limits.Limit("GET", "*", ".*", 10, 60).display(),
            limits.Limit("GET", "*", "*.*", 10, 60).display(),
        ]
        request.environ["cinder.limits"] = _limits
        return request

    def test_index_diff_regex(self):
        """Test getting limit details in JSON."""
        request = self._get_index_request()
        request = self._populate_limits_diff_regex(request)
        response = request.get_response(self.controller)
        # Same uri but different regexes must NOT be merged.
        expected = {
            "limits": {
                "rate": [
                    {
                        "regex": ".*",
                        "uri": "*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00",
                                "unit": "MINUTE",
                                "value": 10,
                                "remaining": 10,
                            },
                        ],
                    },
                    {
                        "regex": "*.*",
                        "uri": "*",
                        "limit": [
                            {
                                "verb": "GET",
                                "next-available": "1970-01-01T00:00:00",
                                "unit": "MINUTE",
                                "value": 10,
                                "remaining": 10,
                            },
                        ],
                    },
                ],
                "absolute": {},
            },
        }
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body)

    def _test_index_absolute_limits_json(self, expected):
        """Assert the 'absolute' section of the index response."""
        request = self._get_index_request()
        response = request.get_response(self.controller)
        body = jsonutils.loads(response.body)
        self.assertEqual(expected, body['limits']['absolute'])

    def test_index_ignores_extra_absolute_limits_json(self):
        # Quota keys the view doesn't know about are silently dropped.
        self.absolute_limits = {'unknown_limit': 9001}
        self._test_index_absolute_limits_json({})
class TestLimiter(limits.Limiter):
    """Marker subclass used to verify custom limiter-class selection."""
    pass
class LimitMiddlewareTest(BaseLimitTestSuite):
    """Tests for the `limits.RateLimitingMiddleware` class."""

    @webob.dec.wsgify
    def _empty_app(self, request):
        """Do-nothing WSGI app."""
        pass

    def setUp(self):
        """Prepare middleware for use through fake WSGI app."""
        super(LimitMiddlewareTest, self).setUp()
        _limits = '(GET, *, .*, 1, MINUTE)'
        self.app = limits.RateLimitingMiddleware(self._empty_app, _limits,
                                                 "%s.TestLimiter" %
                                                 self.__class__.__module__)

    def test_limit_class(self):
        """Test that middleware selected correct limiter class."""
        # Use the unittest assertion instead of a bare `assert`, which is
        # stripped under python -O and gives poorer failure output.
        self.assertIsInstance(self.app._limiter, TestLimiter)

    def test_good_request(self):
        """Test successful GET request through middleware."""
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(200, response.status_int)

    def test_limited_request_json(self):
        """Test a rate-limited (413) GET request through middleware."""
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(200, response.status_int)

        # Second request within the same minute exceeds the 1/min limit.
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(413, response.status_int)

        self.assertIn('Retry-After', response.headers)
        retry_after = int(response.headers['Retry-After'])
        self.assertAlmostEqual(retry_after, 60, 1)

        body = jsonutils.loads(response.body)
        expected = "Only 1 GET request(s) can be made to * every minute."
        value = body["overLimitFault"]["details"].strip()
        self.assertEqual(expected, value)

    def test_limited_request_xml(self):
        """Test a rate-limited (413) response as XML."""
        request = webob.Request.blank("/")
        response = request.get_response(self.app)
        self.assertEqual(200, response.status_int)

        request = webob.Request.blank("/")
        request.accept = "application/xml"
        response = request.get_response(self.app)
        self.assertEqual(413, response.status_int)

        root = minidom.parseString(response.body).childNodes[0]
        expected = "Only 1 GET request(s) can be made to * every minute."

        details = root.getElementsByTagName("details")
        self.assertEqual(1, details.length)

        value = details.item(0).firstChild.data.strip()
        self.assertEqual(expected, value)
class LimitTest(BaseLimitTestSuite):
    """Tests for the `limits.Limit` class."""

    def test_GET_no_delay(self):
        """A 1-GET-per-second limit allows the first request at once."""
        rate_limit = limits.Limit("GET", "*", ".*", 1, 1)
        wait = rate_limit("GET", "/anything")
        self.assertIsNone(wait)
        self.assertEqual(0, rate_limit.next_request)
        self.assertEqual(0, rate_limit.last_request)

    def test_GET_delay(self):
        """A second immediate GET against a 1/s limit must wait."""
        rate_limit = limits.Limit("GET", "*", ".*", 1, 1)
        self.assertIsNone(rate_limit("GET", "/anything"))

        wait = rate_limit("GET", "/anything")
        self.assertEqual(1, wait)
        self.assertEqual(1, rate_limit.next_request)
        self.assertEqual(0, rate_limit.last_request)

        # After the window has passed, requests flow again.
        self.time += 4
        self.assertIsNone(rate_limit("GET", "/anything"))
        self.assertEqual(4, rate_limit.next_request)
        self.assertEqual(4, rate_limit.last_request)
class ParseLimitsTest(BaseLimitTestSuite):
    """Tests for the default limits parser in the `limits.Limiter` class."""

    def test_invalid(self):
        """Test that parse_limits() handles invalid input correctly."""
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          ';;;;;')

    def test_bad_rule(self):
        """Test that parse_limits() handles bad rules correctly."""
        # Missing the surrounding parentheses.
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          'GET, *, .*, 20, minute')

    def test_missing_arg(self):
        """Test that parse_limits() handles missing args correctly."""
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          '(GET, *, .*, 20)')

    def test_bad_value(self):
        """Test that parse_limits() handles bad values correctly."""
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          '(GET, *, .*, foo, minute)')

    def test_bad_unit(self):
        """Test that parse_limits() handles bad units correctly."""
        self.assertRaises(ValueError, limits.Limiter.parse_limits,
                          '(GET, *, .*, 20, lightyears)')

    def test_multiple_rules(self):
        """Test that parse_limits() handles multiple rules correctly."""
        try:
            # `parsed` instead of `l` (ambiguous single-letter name).
            parsed = limits.Limiter.parse_limits(
                '(get, *, .*, 20, minute);'
                '(PUT, /foo*, /foo.*, 10, hour);'
                '(POST, /bar*, /bar.*, 5, second);'
                '(Say, /derp*, /derp.*, 1, day)')
        except ValueError as e:
            # self.fail instead of `assert False`: bare asserts are
            # stripped under `python -O`, which would silently pass.
            self.fail(str(e))

        # Make sure the number of returned limits are correct
        self.assertEqual(4, len(parsed))

        # Check all the verbs... (the parser upper-cases them)
        expected = ['GET', 'PUT', 'POST', 'SAY']
        self.assertEqual(expected, [t.verb for t in parsed])

        # ...the URIs...
        expected = ['*', '/foo*', '/bar*', '/derp*']
        self.assertEqual(expected, [t.uri for t in parsed])

        # ...the regexes...
        expected = ['.*', '/foo.*', '/bar.*', '/derp.*']
        self.assertEqual(expected, [t.regex for t in parsed])

        # ...the values...
        expected = [20, 10, 5, 1]
        self.assertEqual(expected, [t.value for t in parsed])

        # ...and the units...
        expected = [limits.PER_MINUTE, limits.PER_HOUR,
                    limits.PER_SECOND, limits.PER_DAY]
        self.assertEqual(expected, [t.unit for t in parsed])
class LimiterTest(BaseLimitTestSuite):
    """Tests for the in-memory `limits.Limiter` class."""

    def setUp(self):
        """Run before each test."""
        super(LimiterTest, self).setUp()
        # Per-user overrides: user3 gets an empty rule set (no limits),
        # while user0 gets stricter GET (4/min) and PUT (2/min) limits
        # than the TEST_LIMITS defaults.
        userlimits = {'limits.user3': '',
                      'limits.user0': '(get, *, .*, 4, minute);'
                                      '(put, *, .*, 2, minute)'}
        self.limiter = limits.Limiter(TEST_LIMITS, **userlimits)

    def _check(self, num, verb, url, username=None):
        """Check and yield results from checks.

        Yields only the delay element of each (delay, error) pair.
        """
        for x in range(num):
            yield self.limiter.check_for_delay(verb, url, username)[0]

    def _check_sum(self, num, verb, url, username=None):
        """Check and sum results from checks (ignoring None entries)."""
        results = self._check(num, verb, url, username)
        return sum(item for item in results if item)

    def test_no_delay_GET(self):
        """No delay on a single call for a limit verb we didn't set."""
        delay = self.limiter.check_for_delay("GET", "/anything")
        self.assertEqual((None, None), delay)

    def test_no_delay_PUT(self):
        """No delay on a single call for a known limit."""
        delay = self.limiter.check_for_delay("PUT", "/anything")
        self.assertEqual((None, None), delay)

    def test_delay_PUT(self):
        """Test delay on 11th put request.

        The 11th PUT will result in a delay of 6.0 seconds until
        the next request will be granted.
        """
        expected = [None] * 10 + [6.0]
        results = list(self._check(11, "PUT", "/anything"))
        self.assertEqual(expected, results)

    def test_delay_POST(self):
        """Test delay of 8th post request.

        Ensure that the 8th POST will result in a delay of 60/7 seconds
        until the next request will be granted.
        (NOTE: original docstring claimed 6.0 seconds; the assertion
        below checks 60.0 / 7.0.)
        """
        expected = [None] * 7
        results = list(self._check(7, "POST", "/anything"))
        self.assertEqual(expected, results)

        expected = 60.0 / 7.0
        results = self._check_sum(1, "POST", "/anything")
        self.assertAlmostEqual(expected, results, 8)

    def test_delay_GET(self):
        """Ensure the 11th GET will result in NO delay."""
        expected = [None] * 11
        results = list(self._check(11, "GET", "/anything"))
        self.assertEqual(expected, results)

        # user0 is limited to 4 GETs per minute, so the 5th waits 15 s.
        expected = [None] * 4 + [15.0]
        results = list(self._check(5, "GET", "/foo", "user0"))
        self.assertEqual(expected, results)

    def test_delay_PUT_volumes(self):
        """Test limit of PUT on /volumes.

        Ensure PUT on /volumes limits at 5 requests, and PUT elsewhere is
        still OK after 5 requests...
        but then after 11 total requests, PUT limiting kicks in.
        """
        # First 6 requests on PUT /volumes
        expected = [None] * 5 + [12.0]
        results = list(self._check(6, "PUT", "/volumes"))
        self.assertEqual(expected, results)

        # Next 5 request on PUT /anything
        expected = [None] * 4 + [6.0]
        results = list(self._check(5, "PUT", "/anything"))
        self.assertEqual(expected, results)

    def test_delay_PUT_wait(self):
        """Test limit on PUT is lifted.

        Ensure after hitting the limit and then waiting for the correct
        amount of time, the limit will be lifted.
        """
        expected = [None] * 10 + [6.0]
        results = list(self._check(11, "PUT", "/anything"))
        self.assertEqual(expected, results)

        # Advance time
        self.time += 6.0
        expected = [None, 6.0]
        results = list(self._check(2, "PUT", "/anything"))
        self.assertEqual(expected, results)

    def test_multiple_delays(self):
        """Ensure multiple requests still get a delay."""
        expected = [None] * 10 + [6.0] * 10
        results = list(self._check(20, "PUT", "/anything"))
        self.assertEqual(expected, results)

        # One second later the remaining wait has shrunk accordingly.
        self.time += 1.0
        expected = [5.0] * 10
        results = list(self._check(10, "PUT", "/anything"))
        self.assertEqual(expected, results)

        # user0's tighter 2 PUT/min override yields a 30 second wait.
        expected = [None] * 2 + [30.0] * 8
        results = list(self._check(10, "PUT", "/anything", "user0"))
        self.assertEqual(expected, results)

    def test_user_limit(self):
        """Test user-specific limits."""
        self.assertEqual([], self.limiter.levels['user3'])
        self.assertEqual(2, len(self.limiter.levels['user0']))

    def test_multiple_users(self):
        """Tests involving multiple users."""
        # User0
        expected = [None] * 2 + [30.0] * 8
        results = list(self._check(10, "PUT", "/anything", "user0"))
        self.assertEqual(expected, results)

        # User1
        expected = [None] * 10 + [6.0] * 10
        results = list(self._check(20, "PUT", "/anything", "user1"))
        self.assertEqual(expected, results)

        # User2
        expected = [None] * 10 + [6.0] * 5
        results = list(self._check(15, "PUT", "/anything", "user2"))
        self.assertEqual(expected, results)

        # User3 (unlimited override)
        expected = [None] * 20
        results = list(self._check(20, "PUT", "/anything", "user3"))
        self.assertEqual(expected, results)

        self.time += 1.0

        # User1 again
        expected = [5.0] * 10
        results = list(self._check(10, "PUT", "/anything", "user1"))
        self.assertEqual(expected, results)

        self.time += 1.0

        # User2 again (original comment wrongly said "User1 again")
        expected = [4.0] * 5
        results = list(self._check(5, "PUT", "/anything", "user2"))
        self.assertEqual(expected, results)

        # User0 again
        expected = [28.0]
        results = list(self._check(1, "PUT", "/anything", "user0"))
        self.assertEqual(expected, results)

        self.time += 28.0

        expected = [None, 30.0]
        results = list(self._check(2, "PUT", "/anything", "user0"))
        self.assertEqual(expected, results)
class WsgiLimiterTest(BaseLimitTestSuite):
    """Tests for `limits.WsgiLimiter` class."""

    def setUp(self):
        """Run before each test."""
        super(WsgiLimiterTest, self).setUp()
        self.app = limits.WsgiLimiter(TEST_LIMITS)

    def _request_data(self, verb, path):
        """Get data describing a limit request verb/path."""
        return jsonutils.dumps({"verb": verb, "path": path})

    def _request(self, verb, url, username=None):
        """POST a limit check to the WSGI limiter and return any delay.

        A 403 with an X-Wait-Seconds header means the request was
        limited; the header value is returned.  A 204 means the request
        was allowed and None is returned.
        """
        path = "/%s" % username if username else "/"
        check = webob.Request.blank(path)
        check.method = "POST"
        check.body = self._request_data(verb, url)

        response = check.get_response(self.app)

        if "X-Wait-Seconds" not in response.headers:
            self.assertEqual(204, response.status_int)
            return None

        self.assertEqual(403, response.status_int)
        return response.headers["X-Wait-Seconds"]

    def test_invalid_methods(self):
        """Only POSTs should work."""
        for method in ("GET", "PUT", "DELETE", "HEAD", "OPTIONS"):
            bad = webob.Request.blank("/", method=method)
            response = bad.get_response(self.app)
            self.assertEqual(405, response.status_int)

    def test_good_url(self):
        self.assertIsNone(self._request("GET", "/something"))

    def test_escaping(self):
        self.assertIsNone(self._request("GET", "/something/jump%20up"))

    def test_response_to_delays(self):
        # First request is allowed, second one trips the 1/min limit.
        self.assertIsNone(self._request("GET", "/delayed"))
        self.assertEqual('60.00', self._request("GET", "/delayed"))

    def test_response_to_delays_usernames(self):
        # Each user tracks an independent rate-limit level.
        for user in ("user1", "user2"):
            self.assertIsNone(self._request("GET", "/delayed", user))
        for user in ("user1", "user2"):
            self.assertEqual('60.00', self._request("GET", "/delayed", user))
class FakeHttplibSocket(object):
    """Fake `http_client.HTTPResponse` replacement."""

    def __init__(self, response_string):
        """Hold the canned response in an in-memory buffer."""
        self._response_buffer = six.StringIO(response_string)

    def makefile(self, _mode, _other):
        """Returns the socket's internal buffer."""
        return self._response_buffer
class FakeHttplibConnection(object):
    """Fake `http_client.HTTPConnection`."""

    def __init__(self, app, host):
        """Initialize `FakeHttplibConnection`."""
        self.app = app
        self.host = host

    def request(self, method, path, body="", headers=None):
        """Fake method for request.

        Translates the request into a WSGI call against ``self.app``,
        then captures the result as a real `http_client.HTTPResponse`.
        """
        wsgi_req = webob.Request.blank(path)
        wsgi_req.method = method
        wsgi_req.headers = headers or {}
        wsgi_req.host = self.host
        wsgi_req.body = body

        # Serialize the WSGI response and feed it back through a fake
        # socket so http_client parses it as if it came off the wire.
        raw = "HTTP/1.0 %s" % str(wsgi_req.get_response(self.app))
        self.http_response = http_client.HTTPResponse(FakeHttplibSocket(raw))
        self.http_response.begin()

    def getresponse(self):
        """Return our generated response from the request."""
        return self.http_response
def wire_HTTPConnection_to_WSGI(host, app):
    """Monkeypatches HTTPConnection.

    After this call, `http_client.HTTPConnection(host)` yields a fake
    connection whose requests are routed straight into the given WSGI
    `app` rather than through a socket.  Connections to other hosts are
    unaffected, and the function may be called repeatedly to map
    different hosts to different apps.

    Returns the original HTTPConnection class so the caller can restore
    the default behaviour for all hosts.
    """
    class HTTPConnectionDecorator(object):
        """Wrap HTTPConnection so matching hosts get a fake instance."""

        def __init__(self, wrapped):
            self.wrapped = wrapped

        def __call__(self, connection_host, *args, **kwargs):
            # Guard clause: anything but the wired host is passed
            # through to the real class untouched.
            if connection_host != host:
                return self.wrapped(connection_host, *args, **kwargs)
            return FakeHttplibConnection(app, host)

    original = http_client.HTTPConnection
    http_client.HTTPConnection = HTTPConnectionDecorator(original)
    return original
class WsgiLimiterProxyTest(BaseLimitTestSuite):
    """Tests for the `limits.WsgiLimiterProxy` class."""

    def setUp(self):
        """setUp for test suite.

        Wire http_client directly to a WsgiLimiter app so the proxy can
        talk to it without any real networking.
        """
        super(WsgiLimiterProxyTest, self).setUp()
        self.app = limits.WsgiLimiter(TEST_LIMITS)
        self.oldHTTPConnection = (
            wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app))
        self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80")
        self.addCleanup(self._restore, self.oldHTTPConnection)

    def _restore(self, oldHTTPConnection):
        # restore original HTTPConnection object
        http_client.HTTPConnection = oldHTTPConnection

    def test_200(self):
        """Successful request test."""
        self.assertEqual((None, None),
                         self.proxy.check_for_delay("GET", "/anything"))

    def test_403(self):
        """Forbidden request test."""
        # The first request is allowed; the second trips the limit.
        self.assertEqual((None, None),
                         self.proxy.check_for_delay("GET", "/delayed"))

        delay, error = self.proxy.check_for_delay("GET", "/delayed")
        expected = ("60.00", "403 Forbidden\n\nOnly 1 GET request(s) can be "
                    "made to /delayed every minute.")
        self.assertEqual(expected, (delay, error.strip()))
class LimitsViewBuilderTest(test.TestCase):
    """Tests for the limits view builder (rate + absolute output)."""

    def setUp(self):
        super(LimitsViewBuilderTest, self).setUp()
        self.view_builder = views.limits.ViewBuilder()
        # Raw rate-limit data as produced by the limiter; resetTime is a
        # Unix timestamp that the view renders as "next-available".
        self.rate_limits = [{"URI": "*",
                             "regex": ".*",
                             "value": 10,
                             "verb": "POST",
                             "remaining": 2,
                             "unit": "MINUTE",
                             "resetTime": 1311272226},
                            {"URI": "*/volumes",
                             "regex": "^/volumes",
                             "value": 50,
                             "verb": "POST",
                             "remaining": 10,
                             "unit": "DAY",
                             "resetTime": 1311272226}]
        self.absolute_limits = {"metadata_items": 1,
                                "injected_files": 5,
                                "injected_file_content_bytes": 5}

    def test_build_limits(self):
        # ISO rendering of the resetTime fixture value above.
        tdate = "2011-07-21T18:17:06"
        # Note: the builder maps the internal absolute-limit names
        # (metadata_items, injected_files, ...) to the public
        # maxServerMeta/maxImageMeta/maxPersonality* keys.
        expected_limits = \
            {"limits": {"rate": [{"uri": "*",
                                  "regex": ".*",
                                  "limit": [{"value": 10,
                                             "verb": "POST",
                                             "remaining": 2,
                                             "unit": "MINUTE",
                                             "next-available": tdate}]},
                                 {"uri": "*/volumes",
                                  "regex": "^/volumes",
                                  "limit": [{"value": 50,
                                             "verb": "POST",
                                             "remaining": 10,
                                             "unit": "DAY",
                                             "next-available": tdate}]}],
                        "absolute": {"maxServerMeta": 1,
                                     "maxImageMeta": 1,
                                     "maxPersonality": 5,
                                     "maxPersonalitySize": 5}}}
        output = self.view_builder.build(self.rate_limits,
                                         self.absolute_limits)
        self.assertDictMatch(output, expected_limits)

    def test_build_limits_empty_limits(self):
        # Empty inputs must still produce the full response skeleton.
        expected_limits = {"limits": {"rate": [],
                                      "absolute": {}}}
        abs_limits = {}
        rate_limits = []
        output = self.view_builder.build(rate_limits, abs_limits)
        self.assertDictMatch(output, expected_limits)
class LimitsXMLSerializationTest(test.TestCase):
    """Tests for XML serialization of the limits view."""

    def test_xml_declaration(self):
        serializer = limits.LimitsTemplate()
        fixture = {"limits": {
                   "rate": [],
                   "absolute": {}}}

        output = serializer.serialize(fixture)
        # The serialized document must start with an XML declaration.
        has_dec = output.startswith("<?xml version='1.0' encoding='UTF-8'?>")
        self.assertTrue(has_dec)

    def test_index(self):
        serializer = limits.LimitsTemplate()
        fixture = {
            "limits": {
                "rate": [{
                    "uri": "*",
                    "regex": ".*",
                    "limit": [{
                        "value": 10,
                        "verb": "POST",
                        "remaining": 2,
                        "unit": "MINUTE",
                        "next-available": "2011-12-15T22:42:45Z"}]},
                    {"uri": "*/servers",
                     "regex": "^/servers",
                     "limit": [{
                         "value": 50,
                         "verb": "POST",
                         "remaining": 10,
                         "unit": "DAY",
                         "next-available": "2011-12-15T22:42:45Z"}]}],
                "absolute": {"maxServerMeta": 1,
                             "maxImageMeta": 1,
                             "maxPersonality": 5,
                             "maxPersonalitySize": 10240}}}

        output = serializer.serialize(fixture)
        root = etree.XML(output)
        # Validate against the 'limits' schema before checking elements.
        xmlutil.validate_schema(root, 'limits')

        # verify absolute limits
        absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
        self.assertEqual(4, len(absolutes))
        for limit in absolutes:
            name = limit.get('name')
            value = limit.get('value')
            self.assertEqual(str(fixture['limits']['absolute'][name]), value)

        # verify rate limits
        rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
        self.assertEqual(2, len(rates))
        for i, rate in enumerate(rates):
            for key in ['uri', 'regex']:
                self.assertEqual(str(fixture['limits']['rate'][i][key]),
                                 rate.get(key))
            rate_limits = rate.xpath('ns:limit', namespaces=NS)
            self.assertEqual(1, len(rate_limits))
            for j, limit in enumerate(rate_limits):
                for key in ['verb', 'value', 'remaining', 'unit',
                            'next-available']:
                    self.assertEqual(
                        str(fixture['limits']['rate'][i]['limit'][j][key]),
                        limit.get(key))

    def test_index_no_limits(self):
        serializer = limits.LimitsTemplate()
        fixture = {"limits": {
                   "rate": [],
                   "absolute": {}}}

        output = serializer.serialize(fixture)
        root = etree.XML(output)
        xmlutil.validate_schema(root, 'limits')

        # verify absolute limits (none expected)
        absolutes = root.xpath('ns:absolute/ns:limit', namespaces=NS)
        self.assertEqual(0, len(absolutes))

        # verify rate limits (none expected)
        rates = root.xpath('ns:rates/ns:rate', namespaces=NS)
        self.assertEqual(0, len(rates))
| |
"""
@package mi.instrument.noaa.botpt.ooicore.test.test_driver
@file marine-integrations/mi/instrument/noaa/botpt/ooicore/test/test_driver.py
@author Pete Cable
@brief Test cases for ooicore driver
USAGE:
Make tests verbose and provide stdout
* From the IDK
$ bin/test_driver
$ bin/test_driver -u [-t testname]
$ bin/test_driver -i [-t testname]
$ bin/test_driver -q [-t testname]
"""
import time
import ntplib
import unittest
import mi.instrument.noaa.botpt.ooicore.particles as particles
from mi.core.instrument.port_agent_client import PortAgentPacket
from mock import Mock, call
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.idk.unit_test import InstrumentDriverTestCase
from mi.idk.unit_test import InstrumentDriverUnitTestCase
from mi.idk.unit_test import InstrumentDriverIntegrationTestCase
from mi.idk.unit_test import InstrumentDriverQualificationTestCase
from mi.idk.unit_test import DriverTestMixin
from mi.idk.unit_test import ParameterTestConfigKey
from mi.idk.unit_test import AgentCapabilityType
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_driver import DriverConfigKey
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.core.exceptions import InstrumentDataException, SampleException, InstrumentProtocolException
from mi.instrument.noaa.botpt.ooicore.driver import Prompt, ScheduledJob
from mi.instrument.noaa.botpt.ooicore.driver import Parameter
from mi.instrument.noaa.botpt.ooicore.driver import ProtocolState
from mi.instrument.noaa.botpt.ooicore.driver import ProtocolEvent
from mi.instrument.noaa.botpt.ooicore.driver import InstrumentDriver
from mi.instrument.noaa.botpt.ooicore.driver import Protocol
from mi.instrument.noaa.botpt.ooicore.driver import ParameterConstraint
from mi.instrument.noaa.botpt.ooicore.driver import Capability
from mi.instrument.noaa.botpt.ooicore.driver import InstrumentCommands
from mi.instrument.noaa.botpt.ooicore.driver import NEWLINE
import mi.instrument.noaa.botpt.ooicore.test.test_samples as samples
from mi.core.exceptions import BadRequest, ResourceError
__author__ = 'Pete Cable'
__license__ = 'Apache 2.0'
log = get_logger()
# Startup configuration handed to the driver under test; the values here
# must agree with the expected parameter VALUEs declared in the test
# mixin's _driver_parameters map.
botpt_startup_config = {
    DriverConfigKey.PARAMETERS: {
        Parameter.AUTO_RELEVEL: True,
        Parameter.LEVELING_TIMEOUT: 600,
        Parameter.XTILT_TRIGGER: 300.0,
        Parameter.YTILT_TRIGGER: 300.0,
        Parameter.OUTPUT_RATE: 40,
    }
}
# ##
# Driver parameters for the tests
# ##
InstrumentDriverTestCase.initialize(
    driver_module='mi.instrument.noaa.botpt.ooicore.driver',
    driver_class="InstrumentDriver",
    instrument_agent_resource_id='1D644T',
    instrument_agent_name='noaa_botpt_ooicore',
    instrument_agent_packet_config=particles.DataParticleType(),
    driver_startup_config=botpt_startup_config
)

# Seconds allowed for the instrument agent to reach the active state.
GO_ACTIVE_TIMEOUT = 180
#################################### RULES ####################################
# #
# Common capabilities in the base class #
# #
# Instrument specific stuff in the derived class #
# #
# Generator spits out either stubs or comments describing test this here, #
# test that there. #
# #
# Qualification tests are driven through the instrument_agent #
# #
###############################################################################
###############################################################################
# DRIVER TEST MIXIN #
# Defines a set of constants and assert methods used for data particle #
# verification #
# #
# In python mixin classes are classes designed such that they wouldn't be #
# able to stand on their own, but are inherited by other classes generally #
# using multiple inheritance. #
# #
# This class defines a configuration structure for testing and common assert #
# methods for validating data particles. #
###############################################################################
class BotptTestMixinSub(DriverTestMixin):
    """Mixin holding BOTPT fixture data and particle assertion helpers."""

    # Short aliases for the ParameterTestConfigKey constants used in the
    # configuration dictionaries below.
    TYPE = ParameterTestConfigKey.TYPE
    READONLY = ParameterTestConfigKey.READONLY
    STARTUP = ParameterTestConfigKey.STARTUP
    DA = ParameterTestConfigKey.DIRECT_ACCESS
    VALUE = ParameterTestConfigKey.VALUE
    REQUIRED = ParameterTestConfigKey.REQUIRED
    DEFAULT = ParameterTestConfigKey.DEFAULT
    STATES = ParameterTestConfigKey.STATES

    # Expected type/access/startup/value metadata for every driver
    # parameter; used by assert_driver_parameters below.
    _driver_parameters = {
        # Parameters defined in the IOS
        # RW
        Parameter.AUTO_RELEVEL: {TYPE: bool, READONLY: False, DA: False, STARTUP: True, VALUE: True},
        Parameter.XTILT_TRIGGER: {TYPE: float, READONLY: False, DA: False, STARTUP: True, VALUE: 300},
        Parameter.YTILT_TRIGGER: {TYPE: float, READONLY: False, DA: False, STARTUP: True, VALUE: 300},
        Parameter.LEVELING_TIMEOUT: {TYPE: int, READONLY: False, DA: False, STARTUP: True, VALUE: 600},
        Parameter.OUTPUT_RATE: {TYPE: int, READONLY: False, DA: False, STARTUP: True, VALUE: 40},
        # RO
        Parameter.LILY_LEVELING: {TYPE: bool, READONLY: True, DA: False, STARTUP: False, VALUE: False},
        Parameter.LEVELING_FAILED: {TYPE: bool, READONLY: True, DA: False, STARTUP: False, VALUE: False},
    }
_samples = [samples.LILY_VALID_SAMPLE_01, samples.LILY_VALID_SAMPLE_02, samples.HEAT_VALID_SAMPLE_01,
samples.HEAT_VALID_SAMPLE_02, samples.IRIS_VALID_SAMPLE_01, samples.IRIS_VALID_SAMPLE_02,
samples.NANO_VALID_SAMPLE_01, samples.NANO_VALID_SAMPLE_02, samples.LEVELING_STATUS,
samples.SWITCHING_STATUS, samples.LEVELED_STATUS, samples.X_OUT_OF_RANGE, samples.Y_OUT_OF_RANGE]
_driver_capabilities = {
# capabilities defined in the IOS
Capability.DISCOVER: {STATES: [ProtocolState.UNKNOWN]},
Capability.ACQUIRE_STATUS: {STATES: [ProtocolState.AUTOSAMPLE]},
Capability.START_LEVELING: {STATES: [ProtocolState.AUTOSAMPLE]},
Capability.STOP_LEVELING: {STATES: [ProtocolState.AUTOSAMPLE]},
}
_capabilities = {
ProtocolState.UNKNOWN: ['DRIVER_EVENT_DISCOVER'],
ProtocolState.AUTOSAMPLE: ['DRIVER_EVENT_GET',
'DRIVER_EVENT_SET',
'DRIVER_EVENT_ACQUIRE_STATUS',
'DRIVER_EVENT_START_AUTOSAMPLE',
'DRIVER_EVENT_START_DIRECT',
'PROTOCOL_EVENT_START_LEVELING',
'PROTOCOL_EVENT_STOP_LEVELING',
'PROTOCOL_EVENT_LEVELING_TIMEOUT',
'PROTOCOL_EVENT_NANO_TIME_SYNC'],
ProtocolState.DIRECT_ACCESS: ['DRIVER_EVENT_STOP_DIRECT',
'EXECUTE_DIRECT'],
}
lily_sample_parameters_01 = {
particles.LilySampleParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'LILY', REQUIRED: True},
particles.LilySampleParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/06/24 23:36:02', REQUIRED: True},
particles.LilySampleParticleKey.X_TILT: {TYPE: float, VALUE: -235.500, REQUIRED: True},
particles.LilySampleParticleKey.Y_TILT: {TYPE: float, VALUE: 25.930, REQUIRED: True},
particles.LilySampleParticleKey.MAG_COMPASS: {TYPE: float, VALUE: 194.30, REQUIRED: True},
particles.LilySampleParticleKey.TEMP: {TYPE: float, VALUE: 26.04, REQUIRED: True},
particles.LilySampleParticleKey.SUPPLY_VOLTS: {TYPE: float, VALUE: 11.96, REQUIRED: True},
particles.LilySampleParticleKey.SN: {TYPE: unicode, VALUE: 'N9655', REQUIRED: True},
}
lily_sample_parameters_02 = {
particles.LilySampleParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'LILY', REQUIRED: True},
particles.LilySampleParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/06/24 23:36:04', REQUIRED: True},
particles.LilySampleParticleKey.X_TILT: {TYPE: float, VALUE: -235.349, REQUIRED: True},
particles.LilySampleParticleKey.Y_TILT: {TYPE: float, VALUE: 26.082, REQUIRED: True},
particles.LilySampleParticleKey.MAG_COMPASS: {TYPE: float, VALUE: 194.26, REQUIRED: True},
particles.LilySampleParticleKey.TEMP: {TYPE: float, VALUE: 26.04, REQUIRED: True},
particles.LilySampleParticleKey.SUPPLY_VOLTS: {TYPE: float, VALUE: 11.96, REQUIRED: True},
particles.LilySampleParticleKey.SN: {TYPE: unicode, VALUE: 'N9655', REQUIRED: True},
}
nano_sample_parameters_01 = {
particles.NanoSampleParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'NANO', REQUIRED: True},
particles.NanoSampleParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/08/22 22:48:36.013', REQUIRED: True},
particles.NanoSampleParticleKey.PRESSURE: {TYPE: float, VALUE: 13.888533, REQUIRED: True},
particles.NanoSampleParticleKey.TEMP: {TYPE: float, VALUE: 26.147947328, REQUIRED: True},
particles.NanoSampleParticleKey.PPS_SYNC: {TYPE: unicode, VALUE: u'V', REQUIRED: True},
}
nano_sample_parameters_02 = {
particles.NanoSampleParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'NANO', REQUIRED: True},
particles.NanoSampleParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/08/22 23:13:36.000', REQUIRED: True},
particles.NanoSampleParticleKey.PRESSURE: {TYPE: float, VALUE: 13.884067, REQUIRED: True},
particles.NanoSampleParticleKey.TEMP: {TYPE: float, VALUE: 26.172926006, REQUIRED: True},
particles.NanoSampleParticleKey.PPS_SYNC: {TYPE: unicode, VALUE: u'P', REQUIRED: True},
}
iris_sample_parameters_01 = {
particles.IrisSampleParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'IRIS', REQUIRED: True},
particles.IrisSampleParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/05/29 00:25:34', REQUIRED: True},
particles.IrisSampleParticleKey.X_TILT: {TYPE: float, VALUE: -0.0882, REQUIRED: True},
particles.IrisSampleParticleKey.Y_TILT: {TYPE: float, VALUE: -0.7524, REQUIRED: True},
particles.IrisSampleParticleKey.TEMP: {TYPE: float, VALUE: 28.45, REQUIRED: True},
particles.IrisSampleParticleKey.SN: {TYPE: unicode, VALUE: 'N8642', REQUIRED: True}
}
iris_sample_parameters_02 = {
particles.IrisSampleParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'IRIS', REQUIRED: True},
particles.IrisSampleParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/05/29 00:25:36', REQUIRED: True},
particles.IrisSampleParticleKey.X_TILT: {TYPE: float, VALUE: -0.0885, REQUIRED: True},
particles.IrisSampleParticleKey.Y_TILT: {TYPE: float, VALUE: -0.7517, REQUIRED: True},
particles.IrisSampleParticleKey.TEMP: {TYPE: float, VALUE: 28.49, REQUIRED: True},
particles.IrisSampleParticleKey.SN: {TYPE: unicode, VALUE: 'N8642', REQUIRED: True}
}
heat_sample_parameters_01 = {
particles.HeatSampleParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'HEAT', REQUIRED: True},
particles.HeatSampleParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/04/19 22:54:11', REQUIRED: True},
particles.HeatSampleParticleKey.X_TILT: {TYPE: int, VALUE: -1, REQUIRED: True},
particles.HeatSampleParticleKey.Y_TILT: {TYPE: int, VALUE: 1, REQUIRED: True},
particles.HeatSampleParticleKey.TEMP: {TYPE: int, VALUE: 25, REQUIRED: True}
}
heat_sample_parameters_02 = {
particles.HeatSampleParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'HEAT', REQUIRED: True},
particles.HeatSampleParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/04/19 22:54:11', REQUIRED: True},
particles.HeatSampleParticleKey.X_TILT: {TYPE: int, VALUE: 1, REQUIRED: True},
particles.HeatSampleParticleKey.Y_TILT: {TYPE: int, VALUE: 1, REQUIRED: True},
particles.HeatSampleParticleKey.TEMP: {TYPE: int, VALUE: 25, REQUIRED: True}
}
botpt_status_parameters_01 = {
particles.BotptStatusParticleKey.LILY1: {TYPE: unicode, VALUE: samples.LILY_FILTERED_STATUS1, REQUIRED: True},
particles.BotptStatusParticleKey.LILY2: {TYPE: unicode, VALUE: samples.LILY_FILTERED_STATUS2, REQUIRED: True},
particles.BotptStatusParticleKey.IRIS1: {TYPE: unicode, VALUE: samples.IRIS_FILTERED_STATUS1, REQUIRED: True},
particles.BotptStatusParticleKey.IRIS2: {TYPE: unicode, VALUE: samples.IRIS_FILTERED_STATUS2, REQUIRED: True},
particles.BotptStatusParticleKey.NANO: {TYPE: unicode, VALUE: samples.NANO_FILTERED_STATUS, REQUIRED: True},
particles.BotptStatusParticleKey.SYST: {TYPE: unicode, VALUE: samples.SYST_FILTERED_STATUS, REQUIRED: True},
}
lily_leveling_parameters_01 = {
particles.LilyLevelingParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'LILY', REQUIRED: True},
particles.LilyLevelingParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/07/24 20:36:27', REQUIRED: True},
particles.LilyLevelingParticleKey.X_TILT: {TYPE: float, VALUE: 14.667, REQUIRED: True},
particles.LilyLevelingParticleKey.Y_TILT: {TYPE: float, VALUE: 81.642, REQUIRED: True},
particles.LilyLevelingParticleKey.MAG_COMPASS: {TYPE: float, VALUE: 185.21, REQUIRED: True},
particles.LilyLevelingParticleKey.TEMP: {TYPE: float, VALUE: 33.67, REQUIRED: True},
particles.LilyLevelingParticleKey.SUPPLY_VOLTS: {TYPE: float, VALUE: 11.59, REQUIRED: True},
particles.LilyLevelingParticleKey.SN: {TYPE: unicode, VALUE: u'N9651', REQUIRED: True},
particles.LilyLevelingParticleKey.STATUS: {TYPE: unicode, VALUE: u'None', REQUIRED: True}
}
lily_leveling_parameters_02 = {
particles.LilyLevelingParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'LILY', REQUIRED: True},
particles.LilyLevelingParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/06/28 17:29:21', REQUIRED: True},
particles.LilyLevelingParticleKey.X_TILT: {TYPE: float, VALUE: -2.277, REQUIRED: True},
particles.LilyLevelingParticleKey.Y_TILT: {TYPE: float, VALUE: -2.165, REQUIRED: True},
particles.LilyLevelingParticleKey.MAG_COMPASS: {TYPE: float, VALUE: 190.81, REQUIRED: True},
particles.LilyLevelingParticleKey.TEMP: {TYPE: float, VALUE: 25.69, REQUIRED: True},
particles.LilyLevelingParticleKey.SUPPLY_VOLTS: {TYPE: float, VALUE: 11.87, REQUIRED: True},
particles.LilyLevelingParticleKey.SN: {TYPE: unicode, VALUE: u'N9651', REQUIRED: True},
particles.LilyLevelingParticleKey.STATUS: {TYPE: unicode, VALUE: u'Leveled', REQUIRED: True}
}
lily_leveling_parameters_03 = {
particles.LilyLevelingParticleKey.SENSOR_ID: {TYPE: unicode, VALUE: u'LILY', REQUIRED: True},
particles.LilyLevelingParticleKey.TIME: {TYPE: unicode, VALUE: u'2013/06/28 18:04:41', REQUIRED: True},
particles.LilyLevelingParticleKey.X_TILT: {TYPE: float, VALUE: -7.390, REQUIRED: True},
particles.LilyLevelingParticleKey.Y_TILT: {TYPE: float, VALUE: -14.063, REQUIRED: True},
particles.LilyLevelingParticleKey.MAG_COMPASS: {TYPE: float, VALUE: 190.91, REQUIRED: True},
particles.LilyLevelingParticleKey.TEMP: {TYPE: float, VALUE: 25.83, REQUIRED: True},
particles.LilyLevelingParticleKey.SUPPLY_VOLTS: {TYPE: float, VALUE: 11.87, REQUIRED: True},
particles.LilyLevelingParticleKey.SN: {TYPE: unicode, VALUE: u'N9651', REQUIRED: True},
particles.LilyLevelingParticleKey.STATUS: {TYPE: unicode, VALUE: u'Switching to Y', REQUIRED: True}
}
    def assert_driver_parameters(self, current_parameters, verify_values=False):
        """
        Verify that all driver parameters are correct and potentially verify values.
        @param current_parameters: driver parameters read from the driver instance
        @param verify_values: should we verify values against definition?
        """
        # delegate to the framework helper using this mixin's parameter definitions
        self.assert_parameters(current_parameters, self._driver_parameters, verify_values)
    def assert_particle(self, data_particle, particle_type, particle_keys, sample_data, verify_values=False):
        """
        Verify sample particle: key completeness, header, and (optionally) values.
        @param data_particle: data particle
        @param particle_type: particle type (stream name)
        @param particle_keys: particle data keys enum
        @param sample_data: sample values to verify against
        @param verify_values: bool, should we verify parameter values
        """
        # first make sure the expected-values dict covers exactly the particle keys
        self.assert_data_particle_keys(particle_keys, sample_data)
        # an instrument timestamp is required on every BOTPT particle
        self.assert_data_particle_header(data_particle, particle_type, require_instrument_timestamp=True)
        self.assert_data_particle_parameters(data_particle, sample_data, verify_values)
    # --- per-particle assertion helpers -------------------------------------
    # Each helper validates one particle type against the corresponding
    # expected-values dict defined on this mixin.  With verify_values=True
    # the particle's values are compared as well as its keys/header.
    def assert_particle_lily_sample_01(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.LILY_SAMPLE,
                             particles.LilySampleParticleKey, self.lily_sample_parameters_01, verify_values)

    def assert_particle_lily_sample_02(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.LILY_SAMPLE,
                             particles.LilySampleParticleKey, self.lily_sample_parameters_02, verify_values)

    def assert_particle_nano_sample_01(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.NANO_SAMPLE,
                             particles.NanoSampleParticleKey, self.nano_sample_parameters_01, verify_values)

    def assert_particle_nano_sample_02(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.NANO_SAMPLE,
                             particles.NanoSampleParticleKey, self.nano_sample_parameters_02, verify_values)

    def assert_particle_iris_sample_01(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.IRIS_SAMPLE,
                             particles.IrisSampleParticleKey, self.iris_sample_parameters_01, verify_values)

    def assert_particle_iris_sample_02(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.IRIS_SAMPLE,
                             particles.IrisSampleParticleKey, self.iris_sample_parameters_02, verify_values)

    def assert_particle_heat_sample_01(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.HEAT_SAMPLE,
                             particles.HeatSampleParticleKey, self.heat_sample_parameters_01, verify_values)

    def assert_particle_heat_sample_02(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.HEAT_SAMPLE,
                             particles.HeatSampleParticleKey, self.heat_sample_parameters_02, verify_values)

    def assert_particle_botpt_status(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.BOTPT_STATUS,
                             particles.BotptStatusParticleKey, self.botpt_status_parameters_01, verify_values)

    def assert_particle_lily_leveling_01(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.LILY_LEVELING,
                             particles.LilyLevelingParticleKey, self.lily_leveling_parameters_01, verify_values)

    def assert_particle_lily_leveling_02(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.LILY_LEVELING,
                             particles.LilyLevelingParticleKey, self.lily_leveling_parameters_02, verify_values)

    def assert_particle_lily_leveling_03(self, data_particle, verify_values=False):
        self.assert_particle(data_particle, particles.DataParticleType.LILY_LEVELING,
                             particles.LilyLevelingParticleKey, self.lily_leveling_parameters_03, verify_values)
def _create_port_agent_packet(self, data_item):
ts = ntplib.system_to_ntp_time(time.time())
port_agent_packet = PortAgentPacket()
port_agent_packet.attach_data(data_item)
port_agent_packet.attach_timestamp(ts)
port_agent_packet.pack_header()
return port_agent_packet
    def _send_port_agent_packet(self, driver, data_item):
        """
        Feed one data item into the driver's protocol as a port agent packet.
        @param driver: driver instance whose protocol receives the data
        @param data_item: raw data string to deliver
        """
        driver._protocol.got_data(self._create_port_agent_packet(data_item))
def send_side_effect(self, driver):
def inner(data):
response = self._responses.get(data)
if response is not None:
log.debug("my_send: data: %s, my_response: %s", data, response)
self._send_port_agent_packet(driver, response + samples.NEWLINE)
else:
log.debug('No response found for %r', data)
return inner
_responses = {
'NANO,*0100IF\n': samples.NANO_STATUS, # need this for _update_params
'LILY,*9900XYC2\n': 'LILY,2013/06/28 18:04:41,*9900XYC2', # lily on
'IRIS,*9900XYC2\n': 'IRIS,2013/06/28 18:04:41,*9900XYC2', # iris on
'LILY,*9900XY-LEVEL,0\n': 'LILY,2013/06/28 18:04:41,*9900XY-LEVEL,0', # level off
'LILY,*9900XYC-OFF\n': 'LILY,2013/06/28 18:04:41,*9900XYC-OFF', # lily off
'IRIS,*9900XYC-OFF\n': 'IRIS,2013/06/28 18:04:41,*9900XYC-OFF', # iris off
'SYST,1\n': samples.SYST_STATUS,
'LILY,*9900XY-DUMP-SETTINGS\n': samples.LILY_STATUS1,
'LILY,*9900XY-DUMP2\n': samples.LILY_STATUS2,
'IRIS,*9900XY-DUMP-SETTINGS\n': samples.IRIS_STATUS1,
'IRIS,*9900XY-DUMP2\n': samples.IRIS_STATUS2,
'LILY,*9900XY-LEVEL,1\n': 'LILY,2013/06/28 18:04:41,*9900XY-LEVEL,1',
'HEAT,1\n': 'HEAT,2013/06/28 18:04:41,*1',
'HEAT,0\n': 'HEAT,2013/06/28 18:04:41,*0',
'NANO,*0100E4\n': samples.NANO_VALID_SAMPLE_01,
'NANO,TS': samples.NANO_VALID_SAMPLE_01,
}
###############################################################################
# UNIT TESTS #
# Unit tests test the method calls and parameters using Mock. #
# #
# These tests are especially useful for testing parsers and other data #
# handling. The tests generally focus on small segments of code, like a #
# single function call, or more complex code using Mock objects.  However  #
# if you find yourself mocking too much maybe it is better as an #
# integration test. #
# #
# Unit tests do not start up external processes like the port agent or #
# driver process. #
###############################################################################
# noinspection PyProtectedMember,PyUnusedLocal,PyUnresolvedReferences
@attr('UNIT', group='mi')
class DriverUnitTest(InstrumentDriverUnitTestCase, BotptTestMixinSub):
    """
    Unit tests for the BOTPT instrument driver.  The driver runs against a
    mocked connection: outgoing commands are answered from the mixin's
    _responses map via send_side_effect, so no port agent or driver
    process is started.
    """
    def setUp(self):
        InstrumentDriverUnitTestCase.setUp(self)

    def test_connect(self, initial_protocol_state=ProtocolState.AUTOSAMPLE):
        """
        Verify we can initialize the driver. Set up mock events for other tests.
        @param initial_protocol_state: target protocol state for driver
        @return: driver instance
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_initialize_driver(driver, initial_protocol_state)
        driver._protocol.set_init_params(botpt_startup_config)
        # route writes through the canned-response map instead of a real port
        driver._connection.send.side_effect = self.send_side_effect(driver)
        # wrap the FSM's on_event in a Mock that still delegates to the real
        # handler, so tests can inspect the events raised via call_args_list
        driver._protocol._protocol_fsm.on_event_actual = driver._protocol._protocol_fsm.on_event
        driver._protocol._protocol_fsm.on_event = Mock()
        driver._protocol._protocol_fsm.on_event.side_effect = driver._protocol._protocol_fsm.on_event_actual
        driver._protocol._init_params()
        return driver

    def test_got_data(self):
        """
        Verify sample data passed through the got data method produces the correct data particles
        """
        driver = self.test_connect()
        self.assert_particle_published(driver, samples.LILY_VALID_SAMPLE_01, self.assert_particle_lily_sample_01, True)
        self.assert_particle_published(driver, samples.LILY_VALID_SAMPLE_02, self.assert_particle_lily_sample_02, True)
        self.assert_particle_published(driver, samples.NANO_VALID_SAMPLE_01, self.assert_particle_nano_sample_01, True)
        self.assert_particle_published(driver, samples.NANO_VALID_SAMPLE_02, self.assert_particle_nano_sample_02, True)
        self.assert_particle_published(driver, samples.IRIS_VALID_SAMPLE_01, self.assert_particle_iris_sample_01, True)
        self.assert_particle_published(driver, samples.IRIS_VALID_SAMPLE_02, self.assert_particle_iris_sample_02, True)
        self.assert_particle_published(driver, samples.HEAT_VALID_SAMPLE_01, self.assert_particle_heat_sample_01, True)
        self.assert_particle_published(driver, samples.HEAT_VALID_SAMPLE_02, self.assert_particle_heat_sample_02, True)
        # disable leveling-related methods to avoid handling these messages (will raise exception)
        driver._protocol._check_completed_leveling = Mock()
        driver._protocol._check_for_autolevel = Mock()
        self.assert_particle_published(driver, samples.LEVELING_STATUS, self.assert_particle_lily_leveling_01, True)
        self.assert_particle_published(driver, samples.LEVELED_STATUS, self.assert_particle_lily_leveling_02, True)
        self.assert_particle_published(driver, samples.SWITCHING_STATUS, self.assert_particle_lily_leveling_03, True)
        # out-of-range messages still produce leveling particles; values not verified
        self.assert_particle_published(driver, samples.X_OUT_OF_RANGE, self.assert_particle_lily_leveling_02, False)
        self.assert_particle_published(driver, samples.Y_OUT_OF_RANGE, self.assert_particle_lily_leveling_02, False)

    def test_corrupt_data(self):
        """
        Verify corrupt data generates a SampleException
        """
        driver = self.test_connect()
        for sample, p_type in [
                (samples.LILY_VALID_SAMPLE_01, particles.LilySampleParticle),
                (samples.IRIS_VALID_SAMPLE_01, particles.IrisSampleParticle),
                (samples.NANO_VALID_SAMPLE_01, particles.NanoSampleParticle),
                (samples.HEAT_VALID_SAMPLE_01, particles.HeatSampleParticle),
                (samples.LEVELING_STATUS, particles.LilyLevelingParticle),
                (samples.LILY_STATUS1, particles.LilyStatusParticle1),
                (samples.LILY_STATUS2, particles.LilyStatusParticle2),
                (samples.IRIS_STATUS1, particles.IrisStatusParticle1),
                (samples.IRIS_STATUS2, particles.IrisStatusParticle2),
                (samples.NANO_STATUS, particles.NanoStatusParticle),
                (samples.SYST_STATUS, particles.SystStatusParticle),
        ]:
            # inject garbage into the middle of each valid sample
            sample = sample[:8] + 'GARBAGE123123124' + sample[8:]
            with self.assertRaises(SampleException):
                p_type(sample).generate()

    def test_status_particle(self):
        """
        This particle is not generated via the chunker (because it may contain embedded samples)
        so we will test it by manually generating the particle.
        """
        ts = ntplib.system_to_ntp_time(time.time())
        status = NEWLINE.join([samples.SYST_STATUS, samples.LILY_STATUS1, samples.LILY_STATUS2,
                               samples.IRIS_STATUS1, samples.IRIS_STATUS2, samples.NANO_STATUS])
        self.assert_particle_botpt_status(particles.BotptStatusParticle(status, port_timestamp=ts), verify_values=True)

    def test_combined_samples(self):
        """
        Verify combined samples produce the correct number of chunks
        """
        chunker = StringChunker(Protocol.sieve_function)
        ts = self.get_ntp_timestamp()
        my_samples = [(samples.BOTPT_FIREHOSE_01, 6),
                      (samples.BOTPT_FIREHOSE_02, 7)]
        for data, num_samples in my_samples:
            chunker.add_chunk(data, ts)
            results = []
            # drain the chunker, verifying each chunk came from the input
            while True:
                timestamp, result = chunker.get_next_data()
                if result:
                    results.append(result)
                    self.assertTrue(result in data)
                    self.assertEqual(timestamp, ts)
                else:
                    break
            self.assertEqual(len(results), num_samples)

    def test_chunker(self):
        """
        Test the chunker against all input samples
        """
        chunker = StringChunker(Protocol.sieve_function)
        # NOTE(review): ts is unused here; the assert_chunker_* helpers
        # supply their own timestamps
        ts = self.get_ntp_timestamp()
        for sample in self._samples:
            self.assert_chunker_sample(chunker, sample)
            self.assert_chunker_sample_with_noise(chunker, sample)
            self.assert_chunker_fragmented_sample(chunker, sample)
            self.assert_chunker_combined_sample(chunker, sample)

    def test_status_handler(self):
        """
        Test the acquire status handler
        """
        driver = self.test_connect()
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.ACQUIRE_STATUS)

    @unittest.skip('times out when run with other tests')
    def test_leveling_timeout(self):
        """
        Test that leveling times out, is stopped, and the appropriate flags are set.
        """
        driver = self.test_connect()
        expected = [call(ProtocolEvent.GET, Parameter.ALL),  # startup get ALL
                    call(ProtocolEvent.START_LEVELING),  # start leveling
                    call(ProtocolEvent.GET, Parameter.ALL),  # config change get ALL
                    call(ProtocolEvent.LEVELING_TIMEOUT),  # leveling timed out
                    call(ProtocolEvent.GET, Parameter.ALL)]  # config change get ALL
        # NOTE(review): the assertions live in the except block; if the
        # timeout never raises, this test passes without verifying anything
        try:
            # set the leveling timeout to 1 to speed up timeout
            driver._protocol._param_dict.set_value(Parameter.LEVELING_TIMEOUT, 1)
            driver._protocol._protocol_fsm.on_event(ProtocolEvent.START_LEVELING)
            self.assertEqual(driver._protocol._param_dict.get(Parameter.LILY_LEVELING), True)
            # sleep for longer than the length of timeout
            time.sleep(driver._protocol._param_dict.get(Parameter.LEVELING_TIMEOUT) + 1)
        except InstrumentProtocolException:
            # assert that we raised the expected events
            self.assertEqual(driver._protocol._protocol_fsm.on_event.call_args_list, expected)
            self.assertEqual(driver._protocol._param_dict.get(Parameter.LILY_LEVELING), False)
            self.assertEqual(driver._protocol._param_dict.get(Parameter.AUTO_RELEVEL), False)
            self.assertEqual(driver._protocol._param_dict.get(Parameter.LEVELING_FAILED), True)

    def test_leveling_complete(self):
        """
        Test the driver processes a leveling complete particle
        """
        driver = self.test_connect()
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.START_LEVELING)
        # feed in a leveling complete status message
        self._send_port_agent_packet(driver, samples.LEVELED_STATUS)
        # Assert we have returned to the command state
        self.assertEquals(driver._protocol.get_current_state(), ProtocolState.AUTOSAMPLE)
        expected = [call(ProtocolEvent.GET, Parameter.ALL),  # startup get ALL
                    call(ProtocolEvent.START_LEVELING),  # start leveling
                    call(ProtocolEvent.GET, Parameter.ALL),  # config change get ALL
                    call(ProtocolEvent.STOP_LEVELING),  # leveling complete
                    call(ProtocolEvent.GET, Parameter.ALL)]  # config change get ALL
        time.sleep(.5)
        # assert that we raised the expected events
        self.assertEqual(driver._protocol._protocol_fsm.on_event.call_args_list, expected)

    def test_leveling_failure(self):
        """
        Test the driver processes a leveling failure particle, sets the correct flags.
        """
        driver = self.test_connect()
        driver._protocol._protocol_fsm.on_event(ProtocolEvent.START_LEVELING)
        # assert we have entered a leveling state
        self.assertTrue(driver._protocol._param_dict.get(Parameter.AUTO_RELEVEL))
        # feed in a leveling failed status message
        try:
            self._send_port_agent_packet(driver, samples.X_OUT_OF_RANGE + samples.NEWLINE)
            time.sleep(1)
        except InstrumentDataException:
            self.assertFalse(driver._protocol._param_dict.get(Parameter.AUTO_RELEVEL))
        try:
            self._send_port_agent_packet(driver, samples.Y_OUT_OF_RANGE + samples.NEWLINE)
            time.sleep(1)
        except InstrumentDataException:
            self.assertFalse(driver._protocol._param_dict.get(Parameter.AUTO_RELEVEL))
        self.assertEqual(driver._protocol.get_current_state(), ProtocolState.AUTOSAMPLE)
        expected = [call(ProtocolEvent.GET, Parameter.ALL),  # startup get ALL
                    call(ProtocolEvent.START_LEVELING),  # start leveling
                    call(ProtocolEvent.GET, Parameter.ALL),  # config change get ALL
                    call(ProtocolEvent.GET, Parameter.ALL),  # config change get ALL
                    call(ProtocolEvent.STOP_LEVELING),  # leveling failed
                    call(ProtocolEvent.GET, Parameter.ALL)]  # config change get ALL
        # assert that we raised the expected events
        self.assertEqual(driver._protocol._protocol_fsm.on_event.call_args_list, expected)
        # assert the correct flags are set
        self.assertEqual(driver._protocol._param_dict.get(Parameter.LILY_LEVELING), False)
        self.assertEqual(driver._protocol._param_dict.get(Parameter.AUTO_RELEVEL), False)
        self.assertEqual(driver._protocol._param_dict.get(Parameter.LEVELING_FAILED), True)

    def test_pps_time_sync(self):
        """
        Test that the time sync event is raised when PPS is regained.
        """
        driver = self.test_connect()
        self._send_port_agent_packet(driver, samples.NANO_VALID_SAMPLE_01)  # PPS lost
        self._send_port_agent_packet(driver, samples.NANO_VALID_SAMPLE_02)  # PPS regained
        expected = [call('DRIVER_EVENT_GET', 'DRIVER_PARAMETER_ALL'),  # startup get ALL
                    call('PROTOCOL_EVENT_NANO_TIME_SYNC')]  # Time sync event when PPS regained
        # assert that we raised the expected events
        self.assertEqual(driver._protocol._protocol_fsm.on_event.call_args_list, expected)

    def test_driver_enums(self):
        """
        Verify that all driver enumeration has no duplicate values that might cause confusion. Also
        do a little extra validation for the Capabilities
        """
        self.assert_enum_has_no_duplicates(particles.DataParticleType)
        self.assert_enum_has_no_duplicates(ProtocolState)
        self.assert_enum_has_no_duplicates(ProtocolEvent)
        self.assert_enum_has_no_duplicates(Parameter)
        # self.assert_enum_has_no_duplicates(InstrumentCommand())
        # Test capabilities for duplicates, them verify that capabilities is a subset of protocol events
        self.assert_enum_has_no_duplicates(Capability)
        self.assert_enum_complete(Capability, ProtocolEvent)

    def test_capabilities(self):
        """
        Verify the FSM reports capabilities as expected. All states defined in this dict must
        also be defined in the protocol FSM.
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_capabilities(driver, self._capabilities)

    def test_protocol_filter_capabilities(self):
        """
        This tests driver filter_capabilities.
        Iterate through available capabilities, and verify that they can pass successfully through the filter.
        Test silly made up capabilities to verify they are blocked by filter.
        """
        mock_callback = Mock()
        protocol = Protocol(Prompt, samples.NEWLINE, mock_callback)
        driver_capabilities = Capability.list()
        test_capabilities = Capability.list()
        # BOTPT adds/removes leveling capabilities dynamically
        # we need to remove the STOP capabilities for this test
        driver_capabilities.remove(Capability.STOP_LEVELING)
        # Add a bogus capability that will be filtered out.
        test_capabilities.append("BOGUS_CAPABILITY")
        # Verify "BOGUS_CAPABILITY was filtered out
        self.assertEquals(sorted(driver_capabilities),
                          sorted(protocol._filter_capabilities(test_capabilities)))

    def test_driver_schema(self):
        """
        get the driver schema and verify it is configured properly
        """
        driver = InstrumentDriver(self._got_data_event_callback)
        self.assert_driver_schema(driver, self._driver_parameters, self._driver_capabilities)
###############################################################################
# INTEGRATION TESTS #
# Integration tests test the direct driver / instrument interaction          #
# by making direct calls via zeromq.                                         #
# - Common Integration tests test the driver through the instrument agent #
# and common for all drivers (minimum requirement for ION ingestion) #
###############################################################################
@attr('INT', group='mi')
class DriverIntegrationTest(InstrumentDriverIntegrationTestCase, BotptTestMixinSub):
    """
    Integration tests for the BOTPT instrument driver.  These exercise the
    driver process directly via zeromq against the instrument (or simulator).
    """
    def setUp(self):
        InstrumentDriverIntegrationTestCase.setUp(self)

    def assert_acquire_status(self):
        """
        Verify all status particles generated
        """
        self.clear_events()
        self.assert_async_particle_generation(particles.DataParticleType.BOTPT_STATUS,
                                              self.assert_particle_botpt_status, timeout=20)

    def assert_time_sync(self):
        """
        Verify a NANO sample particle is generated following a time sync.
        """
        self.clear_events()
        self.assert_async_particle_generation(particles.DataParticleType.NANO_SAMPLE,
                                              self.assert_particle_nano_sample_01, timeout=20)

    def test_connect(self):
        # simply verify the driver initializes and connects
        self.assert_initialize_driver()

    def test_get(self):
        # verify every driver parameter reports its expected value
        self.assert_initialize_driver()
        for param in self._driver_parameters:
            self.assert_get(param, self._driver_parameters[param][self.VALUE])

    def test_set(self):
        """
        Test all set commands. Verify all exception cases.
        """
        self.assert_initialize_driver()
        constraints = ParameterConstraint.dict()
        parameters = Parameter.dict()
        startup_config = self.test_config.driver_startup_config['parameters']
        for key in constraints:
            _type, minimum, maximum = constraints[key]
            # map constraint name to the actual parameter name
            key = parameters[key]
            if _type in [int, float]:
                # assert we can set in range
                self.assert_set(key, maximum - 1)
                # assert exception when out of range
                self.assert_set_exception(key, maximum + 1)
            elif _type == bool:
                # assert we can toggle a boolean parameter
                if startup_config[key]:
                    self.assert_set(key, False)
                else:
                    self.assert_set(key, True)
            # assert bad types throw an exception
            self.assert_set_exception(key, 'BOGUS')

    def test_set_bogus_parameter(self):
        """
        Verify setting a bad parameter raises an exception
        """
        self.assert_initialize_driver()
        self.assert_set_exception('BOGUS', 'CHEESE')

    def test_startup_parameters(self):
        # verify startup parameters can be overridden and then restored
        new_values = {
            Parameter.AUTO_RELEVEL: True,
            Parameter.LEVELING_TIMEOUT: 601,
            Parameter.XTILT_TRIGGER: 301,
            Parameter.YTILT_TRIGGER: 301,
            Parameter.OUTPUT_RATE: 1,
        }
        self.assert_initialize_driver()
        self.assert_startup_parameters(self.assert_driver_parameters, new_values,
                                       self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS])

    def test_incomplete_config(self):
        """
        Break our startup config, then verify the driver raises an exception
        """
        # grab the old config
        startup_params = self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS]
        old_value = startup_params[Parameter.LEVELING_TIMEOUT]
        failed = False
        try:
            # delete a required parameter
            del (startup_params[Parameter.LEVELING_TIMEOUT])
            # re-init to take our broken config
            self.init_driver_process_client()
            self.assert_initialize_driver()
            # reaching this point means no exception was raised -- that's a failure
            failed = True
        except ResourceError as e:
            log.info('Exception thrown, test should pass: %r', e)
        finally:
            # restore the config for subsequent tests
            startup_params[Parameter.LEVELING_TIMEOUT] = old_value
            if failed:
                self.fail('Failed to throw exception on missing parameter')

    def test_auto_relevel(self):
        """
        Test for verifying auto relevel
        """
        self.assert_initialize_driver()
        # set the leveling timeout low, so we're not here for long
        self.assert_set(Parameter.LEVELING_TIMEOUT, 60, no_get=True)
        # Set the XTILT to a low threshold so that the driver will
        # automatically start the re-leveling operation
        # NOTE: This test MAY fail if the instrument completes
        # leveling before the triggers have been reset to 300
        self.assert_set(Parameter.XTILT_TRIGGER, 0, no_get=True)
        self.assert_driver_command(Capability.DISCOVER, state=ProtocolState.AUTOSAMPLE)
        self.assert_async_particle_generation(particles.DataParticleType.LILY_LEVELING,
                                              self.assert_particle_lily_leveling_01)
        # verify the flag is set
        self.assert_get(Parameter.LILY_LEVELING, True)

    def test_autosample(self):
        """
        Test for turning data on
        """
        self.assert_initialize_driver()
        self.assert_driver_command(Capability.DISCOVER, state=ProtocolState.AUTOSAMPLE)
        rate = int(self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS][Parameter.OUTPUT_RATE])
        # autosample for 10 seconds, then count the samples...
        # we can't test "inline" because the nano data rate is too high.
        time.sleep(10)
        for particle_type, assert_func, count in [
                (particles.DataParticleType.LILY_SAMPLE, self.assert_particle_lily_sample_01, 5),
                (particles.DataParticleType.HEAT_SAMPLE, self.assert_particle_heat_sample_01, 5),
                (particles.DataParticleType.IRIS_SAMPLE, self.assert_particle_iris_sample_01, 5),
                (particles.DataParticleType.NANO_SAMPLE, self.assert_particle_nano_sample_01, 5 * rate)
        ]:
            self.assert_async_particle_generation(particle_type, assert_func, particle_count=count, timeout=1)

    def test_commanded_acquire_status(self):
        """
        Test for acquiring status
        """
        self.assert_initialize_driver()
        # Issue acquire status command
        self.assert_particle_generation(Capability.ACQUIRE_STATUS, particles.DataParticleType.BOTPT_STATUS,
                                        self.assert_particle_botpt_status)

    def test_leveling_complete(self):
        """
        Test for leveling complete
        """
        self.assert_initialize_driver()
        # go to autosample
        self.assert_driver_command(Capability.DISCOVER, state=ProtocolState.AUTOSAMPLE, delay=5)
        # Issue start leveling command
        self.assert_driver_command(Capability.START_LEVELING)
        # Verify the flag is set
        self.assert_get(Parameter.LILY_LEVELING, True)
        # Leveling should complete or abort after DEFAULT_LEVELING_TIMEOUT seconds
        timeout = self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS][Parameter.LEVELING_TIMEOUT]
        # wait for a sample particle to indicate leveling is complete
        self.clear_events()
        self.assert_async_particle_generation(particles.DataParticleType.LILY_SAMPLE,
                                              self.assert_particle_lily_sample_01,
                                              timeout=timeout + 10)
        # Verify the flag is unset
        self.assert_get(Parameter.LILY_LEVELING, False)

    def test_scheduled_acquire_status(self):
        """
        Verify we can schedule an acquire status event
        """
        self.assert_scheduled_event(ScheduledJob.ACQUIRE_STATUS, self.assert_acquire_status, delay=20)

    def test_scheduled_time_sync(self):
        """
        Verify we can schedule a time sync event.
        If we sync time in command mode, we will generate at least one NANO sample particle.
        """
        self.assert_scheduled_event(ScheduledJob.NANO_TIME_SYNC, self.assert_time_sync, delay=20)
###############################################################################
# QUALIFICATION TESTS #
# Device specific qualification tests are for doing final testing of ion #
# integration. They generally aren't used for instrument debugging and should #
# be tackled after all unit and integration tests are complete #
###############################################################################
@attr('QUAL', group='mi')
class DriverQualificationTest(InstrumentDriverQualificationTestCase, BotptTestMixinSub):
    """
    Qualification tests for the BOTPT instrument driver.  These run the
    driver through the instrument agent for final ION-integration testing.
    """
    def setUp(self):
        InstrumentDriverQualificationTestCase.setUp(self)

    def assert_cycle(self):
        """
        Assert we can enter autosample, acquire all particles, acquire status,
        stop autosample, acquire heat particle, acquire_status.
        """
        self.assert_start_autosample()
        # verify all particles in autosample
        self.assert_particle_async(particles.DataParticleType.LILY_SAMPLE, self.assert_particle_lily_sample_01)
        self.assert_particle_async(particles.DataParticleType.IRIS_SAMPLE, self.assert_particle_iris_sample_01)
        self.assert_particle_async(particles.DataParticleType.NANO_SAMPLE, self.assert_particle_nano_sample_01)
        self.assert_particle_async(particles.DataParticleType.HEAT_SAMPLE, self.assert_particle_heat_sample_01)
        self.assert_particle_polled(Capability.ACQUIRE_STATUS, self.assert_particle_botpt_status,
                                    particles.DataParticleType.BOTPT_STATUS, timeout=60)
        self.assert_particle_async(particles.DataParticleType.LILY_SAMPLE, self.assert_particle_lily_sample_01)
        self.assert_particle_async(particles.DataParticleType.IRIS_SAMPLE, self.assert_particle_iris_sample_01)
        self.assert_particle_async(particles.DataParticleType.NANO_SAMPLE, self.assert_particle_nano_sample_01)
        self.assert_particle_async(particles.DataParticleType.HEAT_SAMPLE, self.assert_particle_heat_sample_01)
        # verify all particles in command
        self.assert_particle_async(particles.DataParticleType.HEAT_SAMPLE, self.assert_particle_heat_sample_01)
        self.assert_particle_polled(Capability.ACQUIRE_STATUS, self.assert_particle_botpt_status,
                                    particles.DataParticleType.BOTPT_STATUS, timeout=60)

    def test_cycle(self):
        """
        Verify we can run through the test cycle 4 times
        """
        self.assert_enter_command_mode()
        for x in xrange(4):
            log.debug('test_cycle -- PASS %d', x + 1)
            self.assert_cycle()

    def test_direct_access_telnet_mode(self):
        """
        This test manually tests that the Instrument Driver properly supports
        direct access to the physical instrument. (telnet mode)
        """
        self.assert_direct_access_start_telnet()
        self.assertTrue(self.tcp_client)
        # send a dump-settings command and wait for the matching response
        self.tcp_client.send_data(InstrumentCommands.LILY_DUMP1 + samples.NEWLINE)
        result = self.tcp_client.expect('-DUMP-SETTINGS')
        self.assertTrue(result, msg='Failed to receive expected response in direct access mode.')
        self.assert_direct_access_stop_telnet()
        # this driver always returns to autosample after direct access
        self.assert_state_change(ResourceAgentState.STREAMING, ProtocolState.AUTOSAMPLE, 10)

    def test_leveling(self):
        """
        Verify we can stop/start leveling
        """
        self.assert_enter_command_mode()
        self.assert_resource_command(Capability.START_LEVELING)
        self.assert_get_parameter(Parameter.LILY_LEVELING, True)
        self.assert_particle_async(particles.DataParticleType.LILY_LEVELING, self.assert_particle_lily_leveling_01)
        self.assert_resource_command(Capability.STOP_LEVELING)
        self.assert_get_parameter(Parameter.LILY_LEVELING, False)

    def test_get_set_parameters(self):
        """
        verify that all parameters can be get set properly, this includes
        ensuring that read only parameters fail on set.
        """
        self.assert_enter_command_mode()
        constraints = ParameterConstraint.dict()
        reverse_param = Parameter.reverse_dict()
        startup_config = self.test_config.driver_startup_config[DriverConfigKey.PARAMETERS]
        for key in self._driver_parameters:
            if self._driver_parameters[key][self.READONLY]:
                self.assert_read_only_parameter(key)
            else:
                # look up any constraint for this parameter
                name = reverse_param.get(key)
                if name in constraints:
                    _type, minimum, maximum = constraints[name]
                    if _type in [int, float]:
                        # assert we can set in range
                        self.assert_set_parameter(key, maximum - 1)
                        # assert exception when out of range
                        with self.assertRaises(BadRequest):
                            self.assert_set_parameter(key, maximum + 1)
                    elif _type == bool:
                        # assert we can toggle a boolean parameter
                        if startup_config[key]:
                            self.assert_set_parameter(key, False)
                        else:
                            self.assert_set_parameter(key, True)
                    # assert bad types throw an exception
                    with self.assertRaises(BadRequest):
                        self.assert_set_parameter(key, 'BOGUS')

    def test_get_capabilities(self):
        """
        Verify that the correct capabilities are returned from get_capabilities
        at various driver/agent states.
        """
        self.assert_enter_command_mode()
        ##################
        # Command Mode
        ##################
        capabilities = {
            AgentCapabilityType.AGENT_COMMAND: self._common_agent_commands(ResourceAgentState.COMMAND),
            AgentCapabilityType.AGENT_PARAMETER: self._common_agent_parameters(),
            AgentCapabilityType.RESOURCE_COMMAND: [
                ProtocolEvent.GET,
                ProtocolEvent.SET,
                ProtocolEvent.ACQUIRE_STATUS,
                ProtocolEvent.START_LEVELING,
                ProtocolEvent.STOP_LEVELING,
            ],
            AgentCapabilityType.RESOURCE_INTERFACE: None,
            AgentCapabilityType.RESOURCE_PARAMETER: self._driver_parameters.keys()
        }
        self.assert_capabilities(capabilities)
        ##################
        # Streaming Mode
        ##################
        # SET is not available while streaming
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.STREAMING)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = [
            ProtocolEvent.GET,
            ProtocolEvent.ACQUIRE_STATUS,
            ProtocolEvent.START_LEVELING,
            ProtocolEvent.STOP_LEVELING,
        ]
        self.assert_start_autosample()
        self.assert_capabilities(capabilities)
        ##################
        # DA Mode
        ##################
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.DIRECT_ACCESS)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = self._common_da_resource_commands()
        self.assert_direct_access_start_telnet()
        self.assert_capabilities(capabilities)
        self.assert_direct_access_stop_telnet()
        #######################
        # Uninitialized Mode
        #######################
        capabilities[AgentCapabilityType.AGENT_COMMAND] = self._common_agent_commands(ResourceAgentState.UNINITIALIZED)
        capabilities[AgentCapabilityType.RESOURCE_COMMAND] = []
        capabilities[AgentCapabilityType.RESOURCE_INTERFACE] = []
        capabilities[AgentCapabilityType.RESOURCE_PARAMETER] = []
        self.assert_reset()
        self.assert_capabilities(capabilities)

    def test_direct_access_exit_from_autosample(self):
        """
        Overridden. This driver always discovers to autosample
        """

    def test_discover(self):
        """
        Overridden. The driver always discovers to autosample
        """
        # Verify the agent is in command mode
        #self.assert_enter_command_mode()
        # Now reset and try to discover. This will stop the driver which holds the current
        # instrument state.
        self.assert_reset()
        self.assert_discover(ResourceAgentState.COMMAND)
| |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import torch
import pyro
import pyro.distributions.torch as dist
import pyro.poutine as poutine
from pyro.contrib.autoname import autoname, sample
def test_basic_scope():
    """Sites are auto-named after the wrapped function (or explicit name)."""
    @autoname
    def f1():
        sample(dist.Normal(0, 1))
        return sample(dist.Bernoulli(0.5))

    @autoname(name="model")
    def f2():
        sample("x", dist.Bernoulli(0.5))
        return sample(dist.Normal(0.0, 1.0))

    trace_f1 = poutine.trace(f1).get_trace()
    for site in ("f1/Normal", "f1/Bernoulli"):
        assert site in trace_f1.nodes

    trace_f2 = poutine.trace(f2).get_trace()
    for site in ("model/x", "model/Normal"):
        assert site in trace_f2.nodes
def test_repeat_names():
    """Repeated site names within one scope are disambiguated by a counter."""
    @autoname
    def f1():
        sample(dist.Normal(0, 1))
        sample(dist.Normal(0, 1))
        return sample(dist.Bernoulli(0.5))

    @autoname(name="model")
    def f2():
        sample("x", dist.Bernoulli(0.5))
        sample("x", dist.Bernoulli(0.5))
        sample("x", dist.Bernoulli(0.5))
        return sample(dist.Normal(0.0, 1.0))

    trace_f1 = poutine.trace(f1).get_trace()
    for site in ("f1/Normal", "f1/Normal1", "f1/Bernoulli"):
        assert site in trace_f1.nodes

    trace_f2 = poutine.trace(f2).get_trace()
    for site in ("model/x", "model/x1", "model/x2", "model/Normal"):
        assert site in trace_f2.nodes
def test_compose_scopes():
    """Nested @autoname functions compose their scope prefixes; repeated
    calls to the same sub-function within one scope get __N suffixes."""
    @autoname
    def f1():
        return sample(dist.Bernoulli(0.5))

    @autoname
    def f2():
        f1()
        return sample(dist.Bernoulli(0.5))

    @autoname
    def f3():
        f1()
        f1()
        f1()
        f2()
        return sample(dist.Normal(0, 1))

    tr1 = poutine.trace(f1).get_trace()
    assert "f1/Bernoulli" in tr1.nodes
    tr2 = poutine.trace(f2).get_trace()
    assert "f2/f1/Bernoulli" in tr2.nodes
    assert "f2/Bernoulli" in tr2.nodes
    tr3 = poutine.trace(f3).get_trace()
    # second and third calls to f1 are disambiguated with __1 / __2
    assert "f3/f1/Bernoulli" in tr3.nodes
    assert "f3/f1__1/Bernoulli" in tr3.nodes
    assert "f3/f1__2/Bernoulli" in tr3.nodes
    assert "f3/f2/f1/Bernoulli" in tr3.nodes
    assert "f3/f2/Bernoulli" in tr3.nodes
    assert "f3/Normal" in tr3.nodes
def test_basic_loop():
    """A plain (un-named) loop reuses the enclosing scope: repeated calls
    and repeated site names get numeric suffixes within 'model'."""
    @autoname
    def f1():
        return sample(dist.Bernoulli(0.5))

    @autoname(name="model")
    def f2():
        f1()
        for i in range(3):
            f1()
            sample("x", dist.Bernoulli(0.5))
        return sample(dist.Normal(0.0, 1.0))

    tr = poutine.trace(f2).get_trace()
    assert "model/f1/Bernoulli" in tr.nodes
    assert "model/f1__1/Bernoulli" in tr.nodes
    assert "model/f1__2/Bernoulli" in tr.nodes
    assert "model/f1__3/Bernoulli" in tr.nodes
    assert "model/x" in tr.nodes
    assert "model/x1" in tr.nodes
    assert "model/x2" in tr.nodes
    assert "model/Normal" in tr.nodes
def test_named_loop():
    """Wrapping an iterable in autoname(name=...) opens a fresh sub-scope
    per iteration (loop, loop__1, loop__2, ...)."""
    @autoname
    def f1():
        return sample(dist.Bernoulli(0.5))

    @autoname(name="model")
    def f2():
        f1()
        # for i in autoname(name="loop")(range(3)): <- this works too
        for i in autoname(range(3), name="loop"):
            f1()
            sample("x", dist.Bernoulli(0.5))
        return sample(dist.Normal(0.0, 1.0))

    tr = poutine.trace(f2).get_trace()
    assert "model/f1/Bernoulli" in tr.nodes
    assert "model/loop/f1/Bernoulli" in tr.nodes
    assert "model/loop__1/f1/Bernoulli" in tr.nodes
    assert "model/loop__2/f1/Bernoulli" in tr.nodes
    assert "model/loop/x" in tr.nodes
    assert "model/loop__1/x" in tr.nodes
    assert "model/loop__2/x" in tr.nodes
    assert "model/Normal" in tr.nodes
def test_sequential_plate():
    """autoname() around a sequential pyro.plate scopes each iteration
    (data, data__1, data__2) and preserves site ordering."""
    @autoname
    def f1():
        return sample(dist.Bernoulli(0.5))

    @autoname(name="model")
    def f2():
        for i in autoname(pyro.plate(name="data", size=3)):
            f1()
        return sample(dist.Bernoulli(0.5))

    expected_names = [
        "model/data/f1/Bernoulli",
        "model/data__1/f1/Bernoulli",
        "model/data__2/f1/Bernoulli",
        "model/Bernoulli",
    ]
    tr = poutine.trace(f2).get_trace()
    actual_names = [
        name
        for name, node in tr.nodes.items()
        # skip the plate's internal _Subsample bookkeeping site
        if node["type"] == "sample" and type(node["fn"]).__name__ != "_Subsample"
    ]
    assert expected_names == actual_names
def test_nested_plate():
    """Nested autoname'd loops compose per-iteration scopes in order
    (data/xy, data/xy__1, data__1/xy, ...)."""
    @autoname
    def f1():
        return sample(dist.Bernoulli(0.5))

    @autoname(name="model")
    def f2():
        for i in autoname(pyro.plate(name="data", size=3)):
            for j in autoname(range(2), name="xy"):
                f1()
        return sample(dist.Bernoulli(0.5))

    expected_names = [
        "model/data/xy/f1/Bernoulli",
        "model/data/xy__1/f1/Bernoulli",
        "model/data__1/xy/f1/Bernoulli",
        "model/data__1/xy__1/f1/Bernoulli",
        "model/data__2/xy/f1/Bernoulli",
        "model/data__2/xy__1/f1/Bernoulli",
        "model/Bernoulli",
    ]
    tr = poutine.trace(f2).get_trace()
    actual_names = [
        name
        for name, node in tr.nodes.items()
        # skip the plate's internal _Subsample bookkeeping site
        if node["type"] == "sample" and type(node["fn"]).__name__ != "_Subsample"
    ]
    assert expected_names == actual_names
def test_model_guide():
    """A guide can reuse the model's scope via autoname(name="model") so
    model and guide site names line up for inference."""
    @autoname
    def model():
        sample("x", dist.HalfNormal(1))
        return sample(dist.Bernoulli(0.5))

    @autoname(name="model")
    def guide():
        sample("x", dist.Gamma(1, 1))
        return sample(dist.Bernoulli(0.5))

    model_tr = poutine.trace(model).get_trace()
    guide_tr = poutine.trace(guide).get_trace()
    assert "model/x" in model_tr.nodes
    assert "model/x" in guide_tr.nodes
    assert "model/Bernoulli" in model_tr.nodes
    assert "model/Bernoulli" in guide_tr.nodes
def test_context_manager():
    """autoname also works as a context manager, prefixing everything
    sampled inside the with-block."""
    @autoname
    def f1():
        return sample(dist.Bernoulli(0.5))

    def f2():
        with autoname(name="prefix"):
            f1()
            f1()

    tr2 = poutine.trace(f2).get_trace()
    assert "prefix/f1/Bernoulli" in tr2.nodes
    assert "prefix/f1__1/Bernoulli" in tr2.nodes
# tests copied from test_scope.py
def test_multi_nested():
    """Mix of repeated calls, a with-scope, and one level of recursion;
    expects the exact ordered list of fully-qualified site names.
    (Copied from test_scope.py.)"""
    @autoname
    def model1(r=True):
        model2()
        model2()
        with autoname(name="inter"):
            model2()
            if r:
                # recurse once, inside the "inter" scope
                model1(r=False)
        model2()

    @autoname
    def model2():
        return sample("y", dist.Normal(0.0, 1.0))

    expected_names = [
        "model1/model2/y",
        "model1/model2__1/y",
        "model1/inter/model2/y",
        "model1/inter/model1/model2/y",
        "model1/inter/model1/model2__1/y",
        "model1/inter/model1/inter/model2/y",
        "model1/inter/model1/model2__2/y",
        "model1/model2__2/y",
    ]
    tr = poutine.trace(model1).get_trace(r=True)
    actual_names = [
        name
        for name, node in tr.nodes.items()
        # skip pyro.plate's internal _Subsample bookkeeping sites
        if node["type"] == "sample" and type(node["fn"]).__name__ != "_Subsample"
    ]
    assert expected_names == actual_names
def test_recur_multi():
    """Single recursion through a with-scope; expects the exact ordered
    list of fully-qualified site names. (Copied from test_scope.py.)"""
    @autoname
    def model1(r=True):
        model2()
        with autoname(name="inter"):
            model2()
            if r:
                # recurse once, inside the "inter" scope
                model1(r=False)
        model2()

    @autoname
    def model2():
        return sample("y", dist.Normal(0.0, 1.0))

    expected_names = [
        "model1/model2/y",
        "model1/inter/model2/y",
        "model1/inter/model1/model2/y",
        "model1/inter/model1/inter/model2/y",
        "model1/inter/model1/model2__1/y",
        "model1/model2__1/y",
    ]
    tr = poutine.trace(model1).get_trace()
    actual_names = [
        name
        for name, node in tr.nodes.items()
        # skip pyro.plate's internal _Subsample bookkeeping sites
        if node["type"] == "sample" and type(node["fn"]).__name__ != "_Subsample"
    ]
    assert expected_names == actual_names
def test_only_withs():
    """Nested with-scopes work without decorating the function; decorating
    afterwards adds the function-name prefix on top."""
    def model1():
        with autoname(name="a"):
            with autoname(name="b"):
                sample("x", dist.Bernoulli(0.5))

    tr1 = poutine.trace(model1).get_trace()
    assert "a/b/x" in tr1.nodes
    tr2 = poutine.trace(autoname(model1)).get_trace()
    assert "model1/a/b/x" in tr2.nodes
def test_mutual_recur():
    """Mutually recursive autoname'd models nest one scope level per call."""
    @autoname
    def model1(n):
        sample("a", dist.Bernoulli(0.5))
        if n <= 0:
            return
        else:
            return model2(n - 1)

    @autoname
    def model2(n):
        sample("b", dist.Bernoulli(0.5))
        if n <= 0:
            return
        else:
            model1(n)

    expected_names = ["model2/b", "model2/model1/a", "model2/model1/model2/b"]
    tr = poutine.trace(model2).get_trace(1)
    actual_names = [
        name
        for name, node in tr.nodes.items()
        # skip pyro.plate's internal _Subsample bookkeeping sites
        if node["type"] == "sample" and type(node["fn"]).__name__ != "_Subsample"
    ]
    assert expected_names == actual_names
def test_simple_recur():
    """Self-recursion adds one 'geometric/' prefix per level, so the k-th
    sample is named ('geometric/' * k) + 'x'."""
    @autoname
    def geometric(p):
        x = sample("x", dist.Bernoulli(p))
        if x.item() == 1.0:
            # model1()
            return x + geometric(p)
        else:
            return x

    prev_name = "x"
    for name, node in poutine.trace(geometric).get_trace(0.9).nodes.items():
        if node["type"] == "sample":
            assert name == "geometric/" + prev_name
            prev_name = "geometric/" + prev_name
def test_no_param():
    """pyro.param sites are NOT renamed by autoname; only sample sites
    receive the scope prefix."""
    pyro.clear_param_store()

    @autoname
    def model():
        a = pyro.param("a", torch.tensor(0.5))
        return sample("b", dist.Bernoulli(a))

    expected_names = ["a", "model/b"]
    tr = poutine.trace(model).get_trace()
    actual_names = [
        name for name, node in tr.nodes.items() if node["type"] in ("param", "sample")
    ]
    assert expected_names == actual_names
| |
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the ExecuteProcess action."""
import shlex
import threading
from typing import Dict
from typing import Iterable
from typing import List
from typing import Optional
from typing import Text
from .execute_local import ExecuteLocal
from ..descriptions import Executable
from ..frontend import Entity
from ..frontend import expose_action
from ..frontend import Parser
from ..some_substitutions_type import SomeSubstitutionsType
from ..substitutions import TextSubstitution
_global_process_counter_lock = threading.Lock()
_global_process_counter = 0 # in Python3, this number is unbounded (no rollover)
@expose_action('executable')
class ExecuteProcess(ExecuteLocal):
    """
    Action that begins executing a process and sets up event handlers for it.

    Simple example:

    .. doctest::

        >>> ld = LaunchDescription([
        ...     ExecuteProcess(
        ...         cmd=['ls', '-las'],
        ...         name='my_ls_process',  # this is optional
        ...         output='both',
        ...     ),
        ... ])

    .. code-block:: xml

        <launch>
            <executable cmd="ls -las" name="my_ls_process" output="both"/>
        </launch>

    Substitutions in the command:

    .. doctest::

        >>> ld = LaunchDescription([
        ...     DeclareLaunchArgument(name='file_path', description='file path to cat'),
        ...     ExecuteProcess(
        ...         # each item of the command arguments' list can be:
        ...         # a string ('cat'),
        ...         # a substitution (`LaunchConfiguration('file_path')`),
        ...         # or a list of string/substitutions
        ...         # (`[LaunchConfiguration('directory'), '/file.txt']`)
        ...         cmd=['cat', LaunchConfiguration('file_path')],
        ...     ),
        ... ])

    .. code-block:: xml

        <launch>
            <arg name="file_path" description="path of the file to cat"/>
            <executable cmd="cat $(var file_path)"/>
        </launch>

    Optional cli argument:

    .. doctest::

        >>> ld = LaunchDescription([
        ...     DeclareLaunchArgument(name='open_gui', default_value='False'),
        ...     ExecuteProcess(
        ...         cmd=['my_cmd', '--open-gui'],
        ...         condition=IfCondition(LaunchConfiguration('open_gui')),
        ...     ),
        ...     ExecuteProcess(
        ...         cmd=['my_cmd'],
        ...         condition=UnlessCondition(LaunchConfiguration('open_gui')),
        ...     ),
        ... ])

    .. code-block:: xml

        <launch>
            <arg name="open_gui" description="when truthy, the gui will be opened"/>
            <executable cmd="my_cmd --open-gui" if="$(var open_gui)"/>
            <executable cmd="my_cmd" unless="$(var open_gui)"/>
        </launch>

    Environment variables:

    .. doctest::

        >>> ld = LaunchDescription([
        ...     ExecuteProcess(
        ...         cmd=['my_cmd'],
        ...         additional_env={'env_variable': 'env_var_value'},
        ...     ),
        ... ])

    .. code-block:: xml

        <launch>
            <executable cmd="my_cmd">
                <env name="env_variable" value="env_var_value"/>
            </executable>
        </launch>
    """

    def __init__(
            self,
            *,
            cmd: Iterable[SomeSubstitutionsType],
            prefix: Optional[SomeSubstitutionsType] = None,
            name: Optional[SomeSubstitutionsType] = None,
            cwd: Optional[SomeSubstitutionsType] = None,
            env: Optional[Dict[SomeSubstitutionsType, SomeSubstitutionsType]] = None,
            additional_env: Optional[Dict[SomeSubstitutionsType, SomeSubstitutionsType]] = None,
            **kwargs
    ) -> None:
        """
        Construct an ExecuteProcess action.

        Many arguments are passed eventually to :class:`subprocess.Popen`, so
        see the documentation for the class for additional details.

        This action, once executed, registers several event handlers for
        various process related events and will also emit events asynchronously
        when certain events related to the process occur.

        Handled events include:

        - launch.events.process.ShutdownProcess:

          - begins standard shutdown procedure for a running executable

        - launch.events.process.SignalProcess:

          - passes the signal provided by the event to the running process

        - launch.events.process.ProcessStdin:

          - passes the text provided by the event to the stdin of the process

        - launch.events.Shutdown:

          - same as ShutdownProcess

        Emitted events include:

        - launch.events.process.ProcessStarted:

          - emitted when the process starts

        - launch.events.process.ProcessExited:

          - emitted when the process exits
          - event contains return code

        - launch.events.process.ProcessStdout and launch.events.process.ProcessStderr:

          - emitted when the process produces data on either the stdout or stderr pipes
          - event contains the data from the pipe

        Note that output is just stored in this class and has to be properly
        implemented by the event handlers for the process's ProcessIO events.

        :param: cmd a list where the first item is the executable and the rest
            are arguments to the executable, each item may be a string or a
            list of strings and Substitutions to be resolved at runtime
        :param: cwd the directory in which to run the executable
        :param: name the label used to represent the process, as a string or a
            Substitution to be resolved at runtime, defaults to the basename of
            the executable
        :param: env dictionary of environment variables to be used, starting from
            a clean environment. If 'None', the current environment is used.
        :param: additional_env dictionary of environment variables to be added.
            If 'env' was None, they are added to the current environment.
            If not, 'env' is updated with additional_env.
        :param: shell if True, a shell is used to execute the cmd
        :param: sigterm_timeout time until shutdown should escalate to SIGTERM,
            as a string or a list of strings and Substitutions to be resolved
            at runtime, defaults to the LaunchConfiguration called
            'sigterm_timeout'
        :param: sigkill_timeout time until escalating to SIGKILL after SIGTERM,
            as a string or a list of strings and Substitutions to be resolved
            at runtime, defaults to the LaunchConfiguration called
            'sigkill_timeout'
        :param: emulate_tty emulate a tty (terminal), defaults to False, but can
            be overridden with the LaunchConfiguration called 'emulate_tty',
            the value of which is evaluated as true or false according to
            :py:func:`evaluate_condition_expression`.
            Throws :py:exc:`InvalidConditionExpressionError` if the
            'emulate_tty' configuration does not represent a boolean.
        :param: prefix a set of commands/arguments to precede the cmd, used for
            things like gdb/valgrind and defaults to the LaunchConfiguration
            called 'launch-prefix'. Note that a non-default prefix provided in
            a launch file will override the prefix provided via the `launch-prefix`
            launch configuration regardless of whether the `launch-prefix-filter` launch
            configuration is provided.
        :param: output configuration for process output logging. Defaults to 'log'
            i.e. log both stdout and stderr to launch main log file and stderr to
            the screen.
            Overridden externally by the OVERRIDE_LAUNCH_PROCESS_OUTPUT envvar value.
            See `launch.logging.get_output_loggers()` documentation for further
            reference on all available options.
        :param: output_format for logging each output line, supporting `str.format()`
            substitutions with the following keys in scope: `line` to reference the raw
            output line and `this` to reference this action instance.
        :param: log_cmd if True, prints the final cmd before executing the
            process, which is useful for debugging when substitutions are
            involved.
        :param: cached_output if `True`, both stdout and stderr will be cached.
            Use get_stdout() and get_stderr() to read the buffered output.
        :param: on_exit list of actions to execute upon process exit.
        :param: respawn if 'True', relaunch the process that abnormally died.
            Defaults to 'False'.
        :param: respawn_delay a delay time to relaunch the died process if respawn is 'True'.
        """
        # Bundle the process description into an Executable and delegate the
        # actual execution machinery to ExecuteLocal.
        executable = Executable(cmd=cmd, prefix=prefix, name=name, cwd=cwd, env=env,
                                additional_env=additional_env)
        super().__init__(process_description=executable, **kwargs)

    @classmethod
    def _parse_cmdline(
        cls,
        cmd: Text,
        parser: Parser
    ) -> List[SomeSubstitutionsType]:
        """
        Parse text apt for command line execution.

        :param: cmd a space (' ') delimited command line arguments list.
           All found `TextSubstitution` items are split and added to the
           list again as a `TextSubstitution`.
        :returns: a list of command line arguments.
        """
        result_args = []
        arg = []

        def _append_arg():
            # Flush the pending substitution pieces as one complete argument.
            nonlocal arg
            result_args.append(arg)
            arg = []
        for sub in parser.parse_substitution(cmd):
            if isinstance(sub, TextSubstitution):
                tokens = shlex.split(sub.text)
                if not tokens:
                    # String with just spaces.
                    # Appending args allows splitting two substitutions
                    # separated by a space.
                    # e.g.: `$(subst1 asd) $(subst2 bsd)` will be two separate arguments.
                    _append_arg()
                    continue
                if sub.text[0].isspace():
                    # Needed for splitting from the previous argument
                    # e.g.: `$(find-exec bsd) asd`
                    # It splits `asd` from the path of `bsd` executable.
                    if len(arg) != 0:
                        _append_arg()
                arg.append(TextSubstitution(text=tokens[0]))
                if len(tokens) > 1:
                    # Needed to split the first argument when more than one token.
                    # e.g. `$(find-pkg-prefix csd)/asd bsd`
                    # will split `$(find-pkg-prefix csd)/asd` from `bsd`.
                    _append_arg()
                    arg.append(TextSubstitution(text=tokens[-1]))
                if len(tokens) > 2:
                    # If there are more than two tokens, just add all the middle tokens to
                    # `result_args`.
                    # e.g. `$(find-pkg-prefix csd)/asd bsd dsd xsd`
                    # 'bsd' 'dsd' will be added.
                    result_args.extend([TextSubstitution(text=x)] for x in tokens[1:-1])
                if sub.text[-1].isspace():
                    # Allows splitting from next argument.
                    # e.g. `exec $(find-some-file)`
                    # Will split `exec` argument from the result of `find-some-file` substitution.
                    _append_arg()
            else:
                # Non-text substitution: keep accumulating into the current argument.
                arg.append(sub)
        if arg:
            result_args.append(arg)
        return result_args

    @classmethod
    def parse(
        cls,
        entity: Entity,
        parser: Parser,
        ignore: Optional[List[str]] = None
    ):
        """
        Return the `ExecuteProcess` action and kwargs for constructing it.

        :param: ignore A list of arguments that should be ignored while parsing.
            Intended for code reuse in derived classes (e.g.: launch_ros.actions.Node).
        """
        _, kwargs = super().parse(entity, parser)

        if ignore is None:
            ignore = []

        if 'cmd' not in ignore:
            kwargs['cmd'] = cls._parse_cmdline(entity.get_attr('cmd'), parser)

        if 'cwd' not in ignore:
            cwd = entity.get_attr('cwd', optional=True)
            if cwd is not None:
                kwargs['cwd'] = parser.parse_substitution(cwd)

        if 'name' not in ignore:
            name = entity.get_attr('name', optional=True)
            if name is not None:
                kwargs['name'] = parser.parse_substitution(name)

        if 'prefix' not in ignore:
            prefix = entity.get_attr('launch-prefix', optional=True)
            if prefix is not None:
                kwargs['prefix'] = parser.parse_substitution(prefix)

        if 'output' not in ignore:
            output = entity.get_attr('output', optional=True)
            if output is not None:
                kwargs['output'] = parser.parse_substitution(output)

        if 'respawn' not in ignore:
            respawn = entity.get_attr('respawn', data_type=bool, optional=True)
            if respawn is not None:
                kwargs['respawn'] = respawn

        if 'respawn_delay' not in ignore:
            respawn_delay = entity.get_attr('respawn_delay', data_type=float, optional=True)
            if respawn_delay is not None:
                if respawn_delay < 0.0:
                    raise ValueError(
                        'Attribute respawn_delay of Entity node expected to be '
                        'a non-negative value but got `{}`'.format(respawn_delay)
                    )
                kwargs['respawn_delay'] = respawn_delay

        if 'shell' not in ignore:
            shell = entity.get_attr('shell', data_type=bool, optional=True)
            if shell is not None:
                kwargs['shell'] = shell

        if 'additional_env' not in ignore:
            # Conditions won't be allowed in the `env` tag.
            # If that feature is needed, `set_environment_variable` and
            # `unset_environment_variable` actions should be used.
            env = entity.get_attr('env', data_type=List[Entity], optional=True)
            if env is not None:
                kwargs['additional_env'] = {
                    tuple(parser.parse_substitution(e.get_attr('name'))):
                    parser.parse_substitution(e.get_attr('value')) for e in env
                }
                for e in env:
                    e.assert_entity_completely_parsed()
        return cls, kwargs

    @property
    def name(self):
        """Getter for name."""
        if self.process_description.final_name is not None:
            return self.process_description.final_name
        return self.process_description.name

    @property
    def cmd(self):
        """Getter for cmd."""
        if self.process_description.final_cmd is not None:
            return self.process_description.final_cmd
        return self.process_description.cmd

    @property
    def cwd(self):
        """Getter for cwd."""
        if self.process_description.final_cwd is not None:
            return self.process_description.final_cwd
        return self.process_description.cwd

    @property
    def env(self):
        """Getter for env."""
        if self.process_description.final_env is not None:
            return self.process_description.final_env
        return self.process_description.env

    @property
    def additional_env(self):
        """Getter for additional_env."""
        return self.process_description.additional_env
| |
# -*- coding: utf-8 -*-
from __future__ import division
from nose.tools import raises
from numpy.testing import assert_allclose, assert_equal
import numpy as np
from SALib.analyze.morris import analyze, \
compute_mu_star_confidence, \
compute_elementary_effects, \
get_increased_values, \
get_decreased_values, \
compute_grouped_metric
def test_compute_mu_star_confidence():
    '''
    Tests that compute mu_star_confidence is computed correctly
    '''
    # dtype=float: the `np.float` alias was removed in NumPy 1.20 and
    # raises AttributeError on modern NumPy; the builtin is equivalent.
    ee = np.array([2.52, 2.01, 2.30, 0.66, 0.93, 1.3], dtype=float)
    num_trajectories = 6
    num_resamples = 1000
    conf_level = 0.95
    actual = compute_mu_star_confidence(ee, num_trajectories, num_resamples, conf_level)
    expected = 0.5
    # bootstrap resampling is stochastic, hence the loose tolerance
    assert_allclose(actual, expected, atol=1e-01)
def test_analysis_of_morris_results():
    '''
    Tests a one-dimensional vector of results

    Taken from the solution to Exercise 4 (p.138) in Saltelli (2008).
    '''
    # dtype=float: the `np.float` alias was removed in NumPy 1.20.
    model_input = np.array([[0, 1. / 3], [0, 1], [2. / 3, 1],
                            [0, 1. / 3], [2. / 3, 1. / 3], [2. / 3, 1],
                            [2. / 3, 0], [2. / 3, 2. / 3], [0, 2. / 3],
                            [1. / 3, 1], [1, 1], [1, 1. / 3],
                            [1. / 3, 1], [1. / 3, 1. / 3], [1, 1. / 3],
                            [1. / 3, 2. / 3], [1. / 3, 0], [1, 0]],
                           dtype=float)
    model_output = np.array([0.97, 0.71, 2.39, 0.97, 2.30, 2.39,
                             1.87, 2.40, 0.87, 2.15, 1.71, 1.54,
                             2.15, 2.17, 1.54, 2.20, 1.87, 1.0],
                            dtype=float)
    problem = {
        'num_vars': 2,
        'names': ['Test 1', 'Test 2'],
        'groups': None,
        'bounds': [[0.0, 1.0], [0.0, 1.0]]
    }
    Si = analyze(problem, model_input, model_output,
                 num_resamples=1000,
                 conf_level=0.95,
                 print_to_console=False)
    desired_mu = np.array([0.66, 0.21])
    assert_allclose(Si['mu'], desired_mu, rtol=1e-1,
                    err_msg="The values for mu are incorrect")
    desired_mu_star = np.array([1.62, 0.35])
    assert_allclose(Si['mu_star'], desired_mu_star, rtol=1e-2,
                    err_msg="The values for mu star are incorrect")
    desired_sigma = np.array([1.79, 0.41])
    assert_allclose(Si['sigma'], desired_sigma, rtol=1e-2,
                    err_msg="The values for sigma are incorrect")
    desired_names = ['Test 1', 'Test 2']
    assert_equal(Si['names'], desired_names,
                 err_msg="The values for names are incorrect")
@raises(ValueError)
def test_conf_level_within_zero_one_bounds():
    '''
    Both a too-low and a too-high confidence level must raise ValueError.
    '''
    ee = [0, 0, 0]
    N = 1
    num_resamples = 2
    # BUG FIX: in the original, the first call raised ValueError which the
    # @raises decorator swallowed, so the too-high case was never executed.
    conf_level_too_low = -1
    try:
        compute_mu_star_confidence(ee, N, num_resamples, conf_level_too_low)
    except ValueError:
        pass
    else:
        raise AssertionError('conf_level below 0 should raise ValueError')
    # This call must itself raise ValueError to satisfy @raises.
    conf_level_too_high = 2
    compute_mu_star_confidence(ee, N, num_resamples, conf_level_too_high)
def test_compute_elementary_effects():
    '''
    Inputs for elementary effects taken from Exercise 5 from Saltelli (2008).
    See page 140-145.

    `model_inputs` are from trajectory t_1 from table 3.10 on page 141.
    `desired` is equivalent to column t_1 in table 3.12 on page 145.
    '''
    # One row per trajectory step; each step changes exactly one factor.
    # dtype=float: the `np.float` alias was removed in NumPy 1.20.
    model_inputs = np.array([
        [1.64, -1.64, -1.64, 0.39, -0.39, 0.39, -1.64, -1.64, -0.39, -0.39, 1.64, 1.64, -0.39, 0.39, 1.64],
        [1.64, -1.64, -1.64, 0.39, -0.39, -1.64, -1.64, -1.64, -0.39, -0.39, 1.64, 1.64, -0.39, 0.39, 1.64],
        [1.64, -1.64, -1.64, 0.39, -0.39, -1.64, -1.64, -1.64, 1.64, -0.39, 1.64, 1.64, -0.39, 0.39, 1.64],
        [1.64, -1.64, -1.64, 0.39, -0.39, -1.64, -1.64, 0.39, 1.64, -0.39, 1.64, 1.64, -0.39, 0.39, 1.64],
        [1.64, -1.64, -1.64, -1.64, -0.39, -1.64, -1.64, 0.39, 1.64, -0.39, 1.64, 1.64, -0.39, 0.39, 1.64],
        [1.64, -1.64, -1.64, -1.64, -0.39, -1.64, -1.64, 0.39, 1.64, -0.39, 1.64, -0.39, -0.39, 0.39, 1.64],
        [1.64, -1.64, -1.64, -1.64, -0.39, -1.64, -1.64, 0.39, 1.64, -0.39, 1.64, -0.39, -0.39, -1.64, 1.64],
        [1.64, 0.39, -1.64, -1.64, -0.39, -1.64, -1.64, 0.39, 1.64, -0.39, 1.64, -0.39, -0.39, -1.64, 1.64],
        [1.64, 0.39, -1.64, -1.64, -0.39, -1.64, -1.64, 0.39, 1.64, -0.39, 1.64, -0.39, -0.39, -1.64, -0.39],
        [1.64, 0.39, -1.64, -1.64, -0.39, -1.64, -1.64, 0.39, 1.64, 1.64, 1.64, -0.39, -0.39, -1.64, -0.39],
        [1.64, 0.39, -1.64, -1.64, 1.64, -1.64, -1.64, 0.39, 1.64, 1.64, 1.64, -0.39, -0.39, -1.64, -0.39],
        [1.64, 0.39, -1.64, -1.64, 1.64, -1.64, -1.64, 0.39, 1.64, 1.64, -0.39, -0.39, -0.39, -1.64, -0.39],
        [1.64, 0.39, -1.64, -1.64, 1.64, -1.64, 0.39, 0.39, 1.64, 1.64, -0.39, -0.39, -0.39, -1.64, -0.39],
        [1.64, 0.39, 0.39, -1.64, 1.64, -1.64, 0.39, 0.39, 1.64, 1.64, -0.39, -0.39, -0.39, -1.64, -0.39],
        [-0.39, 0.39, 0.39, -1.64, 1.64, -1.64, 0.39, 0.39, 1.64, 1.64, -0.39, -0.39, -0.39, -1.64, -0.39],
        [-0.39, 0.39, 0.39, -1.64, 1.64, -1.64, 0.39, 0.39, 1.64, 1.64, -0.39, -0.39, 1.64, -1.64, -0.39]],
        dtype=float)
    model_outputs = np.array([24.9, 22.72, 21.04, 16.01, 10.4, 10.04, 8.6, 13.39, 4.69, 8.02, 9.98, 3.75, 1.33, 2.59, 6.37, 9.99],
                             dtype=float)
    delta = 2. / 3
    actual = compute_elementary_effects(model_inputs, model_outputs, 16, delta)
    desired = np.array([[-5.67], [7.18], [1.89], [8.42], [2.93], [3.28], [-3.62], [-7.55],
                        [-2.51], [5.00], [9.34], [0.54], [5.43], [2.15], [13.05]],
                       dtype=float)
    assert_allclose(actual, desired, atol=1e-1)
def test_compute_grouped_elementary_effects():
    """Elementary effects aggregated over two factor groups ('gp1', 'gp2')
    should reproduce the hand-computed grouped mu_star values."""
    model_inputs = np.array([[.39, -.39, -1.64, 0.39, -0.39, -0.39, 0.39, 0.39, -1.64, -0.39, 0.39, -1.64, 1.64, 1.64, 1.64],
                             [-1.64, 1.64, 0.39, -1.64, 1.64, 1.64, -1.64, -1.64, -1.64, 1.64, 0.39, -1.64, 1.64, 1.64, 1.64],
                             [-1.64, 1.64, 0.39, -1.64, 1.64, 1.64, -1.64, -1.64, 0.39, 1.64, -1.64, 0.39, -.39, -.39, -.39]
                             ])
    model_results = np.array([13.85, -10.11, 1.12])
    # NOTE(review): only problem['groups'] is consumed below; the rest of the
    # dict mirrors the SALib problem-spec shape.
    problem = {'names': ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15'],
               'bounds': [[]],
               'groups': (np.matrix('0,0,0,0,0,0,0,0,1,0,1,1,1,1,1;1,1,1,1,1,1,1,1,0,1,0,0,0,0,0'),
                          ['gp1', 'gp2']),
               'num_vars': 15
               }
    ee = compute_elementary_effects(model_inputs, model_results, 3, 2. / 3)
    # mu_star is the mean absolute elementary effect per factor
    mu_star = np.average(np.abs(ee), axis=1)
    actual = compute_grouped_metric(mu_star, problem['groups'][0].T)
    desired = np.array([16.86, 35.95])
    assert_allclose(actual, desired, atol=1e-1)
def test_compute_elementary_effects_small():
    '''
    Computes elementary effects for two variables,
    over six trajectories with four levels.
    '''
    # dtype=float: the `np.float` alias was removed in NumPy 1.20.
    model_inputs = np.array([[0, 1. / 3], [0, 1], [2. / 3, 1],
                             [0, 1. / 3], [2. / 3, 1. / 3], [2. / 3, 1],
                             [2. / 3, 0], [2. / 3, 2. / 3], [0, 2. / 3],
                             [1. / 3, 1], [1, 1], [1, 1. / 3],
                             [1. / 3, 1], [1. / 3, 1. / 3], [1, 1. / 3],
                             [1. / 3, 2. / 3], [1. / 3, 0], [1, 0]],
                            dtype=float)
    model_outputs = np.array([0.97, 0.71, 2.39, 0.97, 2.3, 2.39, 1.87, 2.40, 0.87, 2.15, 1.71, 1.54, 2.15, 2.17, 1.54, 2.2, 1.87, 1.0],
                             dtype=float)
    delta = 2. / 3
    actual = compute_elementary_effects(model_inputs, model_outputs, 3, delta)
    desired = np.array(
        [[2.52, 2.01, 2.30, -0.66, -0.93, -1.30], [-0.39, 0.13, 0.80, 0.25, -0.02, 0.51]])
    assert_allclose(actual, desired, atol=1e-0)
def test_compute_increased_value_for_ee():
    '''
    Checks extraction of the "increased" model outputs given the boolean
    masks of where each factor moved up (`up`) or down (`lo`).
    '''
    up = np.array([[[False, True], [True, False]],
                   [[True, False], [False, True]],
                   [[False, True], [False, False]],
                   [[True, False], [False, False]],
                   [[False, False], [True, False]],
                   [[False, False], [True, False]]],
                  dtype=bool)
    lo = np.array([[[False, False], [False, False]],
                   [[False, False], [False, False]],
                   [[False, False], [True, False]],
                   [[False, False], [False, True]],
                   [[False, True], [False, False]],
                   [[False, True], [False, False]]],
                  dtype=bool)
    # dtype=float: the `np.float` alias was removed in NumPy 1.20.
    model_outputs = np.array([0.97, 0.71, 2.39, 0.97, 2.3, 2.39, 1.87, 2.40, 0.87, 2.15, 1.71, 1.54, 2.15, 2.17, 1.54, 2.2, 1.87, 1.0],
                             dtype=float)
    op_vec = model_outputs.reshape(6, 3)
    actual = get_increased_values(op_vec, up, lo)
    desired = np.array([[2.39, 2.3, 2.4, 1.71, 1.54, 1.0],
                        [0.71, 2.39, 2.40, 1.71, 2.15, 2.20]],
                       dtype=float)
    assert_allclose(actual, desired, atol=1e-1)
def test_compute_decreased_value_for_ee():
    '''
    Checks extraction of the "decreased" model outputs given the boolean
    masks of where each factor moved up (`up`) or down (`lo`).
    '''
    up = np.array([[[False, True], [True, False]],
                   [[True, False], [False, True]],
                   [[False, True], [False, False]],
                   [[True, False], [False, False]],
                   [[False, False], [True, False]],
                   [[False, False], [True, False]]],
                  dtype=bool)
    lo = np.array([[[False, False], [False, False]],
                   [[False, False], [False, False]],
                   [[False, False], [True, False]],
                   [[False, False], [False, True]],
                   [[False, True], [False, False]],
                   [[False, True], [False, False]]],
                  dtype=bool)
    # dtype=float: the `np.float` alias was removed in NumPy 1.20.
    model_outputs = np.array([0.97, 0.71, 2.39, 0.97, 2.3, 2.39, 1.87, 2.40, 0.87, 2.15, 1.71, 1.54, 2.15, 2.17, 1.54, 2.2, 1.87, 1.0],
                             dtype=float)
    op_vec = model_outputs.reshape(6, 3)
    actual = get_decreased_values(op_vec, up, lo)
    desired = np.array([[0.71, 0.97, 0.87, 2.15, 2.17, 1.87],
                        [0.97, 2.30, 1.87, 1.54, 2.17, 1.87]],
                       dtype=float)
    assert_allclose(actual, desired, atol=1e-1)
def test_compute_grouped_mu_star():
    '''
    Computes mu_star for 3 variables grouped into 2 groups
    There are six trajectories.
    '''
    # dtype=int / dtype=float: the `np.int` and `np.float` aliases were
    # removed in NumPy 1.20; the builtins are equivalent here.
    group_matrix = np.matrix('1,0;0,1;0,1', dtype=int)
    ee = np.array([[2.52, 2.01, 2.30, -0.66, -0.93, -1.30],
                   [-2.00, 0.13, -0.80, 0.25, -0.02, 0.51],
                   [2.00, -0.13, 0.80, -0.25, 0.02, -0.51]])
    mu_star = np.average(np.abs(ee), 1)
    actual = compute_grouped_metric(mu_star, group_matrix)
    desired = np.array([1.62, 0.62], dtype=float)
    assert_allclose(actual, desired, rtol=1e-1)
| |
from numpy import ones, ndarray, array, asarray, concatenate, zeros, shape, \
alltrue, equal, divide, arccos, arcsin, arctan, cos, cosh, \
sin, sinh, exp, ceil, floor, fabs, log, log10, sqrt, argmin, \
argmax, argsort, around, absolute, sign, negative, float32
import sys
numericTypes = (int, long, float, complex)
def isnumeric(t):
    """Return True if t is a scalar numeric type (int, long, float, complex)."""
    return isinstance(t, numericTypes)
def time_it():
    # Rough timing harness for check_expr() on a typical FDTD-style array
    # update expression. NOTE: Python 2 module (print statements below).
    import time
    expr = "ex[:,1:,1:] = ca_x[:,1:,1:] * ex[:,1:,1:]" \
           "+ cb_y_x[:,1:,1:] * (hz[:,1:,1:] - hz[:,:-1,1:])" \
           "- cb_z_x[:,1:,1:] * (hy[:,1:,1:] - hy[:,1:,:-1])"
    # small float32 arrays; check_expr only inspects shapes, not values
    ex = ones((10,10,10),dtype=float32)
    ca_x = ones((10,10,10),dtype=float32)
    cb_y_x = ones((10,10,10),dtype=float32)
    cb_z_x = ones((10,10,10),dtype=float32)
    hz = ones((10,10,10),dtype=float32)
    hy = ones((10,10,10),dtype=float32)
    N = 1
    t1 = time.time()
    for i in range(N):
        passed = check_expr(expr,locals())
    t2 = time.time()
    print 'time per call:', (t2 - t1)/N
    print 'passed:', passed
def check_expr(expr, local_vars, global_vars=None):
    """Check that `expr` is executable given the supplied variables.

    Array values are replaced by lightweight ``dummy_array`` shape proxies
    so the check is cheap and does not touch the real data. Currently only
    checks expressions (not suites). Doesn't check that lhs = rhs; that is
    checked by the compiled func though.

    :param expr: source text to check.
    :param local_vars: mapping of local names to values.
    :param global_vars: optional mapping of global names to values.
    :returns: 1 if the text execs (or evals) cleanly, 0 otherwise.
    """
    # Use a None sentinel instead of a mutable {} default argument.
    if global_vars is None:
        global_vars = {}
    values = {}
    # first handle the globals
    for var, val in global_vars.items():
        if isinstance(val, ndarray):
            values[var] = dummy_array(val, name=var)
        elif isnumeric(val):
            values[var] = val
    # now handle the locals
    for var, val in local_vars.items():
        if isinstance(val, ndarray):
            values[var] = dummy_array(val, name=var)
        if isnumeric(val):
            values[var] = val
    # BUG FIX: the original executed exec(expr, values) unconditionally
    # *before* the try block, so any invalid expression raised out of this
    # function instead of returning 0.
    try:
        exec(expr, values)
    except Exception:
        # fall back to eval for bare expressions; any failure means "invalid"
        try:
            eval(expr, values)
        except Exception:
            return 0
    return 1
empty = array(())
empty_slice = slice(None)
def make_same_length(x, y):
    """Return (x, y) as equal-length shape sequences, left-padding the
    shorter one with 1s (NumPy broadcasting semantics).

    Inputs without a length (scalars) are treated as zero-dimensional.
    """
    # BUG FIX: the original used bare `except:` clauses, which also swallow
    # KeyboardInterrupt/SystemExit; len() raises TypeError for unsized objects.
    try:
        Nx = len(x)
    except TypeError:
        Nx = 0
    try:
        Ny = len(y)
    except TypeError:
        Ny = 0
    if Nx == Ny == 0:
        # both scalars -> two empty shapes
        return empty, empty
    elif Nx == Ny:
        return asarray(x), asarray(y)
    else:
        # left-pad the shorter shape with broadcast dimensions of size 1
        # NOTE(review): if exactly one input is unsized (scalar), the
        # concatenate below will fail -- confirm callers never hit this.
        diff = abs(Nx - Ny)
        front = ones(diff, int)
        if Nx > Ny:
            return asarray(x), concatenate((front, y))
        elif Ny > Nx:
            return concatenate((front, x)), asarray(y)
def binary_op_size(xx, yy):
    """ This returns the resulting size from operating on xx, and yy
        with a binary operator. It accounts for broadcasting, and
        throws errors if the array sizes are incompatible.
    """
    shape_a, shape_b = make_same_length(xx, yy)
    result = zeros(len(shape_a))
    for axis, (da, db) in enumerate(zip(shape_a, shape_b)):
        if da == db:
            result[axis] = da
        elif da == 1:
            # size-1 dimensions broadcast against the other operand
            result[axis] = db
        elif db == 1:
            result[axis] = da
        else:
            # offer more information here about which variables.
            raise ValueError("frames are not aligned")
    return result
class dummy_array(object):
    """Array stand-in that records only a shape, no data.

    Arithmetic between dummy_arrays (or with scalars) yields a new
    dummy_array whose shape is the broadcast of the operands' shapes,
    so expression size checking can run without allocating arrays.
    """
    def __init__(self, ary, ary_is_shape=0, name=None):
        # ary_is_shape: truthy means `ary` already IS the shape
        # sequence; otherwise the shape is taken from `ary` itself.
        self.name = name
        if ary_is_shape:
            self.shape = ary
        else:
            try:
                self.shape = shape(ary)
            # FIX: narrowed from a bare `except:`; shapeless objects
            # fall back to the 0-d sentinel.
            except Exception:
                self.shape = empty
    def binary_op(self, other):
        """Return a dummy_array shaped like `self <op> other`."""
        try:
            x = other.shape
        except AttributeError:
            x = empty  # scalars broadcast against anything
        new_shape = binary_op_size(self.shape, x)
        return dummy_array(new_shape, 1)
    def __cmp__(self, other):
        # Not an exact comparison, but adequate for == (Numeric cluge).
        if isnumeric(other):
            return 0
        if len(self.shape) == len(other.shape) == 0:
            return 0
        return not alltrue(equal(self.shape, other.shape), axis=0)
    # Binary operators all reduce to shape broadcasting.
    def __add__(self, other): return self.binary_op(other)
    def __radd__(self, other): return self.binary_op(other)
    def __sub__(self, other): return self.binary_op(other)
    def __rsub__(self, other): return self.binary_op(other)
    def __mul__(self, other): return self.binary_op(other)
    def __rmul__(self, other): return self.binary_op(other)
    def __div__(self, other): return self.binary_op(other)
    def __rdiv__(self, other): return self.binary_op(other)
    def __mod__(self, other): return self.binary_op(other)
    def __rmod__(self, other): return self.binary_op(other)
    def __lshift__(self, other): return self.binary_op(other)
    def __rshift__(self, other): return self.binary_op(other)
    # Unary operators preserve shape.  BUG FIX: the originals declared
    # a spurious `other` parameter, so -x, +x, abs(x) and ~x raised
    # TypeError; unary dunders take only `self`.
    def __neg__(self): return self
    def __pos__(self): return self
    def __abs__(self): return self
    def __invert__(self): return self
    # Coercion ops are deliberately ignored for now.
    #
    # not currently supported by compiler.
    # __divmod__
    # __pow__
    # __rpow__
    # __and__
    # __or__
    # __xor__
    # item access and slicing
    def __setitem__(self, indices, val):
        # Assignment cannot change the shape, so this is a no-op here.
        pass
    def __len__(self):
        return self.shape[0]
    def __getslice__(self, i, j):
        # Py2 slice protocol; normalize and delegate to __getitem__.
        i = max(i, 0); j = max(j, 0)
        return self.__getitem__((slice(i, j),))
    def __getitem__(self, indices):
        """Compute the shape produced by indexing/slicing this array."""
        if not isinstance(indices, tuple):
            indices = (indices,)
        if Ellipsis in indices:
            raise IndexError("Ellipsis not currently supported")
        new_dims = []
        dim = 0
        for index in indices:
            try:
                dim_len = self.shape[dim]
            except IndexError:
                # FIX: message typo ("To many" -> "Too many")
                raise IndexError("Too many indices specified")
            if (index is empty_slice):
                # NOTE(review): a bare ":" sets slc_len but never
                # appends it below -- verify this is intended.
                slc_len = dim_len
            elif isinstance(index, slice):
                beg, end, step = index.start, index.stop, index.step
                if beg is None: beg = 0
                if end == sys.maxint or end is None:
                    end = dim_len
                if step is None:
                    step = 1
                if beg < 0: beg += dim_len
                if end < 0: end += dim_len
                # the following is list-like clamping behavior,
                # which isn't adhered to by arrays.
                if beg < 0: beg = 0
                if beg > dim_len: beg = dim_len
                if end < 0: end = 0
                if end > dim_len: end = dim_len
                # normalize degenerate and negative-step cases
                if beg == end:
                    beg, end, step = 0, 0, 1
                elif beg >= dim_len and step > 0:
                    beg, end, step = 0, 0, 1
                elif step > 0 and beg <= end:
                    pass
                # handle [::-1] and [-1::-1] correctly
                elif step > 0 and beg > end:
                    beg, end, step = 0, 0, 1
                elif(step < 0 and index.start is None and index.stop is None):
                    beg, end, step = 0, dim_len, -step
                elif(step < 0 and index.start is None):
                    # +1 because negative stepping is inclusive
                    beg, end, step = end + 1, dim_len, -step
                elif(step < 0 and index.stop is None):
                    beg, end, step = 0, beg + 1, -step
                elif(step < 0 and beg > end):
                    beg, end, step = end, beg, -step
                elif(step < 0 and beg < end):
                    beg, end, step = 0, 0, -step
                slc_len = abs(divide(end - beg - 1, step) + 1)
                new_dims.append(slc_len)
            else:
                if index < 0: index += dim_len
                if index >= 0 and index < dim_len:
                    # integer index reduces the array dimensions by one
                    pass
                else:
                    raise IndexError("Index out of range")
            dim += 1
        new_dims.extend(self.shape[dim:])
        if 0 in new_dims:
            raise IndexError("Zero length slices not currently supported")
        return dummy_array(new_dims, 1)
    def __repr__(self):
        val = str((self.name, str(self.shape)))
        return val
def unary(ary):
    """Shape propagator for elementwise unary ufuncs: the output has
    exactly the input's size, so the argument is returned unchanged."""
    return ary
def not_implemented(ary):
    """Placeholder propagator for ufuncs the compiler cannot handle
    yet; passes the size through unchanged."""
    return ary
# These ufuncs come from the Numeric star-import; the original note
# says they "need to be reassigned" to the size-propagating stand-ins.
unary_op = [arccos, arcsin, arctan, cos, cosh, sin, sinh,
            exp,ceil,floor,fabs,log,log10,sqrt]
unsupported = [argmin,argmax, argsort,around, absolute,sign,negative,floor]
# NOTE(review): these loops are no-ops -- rebinding the loop variable
# `func` does not replace the module-level names, so arccos etc. are
# never actually reassigned.  Verify whether a globals()-level
# rebinding was intended here.
for func in unary_op:
    func = unary
for func in unsupported:
    func = not_implemented
def reduction(ary, axis=0):
    """Size propagation for a reduction along `axis`: the result is
    the input shape with that axis removed.  Negative axes count from
    the end; an out-of-range axis raises ValueError."""
    ndim = len(ary.shape)
    if axis < 0:
        axis += ndim
    if not 0 <= axis < ndim:
        raise ValueError("Dimension not in array")
    remaining = list(ary.shape[:axis]) + list(ary.shape[axis + 1:])
    return dummy_array(remaining, 1)
# functions currently not supported by compiler
# reductions are gonna take some array reordering for the general case,
# so this is gonna take some thought (probably some tree manipulation).
def take(ary, axis=0):
    """Not implemented.  FIX: raise NotImplementedError -- the original
    raised the NotImplemented singleton, which is not an exception and
    itself triggers a TypeError."""
    raise NotImplementedError
# and all the rest
| |
#!/usr/bin/python
import systemutils
import rpc
from lib import flaptor_logging
from lib.monitor import Monitor
from nebu.models import Index, Worker, Deploy
from flaptor.indextank.rpc.ttypes import IndexerStatus, NebuException
import datetime
class DeployPingMonitor(Monitor):
    """Pings the indexer behind every writable deploy and raises a
    PagerDuty alert when a deploy stops answering."""
    def __init__(self):
        super(DeployPingMonitor, self).__init__(pagerduty_email='index-monitor@flaptor.pagerduty.com')
        self.failure_threshold = 5
        self.fatal_failure_threshold = 20
        self.period = 30
    def iterable(self):
        deploys = Deploy.objects.all().select_related('index')
        return (deploy for deploy in deploys if deploy.is_writable())
    def monitor(self, deploy):
        deploy.index  # touch the FK so a broken index reference fails right here
        try:
            port = int(deploy.base_port)
            client = rpc.getThriftIndexerClient(deploy.worker.lan_dns, port, 5000)
            client.ping()
            return True
        except Exception:
            self.logger.exception("Failed to ping deploy %s for index %s", deploy.id, deploy.index.code)
            self.err_msg = self.describe_error()
            return False
    def alert_title(self, deploy):
        return 'Unable to ping index %s deploy id %d' % (deploy.index.code, deploy.id)
    def alert_msg(self, deploy):
        return 'A writable deploy [%d] is failing to answer to ping.\n\n%s\n\n%s' % (deploy.id, deploy.index.get_debug_info(), self.err_msg)
class IndexSizeMonitor(Monitor):
    """Periodically reads each ready index's document count from its
    searcher and persists it on the Index row."""
    def __init__(self):
        super(IndexSizeMonitor, self).__init__(pagerduty_email='index-monitor@flaptor.pagerduty.com')
        self.failure_threshold = 2
        self.fatal_failure_threshold = 5
        self.period = 120
    def iterable(self):
        return (i for i in Index.objects.all() if i.is_ready() and not i.deleted)
    def monitor(self, index):
        try:
            self.logger.debug("Fetching size for index %s" , index.code)
            searcher = rpc.get_searcher_client(index, 10000)
            current_size = searcher.size()
            Index.objects.filter(id=index.id).update(current_docs_number=current_size)
            # BUG FIX: QuerySet.update() does not refresh the in-memory
            # instance, so the original logged the stale
            # index.current_docs_number; log the freshly fetched value.
            self.logger.info("Updated size for index %s: %d" , index.code, current_size)
            return True
        except Exception:
            self.logger.exception("Failed to update size for index %s" , index.code)
            self.err_msg = self.describe_error()
            return False
    def alert_title(self, index):
        return "Failed to fetch size for index %s" % index.code
    def alert_msg(self, index):
        return 'An IndexEngine is failing when attempting to query its size via thrift.\n\n%s\n\n%s' % (index.get_debug_info(), self.err_msg)
class ServiceDeploys(Monitor):
    """Drives the deploy manager's service_deploys() housekeeping call
    every couple of seconds and alerts on NebuException."""
    def __init__(self):
        super(ServiceDeploys, self).__init__(pagerduty_email='index-monitor@flaptor.pagerduty.com')
        self.failure_threshold = 1
        self.period = 2
    def monitor(self, object):
        try:
            rpc.get_deploy_manager().service_deploys()
            return True
        # FIX: modern `except ... as` form replaces the deprecated
        # comma syntax (removed in Python 3; `as` works since 2.6).
        except NebuException as e:
            self.nebu_e = e
            return False
    def alert_title(self, object):
        return "Nebu exception"
    def alert_msg(self, object):
        return self.nebu_e.message
class ServiceWorkers(Monitor):
    """Asks the worker manager to refresh the status of every worker
    that is not yet ready, alerting when that RPC fails."""
    def __init__(self):
        super(ServiceWorkers, self).__init__(pagerduty_email='index-monitor@flaptor.pagerduty.com')
        self.failure_threshold = 1
        self.period = 30
    def iterable(self):
        # only workers that are still coming up need a status refresh
        return (w for w in Worker.objects.all() if not w.is_ready())
    def monitor(self, worker):
        try:
            rpc.getThriftWorkerManagerClient('workermanager').update_status(worker.instance_name)
            return True
        # FIX: modern `except ... as` form (comma syntax is deprecated).
        except NebuException as e:
            self.nebu_e = e
            return False
    def alert_title(self, worker):
        return "Nebu exception for worker id %d" % worker.id
    def alert_msg(self, worker):
        # BUG FIX: the original format string had only three %s
        # placeholders for four arguments, which raised TypeError
        # ("not all arguments converted") whenever an alert fired.
        return "INFO ABOUT THE WORKER\ninstance id: %s\nwan_dns: %s\nlan_dns: %s\n\nError message: %s" % (worker.instance_name, worker.wan_dns, worker.lan_dns, self.nebu_e.message)
class WorkerFreeDiskMonitor(Monitor):
    """Alerts when any filesystem on a ready worker drops below
    `threshold` percent free space.

    threshold -- minimum acceptable free-space percentage (0-100).
    """
    def __init__(self, threshold):
        super(WorkerFreeDiskMonitor, self).__init__(pagerduty_email='index-monitor@flaptor.pagerduty.com')
        self.failure_threshold = 1
        self.period = 60
        self.threshold = threshold
    def get_fs_sizes(self, worker):
        # RPC to the worker controller; yields (mount, (used, available)) pairs
        controller = rpc.get_worker_controller(worker)
        worker_stats = controller.get_worker_mount_stats()
        return worker_stats.fs_sizes.items()
    def iterable(self):
        # One (worker, filesystem) pair per mounted filesystem.
        # BUG FIX: filter on is_ready() BEFORE calling get_fs_sizes(),
        # so we no longer issue RPCs to workers that are not ready
        # (the original called get_fs_sizes() for every worker and
        # only then discarded the non-ready ones).
        return [(w, fs)
                for w in Worker.objects.all() if w.is_ready()
                for fs in self.get_fs_sizes(w)]
    def monitor(self, info):
        worker, (fs, (used, available)) = info
        self.logger.debug('Checking free space on %s for worker %s', fs, worker.wan_dns)
        # NOTE(review): raises ZeroDivisionError if used+available == 0 --
        # confirm the controller never reports an empty filesystem.
        ratio = float(available) / (available + used)
        return ratio * 100 > self.threshold
    def alert_title(self, info):
        worker, (fs, _) = info
        return 'Filesystem %s free space below %d%% for worker id %d' % (fs, self.threshold, worker.id)
    def alert_msg(self, info):
        worker, (fs, (used, available)) = info
        ratio = float(available) / (available + used)
        return 'Worker %d\nFilesystem mounted on %s has only %d%% of available space (%d free of %d)\n\nINFO ABOUT THE WORKER\ninstance id: %s\nwan_dns: %s\nlan_dns: %s' % (worker.id, fs, (ratio * 100), available, used, worker.instance_name, worker.wan_dns, worker.lan_dns)
class FrontendFreeDiskMonitor(Monitor):
    """Alerts when a frontend-machine filesystem drops below
    `threshold` percent free space."""
    def __init__(self, threshold):
        super(FrontendFreeDiskMonitor, self).__init__(pagerduty_email='index-monitor@flaptor.pagerduty.com')
        self.failure_threshold = 1
        self.period = 60
        self.threshold = threshold
    def iterable(self):
        # One (filesystem, (used, available)) entry per local mount.
        # FIX: list(...) replaces the redundant identity comprehension.
        return list(systemutils.get_available_sizes().items())
    def monitor(self, info):
        fs, (used, available) = info
        self.logger.debug('Checking free space on %s for the frontend', fs)
        ratio = float(available) / (available + used)
        return ratio * 100 > self.threshold
    def alert_title(self, info):
        fs, _ = info
        return 'Filesystem %s free space below %d%% for FRONTEND machine' % (fs, self.threshold)
    def alert_msg(self, info):
        fs, (used, available) = info
        ratio = float(available) / (available + used)
        return 'Frontend\nFilesystem mounted on %s has only %d%% of available space (%d free of %d)' % (fs, (ratio * 100), available, used)
class IndexStartedMonitor(Monitor):
    """Alerts when an index that is neither ready, hibernated nor
    deleted has not come up within five minutes of creation."""
    def __init__(self):
        super(IndexStartedMonitor, self).__init__(pagerduty_email='index-monitor@flaptor.pagerduty.com')
        self.failure_threshold = 1
        self.period = 60
    def iterable(self):
        return (i for i in Index.objects.all()
                if not i.is_ready() and not i.is_hibernated() and not i.deleted)
    def monitor(self, index):
        age = datetime.datetime.now() - index.creation_time
        return age < datetime.timedelta(minutes=5)
    def alert_title(self, index):
        return 'Index %s hasn\'t started in at least 5 minutes' % (index.code)
    def alert_msg(self, index):
        return 'The following index hasn\'t started in more than 5 minutes:\n\n%s' % (index.get_debug_info())
class MoveIncompleteMonitor(Monitor):
    """Alerts when a deploy has been stuck in the `moving` state for
    more than four hours."""
    def __init__(self):
        super(MoveIncompleteMonitor, self).__init__(pagerduty_email='index-monitor@flaptor.pagerduty.com')
        self.failure_threshold = 1
        self.period = 360
    def iterable(self):
        return Deploy.objects.filter(status=Deploy.States.moving)
    def monitor(self, deploy):
        moving_for = datetime.datetime.now() - deploy.timestamp
        return moving_for < datetime.timedelta(hours=4)
    def alert_title(self, deploy):
        return 'Index %s has been moving for over 4 hours' % (deploy.index.code)
    def alert_msg(self, deploy):
        return 'The following index has been moving for more than 4 hours:\n\n%s' % (deploy.index.get_debug_info())
class RecoveryErrorMonitor(Monitor):
    """Polls deploys in the `recovering` state and alerts when the
    indexer reports a recovery error (status=3)."""
    def __init__(self):
        super(RecoveryErrorMonitor, self).__init__(pagerduty_email='index-monitor@flaptor.pagerduty.com')
        self.failure_threshold = 1
        self.period = 360
    def iterable(self):
        return Deploy.objects.filter(status=Deploy.States.recovering)
    def monitor(self, deploy):
        # Ask the indexer for its recovery status; anything other than
        # an explicit error state is considered healthy.
        port = int(deploy.base_port)
        indexer = rpc.getThriftIndexerClient(deploy.worker.lan_dns, port, 10000)
        return indexer.getStatus() != IndexerStatus.error
    def alert_title(self, deploy):
        return 'Recovery failed for index %s' % (deploy.index.code)
    def alert_msg(self, deploy):
        return 'The following index has a recovering deploy that failed:\n\n%s' % (deploy.index.get_debug_info())
class DeployInitializedMonitor(Monitor):
    """Alerts when a deploy stays in `initializing` (startIndex.sh ran
    but thrift is not answering) for more than 20 seconds."""
    def __init__(self):
        super(DeployInitializedMonitor, self).__init__(pagerduty_email='index-monitor@flaptor.pagerduty.com')
        self.failure_threshold = 1
        self.period = 20
    def iterable(self):
        return Deploy.objects.filter(status=Deploy.States.initializing)
    def monitor(self, deploy):
        initializing_for = datetime.datetime.now() - deploy.timestamp
        return initializing_for < datetime.timedelta(seconds=20)
    def alert_title(self, deploy):
        return 'Deploy %d has been initializing for over 20 seconds' % (deploy.id)
    def alert_msg(self, deploy):
        return 'A deploy has been started more than 20 seconds ago (i.e. startIndex.sh has been executed) and it\'s still not responding to its thrift interface.\n\nDeploy id: %d\n\nIndex info:\n%s' % (deploy.id, deploy.index.get_debug_info())
if __name__ == '__main__':
    # Start every monitor; start() comes from lib.monitor.Monitor
    # (presumably launches the periodic monitoring loop -- see that class).
    DeployPingMonitor().start()
    IndexSizeMonitor().start()
    ServiceDeploys().start()
    ServiceWorkers().start()
    # Disk monitors run at escalating thresholds (15%, 10%, 5% free)
    # so alerts repeat as space keeps shrinking.
    WorkerFreeDiskMonitor(15).start()
    WorkerFreeDiskMonitor(10).start()
    WorkerFreeDiskMonitor(5).start()
    FrontendFreeDiskMonitor(15).start()
    FrontendFreeDiskMonitor(10).start()
    FrontendFreeDiskMonitor(5).start()
    IndexStartedMonitor().start()
    MoveIncompleteMonitor().start()
    RecoveryErrorMonitor().start()
    DeployInitializedMonitor().start()
| |
import operator
import unittest
import inspect
from test import test_support
class Seq1:
    """Classic (old-style, Py2) sequence wrapper around a list; used to
    exercise operator.concat/repeat against user-defined sequences."""
    def __init__(self, lst):
        self.lst = lst
    def __len__(self):
        return len(self.lst)
    def __getitem__(self, i):
        return self.lst[i]
    def __add__(self, other):
        # concat: returns a plain list, not a Seq1
        return self.lst + other.lst
    def __mul__(self, other):
        # repeat: `other` is the integer repetition count
        return self.lst * other
    def __rmul__(self, other):
        return other * self.lst
class Seq2(object):
    """New-style counterpart of Seq1; the tests run both to cover the
    old-style and new-style class code paths."""
    def __init__(self, lst):
        self.lst = lst
    def __len__(self):
        return len(self.lst)
    def __getitem__(self, i):
        return self.lst[i]
    def __add__(self, other):
        # concat: returns a plain list, not a Seq2
        return self.lst + other.lst
    def __mul__(self, other):
        # repeat: `other` is the integer repetition count
        return self.lst * other
    def __rmul__(self, other):
        return other * self.lst
class OperatorTestCase(unittest.TestCase):
def test_missing_module_attribute(self):
skip = {'__subclasshook__', '__new__'}
def _predicate(member):
return inspect.isbuiltin(member) and member.__name__ not in skip
objects = inspect.getmembers(operator, predicate=_predicate)
for _, value in objects:
self.assertEqual(value.__module__, "operator", value)
def test_lt(self):
self.failUnlessRaises(TypeError, operator.lt)
self.failUnlessRaises(TypeError, operator.lt, 1j, 2j)
self.failIf(operator.lt(1, 0))
self.failIf(operator.lt(1, 0.0))
self.failIf(operator.lt(1, 1))
self.failIf(operator.lt(1, 1.0))
self.failUnless(operator.lt(1, 2))
self.failUnless(operator.lt(1, 2.0))
def test_le(self):
self.failUnlessRaises(TypeError, operator.le)
self.failUnlessRaises(TypeError, operator.le, 1j, 2j)
self.failIf(operator.le(1, 0))
self.failIf(operator.le(1, 0.0))
self.failUnless(operator.le(1, 1))
self.failUnless(operator.le(1, 1.0))
self.failUnless(operator.le(1, 2))
self.failUnless(operator.le(1, 2.0))
def test_eq(self):
class C(object):
def __eq__(self, other):
raise SyntaxError
__hash__ = None # Silence Py3k warning
self.failUnlessRaises(TypeError, operator.eq)
self.failUnlessRaises(SyntaxError, operator.eq, C(), C())
self.failIf(operator.eq(1, 0))
self.failIf(operator.eq(1, 0.0))
self.failUnless(operator.eq(1, 1))
self.failUnless(operator.eq(1, 1.0))
self.failIf(operator.eq(1, 2))
self.failIf(operator.eq(1, 2.0))
def test_ne(self):
class C(object):
def __ne__(self, other):
raise SyntaxError
self.failUnlessRaises(TypeError, operator.ne)
self.failUnlessRaises(SyntaxError, operator.ne, C(), C())
self.failUnless(operator.ne(1, 0))
self.failUnless(operator.ne(1, 0.0))
self.failIf(operator.ne(1, 1))
self.failIf(operator.ne(1, 1.0))
self.failUnless(operator.ne(1, 2))
self.failUnless(operator.ne(1, 2.0))
def test_ge(self):
self.failUnlessRaises(TypeError, operator.ge)
self.failUnlessRaises(TypeError, operator.ge, 1j, 2j)
self.failUnless(operator.ge(1, 0))
self.failUnless(operator.ge(1, 0.0))
self.failUnless(operator.ge(1, 1))
self.failUnless(operator.ge(1, 1.0))
self.failIf(operator.ge(1, 2))
self.failIf(operator.ge(1, 2.0))
def test_gt(self):
self.failUnlessRaises(TypeError, operator.gt)
self.failUnlessRaises(TypeError, operator.gt, 1j, 2j)
self.failUnless(operator.gt(1, 0))
self.failUnless(operator.gt(1, 0.0))
self.failIf(operator.gt(1, 1))
self.failIf(operator.gt(1, 1.0))
self.failIf(operator.gt(1, 2))
self.failIf(operator.gt(1, 2.0))
def test_abs(self):
self.failUnlessRaises(TypeError, operator.abs)
self.failUnlessRaises(TypeError, operator.abs, None)
self.failUnless(operator.abs(-1) == 1)
self.failUnless(operator.abs(1) == 1)
def test_add(self):
self.failUnlessRaises(TypeError, operator.add)
self.failUnlessRaises(TypeError, operator.add, None, None)
self.failUnless(operator.add(3, 4) == 7)
def test_bitwise_and(self):
self.failUnlessRaises(TypeError, operator.and_)
self.failUnlessRaises(TypeError, operator.and_, None, None)
self.failUnless(operator.and_(0xf, 0xa) == 0xa)
def test_concat(self):
self.failUnlessRaises(TypeError, operator.concat)
self.failUnlessRaises(TypeError, operator.concat, None, None)
self.failUnless(operator.concat('py', 'thon') == 'python')
self.failUnless(operator.concat([1, 2], [3, 4]) == [1, 2, 3, 4])
self.failUnless(operator.concat(Seq1([5, 6]), Seq1([7])) == [5, 6, 7])
self.failUnless(operator.concat(Seq2([5, 6]), Seq2([7])) == [5, 6, 7])
if not test_support.is_jython:
# Jython concat is add
self.failUnlessRaises(TypeError, operator.concat, 13, 29)
def test_countOf(self):
self.failUnlessRaises(TypeError, operator.countOf)
self.failUnlessRaises(TypeError, operator.countOf, None, None)
self.failUnless(operator.countOf([1, 2, 1, 3, 1, 4], 3) == 1)
self.failUnless(operator.countOf([1, 2, 1, 3, 1, 4], 5) == 0)
def test_delitem(self):
a = [4, 3, 2, 1]
self.failUnlessRaises(TypeError, operator.delitem, a)
self.failUnlessRaises(TypeError, operator.delitem, a, None)
self.failUnless(operator.delitem(a, 1) is None)
self.assert_(a == [4, 2, 1])
def test_delslice(self):
a = range(10)
self.failUnlessRaises(TypeError, operator.delslice, a)
self.failUnlessRaises(TypeError, operator.delslice, a, None, None)
self.failUnless(operator.delslice(a, 2, 8) is None)
self.assert_(a == [0, 1, 8, 9])
operator.delslice(a, 0, test_support.MAX_Py_ssize_t)
self.assert_(a == [])
def test_div(self):
self.failUnlessRaises(TypeError, operator.div, 5)
self.failUnlessRaises(TypeError, operator.div, None, None)
self.failUnless(operator.floordiv(5, 2) == 2)
def test_floordiv(self):
self.failUnlessRaises(TypeError, operator.floordiv, 5)
self.failUnlessRaises(TypeError, operator.floordiv, None, None)
self.failUnless(operator.floordiv(5, 2) == 2)
def test_truediv(self):
self.failUnlessRaises(TypeError, operator.truediv, 5)
self.failUnlessRaises(TypeError, operator.truediv, None, None)
self.failUnless(operator.truediv(5, 2) == 2.5)
def test_getitem(self):
a = range(10)
self.failUnlessRaises(TypeError, operator.getitem)
self.failUnlessRaises(TypeError, operator.getitem, a, None)
self.failUnless(operator.getitem(a, 2) == 2)
def test_getslice(self):
a = range(10)
self.failUnlessRaises(TypeError, operator.getslice)
self.failUnlessRaises(TypeError, operator.getslice, a, None, None)
self.failUnless(operator.getslice(a, 4, 6) == [4, 5])
b = operator.getslice(a, 0, test_support.MAX_Py_ssize_t)
self.assert_(b == a)
def test_indexOf(self):
self.failUnlessRaises(TypeError, operator.indexOf)
self.failUnlessRaises(TypeError, operator.indexOf, None, None)
self.failUnless(operator.indexOf([4, 3, 2, 1], 3) == 1)
self.assertRaises(ValueError, operator.indexOf, [4, 3, 2, 1], 0)
def test_invert(self):
self.failUnlessRaises(TypeError, operator.invert)
self.failUnlessRaises(TypeError, operator.invert, None)
self.failUnless(operator.inv(4) == -5)
def test_isCallable(self):
self.failUnlessRaises(TypeError, operator.isCallable)
class C:
pass
def check(self, o, v):
self.assertEqual(operator.isCallable(o), v)
with test_support.check_py3k_warnings():
self.assertEqual(callable(o), v)
check(self, 4, 0)
check(self, operator.isCallable, 1)
check(self, C, 1)
check(self, C(), 0)
def test_isMappingType(self):
self.failUnlessRaises(TypeError, operator.isMappingType)
self.failIf(operator.isMappingType(1))
self.failIf(operator.isMappingType(operator.isMappingType))
self.failUnless(operator.isMappingType(operator.__dict__))
self.failUnless(operator.isMappingType({}))
def test_isNumberType(self):
self.failUnlessRaises(TypeError, operator.isNumberType)
self.failUnless(operator.isNumberType(8))
self.failUnless(operator.isNumberType(8j))
self.failUnless(operator.isNumberType(8L))
self.failUnless(operator.isNumberType(8.3))
self.failIf(operator.isNumberType(dir()))
def test_isSequenceType(self):
self.failUnlessRaises(TypeError, operator.isSequenceType)
self.failUnless(operator.isSequenceType(dir()))
self.failUnless(operator.isSequenceType(()))
self.failUnless(operator.isSequenceType(xrange(10)))
self.failUnless(operator.isSequenceType('yeahbuddy'))
self.failIf(operator.isSequenceType(3))
class Dict(dict): pass
self.failIf(operator.isSequenceType(Dict()))
def test_lshift(self):
self.failUnlessRaises(TypeError, operator.lshift)
self.failUnlessRaises(TypeError, operator.lshift, None, 42)
self.failUnless(operator.lshift(5, 1) == 10)
self.failUnless(operator.lshift(5, 0) == 5)
self.assertRaises(ValueError, operator.lshift, 2, -1)
def test_mod(self):
self.failUnlessRaises(TypeError, operator.mod)
self.failUnlessRaises(TypeError, operator.mod, None, 42)
self.failUnless(operator.mod(5, 2) == 1)
def test_mul(self):
self.failUnlessRaises(TypeError, operator.mul)
self.failUnlessRaises(TypeError, operator.mul, None, None)
self.failUnless(operator.mul(5, 2) == 10)
def test_neg(self):
self.failUnlessRaises(TypeError, operator.neg)
self.failUnlessRaises(TypeError, operator.neg, None)
self.failUnless(operator.neg(5) == -5)
self.failUnless(operator.neg(-5) == 5)
self.failUnless(operator.neg(0) == 0)
self.failUnless(operator.neg(-0) == 0)
def test_bitwise_or(self):
self.failUnlessRaises(TypeError, operator.or_)
self.failUnlessRaises(TypeError, operator.or_, None, None)
self.failUnless(operator.or_(0xa, 0x5) == 0xf)
def test_pos(self):
self.failUnlessRaises(TypeError, operator.pos)
self.failUnlessRaises(TypeError, operator.pos, None)
self.failUnless(operator.pos(5) == 5)
self.failUnless(operator.pos(-5) == -5)
self.failUnless(operator.pos(0) == 0)
self.failUnless(operator.pos(-0) == 0)
def test_pow(self):
self.failUnlessRaises(TypeError, operator.pow)
self.failUnlessRaises(TypeError, operator.pow, None, None)
self.failUnless(operator.pow(3,5) == 3**5)
self.failUnless(operator.__pow__(3,5) == 3**5)
self.assertRaises(TypeError, operator.pow, 1)
self.assertRaises(TypeError, operator.pow, 1, 2, 3)
def test_repeat(self):
a = range(3)
self.failUnlessRaises(TypeError, operator.repeat)
self.failUnlessRaises(TypeError, operator.repeat, a, None)
self.failUnless(operator.repeat(a, 2) == a+a)
self.failUnless(operator.repeat(a, 1) == a)
self.failUnless(operator.repeat(a, 0) == [])
a = (1, 2, 3)
self.failUnless(operator.repeat(a, 2) == a+a)
self.failUnless(operator.repeat(a, 1) == a)
self.failUnless(operator.repeat(a, 0) == ())
a = '123'
self.failUnless(operator.repeat(a, 2) == a+a)
self.failUnless(operator.repeat(a, 1) == a)
self.failUnless(operator.repeat(a, 0) == '')
a = Seq1([4, 5, 6])
self.failUnless(operator.repeat(a, 2) == [4, 5, 6, 4, 5, 6])
self.failUnless(operator.repeat(a, 1) == [4, 5, 6])
self.failUnless(operator.repeat(a, 0) == [])
a = Seq2([4, 5, 6])
self.failUnless(operator.repeat(a, 2) == [4, 5, 6, 4, 5, 6])
self.failUnless(operator.repeat(a, 1) == [4, 5, 6])
self.failUnless(operator.repeat(a, 0) == [])
if not test_support.is_jython:
# Jython repeat is mul
self.failUnlessRaises(TypeError, operator.repeat, 6, 7)
def test_rshift(self):
self.failUnlessRaises(TypeError, operator.rshift)
self.failUnlessRaises(TypeError, operator.rshift, None, 42)
self.failUnless(operator.rshift(5, 1) == 2)
self.failUnless(operator.rshift(5, 0) == 5)
self.assertRaises(ValueError, operator.rshift, 2, -1)
def test_contains(self):
self.assertRaises(TypeError, operator.contains)
self.assertRaises(TypeError, operator.contains, None, None)
self.assertTrue(operator.contains(range(4), 2))
self.assertFalse(operator.contains(range(4), 5))
self.assertTrue(operator.sequenceIncludes(range(4), 2))
self.assertFalse(operator.sequenceIncludes(range(4), 5))
def test_setitem(self):
a = range(3)
self.failUnlessRaises(TypeError, operator.setitem, a)
self.failUnlessRaises(TypeError, operator.setitem, a, None, None)
self.failUnless(operator.setitem(a, 0, 2) is None)
self.assert_(a == [2, 1, 2])
self.assertRaises(IndexError, operator.setitem, a, 4, 2)
def test_setslice(self):
a = range(4)
self.failUnlessRaises(TypeError, operator.setslice, a)
self.failUnlessRaises(TypeError, operator.setslice, a, None, None, None)
self.failUnless(operator.setslice(a, 1, 3, [2, 1]) is None)
self.assert_(a == [0, 2, 1, 3])
operator.setslice(a, 0, test_support.MAX_Py_ssize_t, [])
self.assert_(a == [])
def test_sub(self):
self.failUnlessRaises(TypeError, operator.sub)
self.failUnlessRaises(TypeError, operator.sub, None, None)
self.failUnless(operator.sub(5, 2) == 3)
def test_truth(self):
class C(object):
def __nonzero__(self):
raise SyntaxError
self.failUnlessRaises(TypeError, operator.truth)
self.failUnlessRaises(SyntaxError, operator.truth, C())
self.failUnless(operator.truth(5))
self.failUnless(operator.truth([0]))
self.failIf(operator.truth(0))
self.failIf(operator.truth([]))
def test_bitwise_xor(self):
self.failUnlessRaises(TypeError, operator.xor)
self.failUnlessRaises(TypeError, operator.xor, None, None)
self.failUnless(operator.xor(0xb, 0xc) == 0x7)
def test_is(self):
a = b = 'xyzpdq'
c = a[:3] + b[3:]
self.failUnlessRaises(TypeError, operator.is_)
self.failUnless(operator.is_(a, b))
self.failIf(operator.is_(a,c))
def test_is_not(self):
a = b = 'xyzpdq'
c = a[:3] + b[3:]
self.failUnlessRaises(TypeError, operator.is_not)
self.failIf(operator.is_not(a, b))
self.failUnless(operator.is_not(a,c))
def test_attrgetter(self):
class A:
pass
a = A()
a.name = 'arthur'
f = operator.attrgetter('name')
self.assertEqual(f(a), 'arthur')
f = operator.attrgetter('rank')
self.assertRaises(AttributeError, f, a)
f = operator.attrgetter(2)
self.assertRaises(TypeError, f, a)
self.assertRaises(TypeError, operator.attrgetter)
# multiple gets
record = A()
record.x = 'X'
record.y = 'Y'
record.z = 'Z'
self.assertEqual(operator.attrgetter('x','z','y')(record), ('X', 'Z', 'Y'))
self.assertRaises(TypeError, operator.attrgetter('x', (), 'y'), record)
class C(object):
def __getattr__(self, name):
raise SyntaxError
self.failUnlessRaises(SyntaxError, operator.attrgetter('foo'), C())
# recursive gets
a = A()
a.name = 'arthur'
a.child = A()
a.child.name = 'thomas'
f = operator.attrgetter('child.name')
self.assertEqual(f(a), 'thomas')
self.assertRaises(AttributeError, f, a.child)
f = operator.attrgetter('name', 'child.name')
self.assertEqual(f(a), ('arthur', 'thomas'))
f = operator.attrgetter('name', 'child.name', 'child.child.name')
self.assertRaises(AttributeError, f, a)
a.child.child = A()
a.child.child.name = 'johnson'
f = operator.attrgetter('child.child.name')
self.assertEqual(f(a), 'johnson')
f = operator.attrgetter('name', 'child.name', 'child.child.name')
self.assertEqual(f(a), ('arthur', 'thomas', 'johnson'))
def test_itemgetter(self):
a = 'ABCDE'
f = operator.itemgetter(2)
self.assertEqual(f(a), 'C')
f = operator.itemgetter(10)
self.assertRaises(IndexError, f, a)
class C(object):
def __getitem__(self, name):
raise SyntaxError
self.failUnlessRaises(SyntaxError, operator.itemgetter(42), C())
f = operator.itemgetter('name')
self.assertRaises(TypeError, f, a)
self.assertRaises(TypeError, operator.itemgetter)
d = dict(key='val')
f = operator.itemgetter('key')
self.assertEqual(f(d), 'val')
f = operator.itemgetter('nonkey')
self.assertRaises(KeyError, f, d)
# example used in the docs
inventory = [('apple', 3), ('banana', 2), ('pear', 5), ('orange', 1)]
getcount = operator.itemgetter(1)
self.assertEqual(map(getcount, inventory), [3, 2, 5, 1])
self.assertEqual(sorted(inventory, key=getcount),
[('orange', 1), ('banana', 2), ('apple', 3), ('pear', 5)])
# multiple gets
data = map(str, range(20))
self.assertEqual(operator.itemgetter(2,10,5)(data), ('2', '10', '5'))
self.assertRaises(TypeError, operator.itemgetter(2, 'x', 5), data)
def test_methodcaller(self):
self.assertRaises(TypeError, operator.methodcaller)
class A:
def foo(self, *args, **kwds):
return args[0] + args[1]
def bar(self, f=42):
return f
a = A()
f = operator.methodcaller('foo')
self.assertRaises(IndexError, f, a)
f = operator.methodcaller('foo', 1, 2)
self.assertEquals(f(a), 3)
f = operator.methodcaller('bar')
self.assertEquals(f(a), 42)
self.assertRaises(TypeError, f, a, a)
f = operator.methodcaller('bar', f=5)
self.assertEquals(f(a), 5)
def test_inplace(self):
class C(object):
def __iadd__ (self, other): return "iadd"
def __iand__ (self, other): return "iand"
def __idiv__ (self, other): return "idiv"
def __ifloordiv__(self, other): return "ifloordiv"
def __ilshift__ (self, other): return "ilshift"
def __imod__ (self, other): return "imod"
def __imul__ (self, other): return "imul"
def __ior__ (self, other): return "ior"
def __ipow__ (self, other): return "ipow"
def __irshift__ (self, other): return "irshift"
def __isub__ (self, other): return "isub"
def __itruediv__ (self, other): return "itruediv"
def __ixor__ (self, other): return "ixor"
def __getitem__(self, other): return 5 # so that C is a sequence
c = C()
self.assertEqual(operator.iadd (c, 5), "iadd")
self.assertEqual(operator.iand (c, 5), "iand")
self.assertEqual(operator.idiv (c, 5), "idiv")
self.assertEqual(operator.ifloordiv(c, 5), "ifloordiv")
self.assertEqual(operator.ilshift (c, 5), "ilshift")
self.assertEqual(operator.imod (c, 5), "imod")
self.assertEqual(operator.imul (c, 5), "imul")
self.assertEqual(operator.ior (c, 5), "ior")
self.assertEqual(operator.ipow (c, 5), "ipow")
self.assertEqual(operator.irshift (c, 5), "irshift")
self.assertEqual(operator.isub (c, 5), "isub")
self.assertEqual(operator.itruediv (c, 5), "itruediv")
self.assertEqual(operator.ixor (c, 5), "ixor")
self.assertEqual(operator.iconcat (c, c), "iadd")
self.assertEqual(operator.irepeat (c, 5), "imul")
self.assertEqual(operator.__iadd__ (c, 5), "iadd")
self.assertEqual(operator.__iand__ (c, 5), "iand")
self.assertEqual(operator.__idiv__ (c, 5), "idiv")
self.assertEqual(operator.__ifloordiv__(c, 5), "ifloordiv")
self.assertEqual(operator.__ilshift__ (c, 5), "ilshift")
self.assertEqual(operator.__imod__ (c, 5), "imod")
self.assertEqual(operator.__imul__ (c, 5), "imul")
self.assertEqual(operator.__ior__ (c, 5), "ior")
self.assertEqual(operator.__ipow__ (c, 5), "ipow")
self.assertEqual(operator.__irshift__ (c, 5), "irshift")
self.assertEqual(operator.__isub__ (c, 5), "isub")
self.assertEqual(operator.__itruediv__ (c, 5), "itruediv")
self.assertEqual(operator.__ixor__ (c, 5), "ixor")
self.assertEqual(operator.__iconcat__ (c, c), "iadd")
self.assertEqual(operator.__irepeat__ (c, 5), "imul")
def test_main(verbose=None):
    """Run the operator test suite.

    With verbose set on a debug (refcount-tracking) build, run the
    suite five more times and print total reference counts to expose
    leaks.
    """
    import sys
    test_classes = (
        OperatorTestCase,
    )
    test_support.run_unittest(*test_classes)
    # verify reference counting
    if verbose and hasattr(sys, "gettotalrefcount"):
        import gc
        counts = [None] * 5
        for i in xrange(len(counts)):
            test_support.run_unittest(*test_classes)
            gc.collect()
            counts[i] = sys.gettotalrefcount()
        # FIX: call form of print -- identical output on Python 2,
        # and the module at least parses under Python 3 tooling.
        print(counts)
# Script entry point: run the suite verbosely (enables the refcount
# leak check on debug builds of the interpreter).
if __name__ == "__main__":
    test_main(verbose=True)
| |
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, mockrobiota development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from os.path import join, exists
from os import makedirs
import re
import csv
import click
def add_lists(l1, l2):
    """Return the element-wise sum of two numeric sequences.

    Pairs are formed with ``zip``, so the result is truncated to the
    length of the shorter input.
    """
    return [a + b for a, b in zip(l1, l2)]
def choose_taxonomy(query_name, query_set, taxa_fp):
    """Interactively disambiguate a taxon that matched several taxonomies.

    Prints the candidate taxonomy strings in *query_set* and asks the
    user to pick one by number; entering ``n`` falls back to a manual
    search against *taxa_fp*. Returns the chosen taxonomy string.
    """
    print('\n\n{0} matches more than one unique taxonomy.'.format(query_name))
    print('Choose the valid taxonomy from the list below:\n')
    candidates = list(query_set)
    for idx, candidate in enumerate(candidates):
        print(idx, candidate)
    choice = input('\n\nChoose taxonomy number or "n" if none of these: ')
    if choice == 'n':
        return manual_search(query_name, taxa_fp)
    # raises ValueError/IndexError on malformed input, same as before
    return candidates[int(choice)]
def manual_search(query_name, taxa_fp):
    """Prompt the user to type a replacement lineage for an unmatched taxon.

    Used when *query_name* has no hit in the reference file *taxa_fp*;
    returns whatever taxonomy string the user enters.
    """
    print('\n\n{0} has no matches to {1}.'.format(query_name, taxa_fp))
    print('Perform a manual search of your reference database to')
    print('match the nearest basal lineage.')
    print('\nEnter the correct taxonomy for the basal lineage here:')
    return input('> ')
def parse_taxonomy_file(source):
    """Parse a tab-separated taxon/abundance table from an open file.

    The first line is a header whose columns after the first are sample
    names. Each following line is ``name<TAB>abundance...``.

    Returns ``(sample_list, taxa)`` where ``taxa`` maps each name to a
    tuple ``([genus, species], abundances)``; duplicate names have their
    abundances summed.
    """
    header = source.readline()
    sample_list = header.strip().split('\t')[1:]
    taxa = {}
    for line in source:
        fields = line.strip().split('\t')
        name = fields[0]
        # abundances arrive as text; work with floats from here on
        abundances = [float(x) for x in fields[1:]]
        # Take the last semicolon-delimited rank and split it on spaces or
        # underscores, keeping at most [genus, species]. Level labels
        # (e.g. Silva's 'D_11__') can confound this, hence the '_' split.
        taxon = re.split(' |_', name.split(';')[-1])[:2]
        if name in taxa:
            # replicated taxon: collapse by summing the abundances
            taxa[name] = (taxon, add_lists(taxa[name][1], abundances))
        else:
            taxa[name] = (taxon, abundances)
    return sample_list, taxa
def find_matching_taxonomies(sample_list, taxa, ref_taxa, sep, gen, sp,
taxa_fp):
species_match = 0
genus_match = 0
family_match = 0
no_match = 0
count = len(taxa)
duplicates = []
seq_ids = dict()
new_taxa = dict()
for name, t in taxa.items():
species_set = set()
genus_set = set()
match = 'None'
# search for match at genus, then species level
for full, partial in ref_taxa.items():
if t[0][0] in partial[0]:
if t[0][1] in partial[1]:
match = 'species'
species_set.add(full)
if full not in seq_ids:
seq_ids[full] = [partial[2]]
else:
seq_ids[full].append(partial[2])
elif match != 'species':
match = 'genus'
genus_set.add(sep.join(full.split(sep)[:-1]) + sep + sp)
# If no species or genus matches, make attempt at family level
if match == 'None':
if t[0][0].endswith('er'):
family = '{0}iaceae'.format(t[0][0])
elif t[0][0].endswith('ma'):
family = t[0][0] + 'taceae'
elif t[0][0].endswith('a'):
family = t[0][0] + 'ceae'
elif t[0][0].endswith('myces'):
family = t[0][0][:-1] + 'taceae'
elif t[0][0].endswith('es'):
family = t[0][0][:-2] + 'aceae'
elif t[0][0].endswith('thece'):
family = t[0][0][:-1] + 'aceae'
elif t[0][0].endswith('stis'):
family = t[0][0][:-2] + 'aceae'
elif t[0][0].endswith('as') or t[0][0].endswith('is'):
family = t[0][0][:-1] + 'daceae'
elif t[0][0].endswith('us') or t[0][0].endswith('um'):
family = t[0][0][:-2] + 'aceae'
elif t[0][0].endswith('io'):
family = t[0][0] + 'naceae'
# Homoeothrix Crenothrix Erysipelothrix Thiothrix
elif t[0][0].endswith('thrix'):
family = t[0][0][:-4] + 'richaceae'
# Cyanothrix Tolypothrix
elif t[0][0].endswith('Cyanothrix') or t[0][0].endswith('pothrix'):
family = t[0][0][:-1] + 'chaceae'
elif t[0][0].endswith('ex'):
family = t[0][0][:-2] + 'icaceae'
else:
family = t[0][0] + 'aceae'
for full in ref_taxa.keys():
if family in full.split(sep)[-3]:
match = 'family'
family = sep.join([sep.join(full.split(sep)[:-2]),
gen, sp])
print('\n\n', name, ' nearest match to family level:')
print(family, '\n\n')
approval = input('Do you approve? (y/n): ')
if approval == 'y':
break
# now add match to new_taxa
if match == 'species':
species_match += 1
if len(species_set) > 1:
species = choose_taxonomy(name, species_set, taxa_fp)
else:
species = list(species_set)[0]
if species not in new_taxa.keys():
new_taxa[species] = ([name], t[1])
else:
# if species is replicated, collapse abundances
new_taxa[species] = (new_taxa[species][0] + [name],
add_lists(new_taxa[species][1], t[1]))
duplicates.append((name, species))
elif match == 'genus':
genus_match += 1
if len(genus_set) > 1:
genus = choose_taxonomy(name, genus_set, taxa_fp)
else:
genus = list(genus_set)[0]
if genus not in new_taxa.keys():
new_taxa[genus] = ([name], t[1])
else:
# if genus is replicated, collapse abundances
new_taxa[genus] = (new_taxa[genus][0] + [name],
add_lists(new_taxa[genus][1], t[1]))
duplicates.append((name, genus))
elif match == 'family':
family_match += 1
if family not in new_taxa.keys():
new_taxa[family] = ([name], t[1])
else:
# if genus is replicated, collapse abundances
new_taxa[family] = (new_taxa[family][0] + [name],
add_lists(new_taxa[family][1], t[1]))
duplicates.append((name, family))
# if failed, user needs to manually search and input new string
else:
no_match += 1
lineage = manual_search(name, taxa_fp)
if lineage not in new_taxa.keys():
new_taxa[lineage] = ([name], t[1])
else:
# if genus is replicated, collapse abundances
new_taxa[lineage] = (new_taxa[lineage][0] + [name],
add_lists(new_taxa[lineage][1], t[1]))
duplicates.append((name, lineage))
# Print results
print('{0} species-level matches ({1:.1f}%)'.format(
species_match, species_match/count*100))
print('{0} genus-level matches ({1:.1f}%)'.format(genus_match,
genus_match/count*100))
if family_match > 0:
print('{0} family-level matches ({1:.1f}%)'.format(
family_match, family_match/count*100))
if no_match > 0:
print('{0} FAILURES ({1:.1f}%)'.format(no_match, no_match/count*100))
if len(duplicates) > 0:
print('\n{0} duplicates:'.format(len(duplicates)))
for dup in duplicates:
print('{0}\t{1}'.format(dup[0], dup[1]))
return duplicates, seq_ids, new_taxa
def print_warning():
    """Print the manual-review disclaimer for the generated output files."""
    disclaimer = (
        '\n\nWARNING: it is your responsibility to ensure the accuracy of',
        'all output files. Manually review the expected-taxonomy.tsv to',
        'ensure that (1) all taxonomy strings are accurately represented',
        'and (2) all relative abundances sum to 1.0',
    )
    for line in disclaimer:
        print(line)
@click.command()
@click.option('-i', '--infile', type=click.File('r'), required=True,
              help='tab-separated list of genus/species names and '
              '[optionally] relative abundances in format:\n'
              'Taxonomy Sample1\n'
              'Lactobacillus plantarum 0.5\n'
              'Pediococcus damnosus 0.5\n')
@click.option('-o', '--outdir', required=True,
              type=click.Path(file_okay=False, readable=False),
              help='directory in which to write annotated taxonomy file')
# NOTE(review): 'taxonom' below looks like a typo for 'taxonomy' in the
# help text; left unchanged here since it is a runtime string.
@click.option('-r', '--ref-taxa', type=click.File('r'), required=True,
              help='tab-separated list of semicolon-delimited taxonomy '
              'strings associated with reference sequences. In format:\n'
              'seqID taxonom\n'
              '0001 kingdom;phylum;class;order;family;genus;species')
@click.option('-p', '--separator', default=';',
              help='taxonomy strings are separated with this string pattern.')
@click.option('-g', '--genus', default=' g__',
              help='Placeholder to use for taxa that have no genus-level match'
              ' in reference taxonomy file. Should match the conventions '
              'that are used in that reference taxonomy file.')
@click.option('-s', '--species', default=' s__',
              help='Placeholder to use for taxa that have no species-level '
              'match in reference taxonomy file. Should match the conventions '
              'that are used in that reference taxonomy file.')
@click.option('-d', '--identifiers', default=False,
              help='Option to allow writing database identifiers for matching '
              'reference taxonomies. Will write one database identifier per'
              'taxonomy. Deprecating in favor of database-identifiers.py.')
def main(infile, outdir, ref_taxa, separator, genus, species, identifiers):
    '''Generate full taxonomy strings from a reference database, given
    a list of "source" genus and species names.
    '''
    sample_list, taxa = parse_taxonomy_file(infile)
    # parse ref taxonomy
    # Each reference line is 'seq_id<TAB>lineage'; build a map of
    # {lineage: (genus_rank, species_rank, seq_id)} for matching.
    ref = {l.strip().split('\t')[1]: (
        l.strip().split('\t')[1].split(separator)[-2],
        l.strip().split('\t')[1].split(separator)[-1],
        l.strip().split('\t')[0]
        ) for l in ref_taxa}
    # Interactive matching; may prompt the user on ambiguous/missing taxa.
    duplicates, seq_ids, new_taxa = \
        find_matching_taxonomies(sample_list, taxa, ref, separator, genus,
                                 species, ref_taxa.name)
    # Write to file
    if not exists(outdir):
        makedirs(outdir)
    with open(join(outdir, 'expected-taxonomy.tsv'), "w") as dest:
        dest.write('Taxonomy\t{0}\n'.format('\t'.join(sample_list)))
        for name, t in new_taxa.items():
            # fixed-precision output keeps the columns diff-friendly
            abundances = ["{:.10f}".format(n) for n in t[1]]
            dest.write('{0}\t{1}\n'.format(name, '\t'.join(abundances)))
    # write out one database identifier for each taxonomy string
    if identifiers:
        with open(join(outdir, 'database-identifiers.tsv'), "w") as dest:
            for t, seq_id in seq_ids.items():
                dest.write('{0}\t{1}\n'.format(t, '\t'.join(seq_id)))
    print_warning()
@click.command()
@click.option('-i', '--infile', type=click.File('r'), required=True,
              help='tab-separated list of genus/species names and '
              '[optionally] relative abundances in format:\n'
              'Taxonomy Sample1\n'
              'Lactobacillus plantarum 0.5\n'
              'Pediococcus damnosus 0.5\n')
@click.option('-e', '--expected-taxonomy', type=click.File('r'),
              required=True,
              help='tab-separated list of genus/species names and '
              '[optionally] relative abundances. Result of previous call to '
              'autoannotate')
@click.option('-o', '--outdir', required=True,
              type=click.Path(file_okay=False, readable=False),
              help='directory in which to write the taxonomy mapping file')
@click.option('-p', '--separator', default=';',
              help='taxonomy strings are separated with this string pattern.')
@click.option('-g', '--genus', default=' g__',
              help='Placeholder to use for taxa that have no genus-level match'
              ' in reference taxonomy file. Should match the conventions '
              'that are used in that reference taxonomy file.')
@click.option('-s', '--species', default=' s__',
              help='Placeholder to use for taxa that have no species-level '
              'match in reference taxonomy file. Should match the conventions '
              'that are used in that reference taxonomy file.')
def annotate_sequence_ids(infile, expected_taxonomy, outdir, separator,
                          genus, species):
    'Reprocess the expected taxonomy to explicitly classify each sequence'
    sample_list, taxa = parse_taxonomy_file(infile)
    # parse expected taxonomy
    reader = csv.reader(expected_taxonomy, delimiter='\t')
    # skip the header row
    next(reader)
    # Map each expected lineage to its last two ranks plus a dummy seq id
    # (0), mirroring the ref_taxa shape find_matching_taxonomies expects.
    expected = {r[0]: r[0].split(separator)[-2:]+[0] for r in reader}
    _, _, new_taxa = find_matching_taxonomies(sample_list, taxa, expected,
                                              separator, genus, species,
                                              expected_taxonomy.name)
    # Write to file
    if not exists(outdir):
        makedirs(outdir)
    est_filename = join(outdir, 'expected-sequence-taxonomies.tsv')
    with open(est_filename, "w") as dest:
        writer = csv.writer(dest, delimiter='\t')
        writer.writerow(['Taxonomy', 'Standard Taxonomy'])
        # one row per source name that collapsed onto each standard lineage
        for name, ts in new_taxa.items():
            for t in ts[0]:
                writer.writerow([t, name])
    print_warning()
# CLI entry point; only the `main` command is wired up here
# (annotate_sequence_ids is presumably exposed elsewhere — TODO confirm).
if __name__ == '__main__':
    main()
| |
"""Adjacency List"""
from django.core import serializers
from django.db import models
from django.utils.translation import gettext_noop as _
from treebeard.exceptions import InvalidMoveToDescendant, NodeAlreadySaved
from treebeard.models import Node
def get_result_class(cls):
    """
    For the given model class, determine what class we should use for the
    nodes returned by its tree methods (such as get_children).
    Usually this will be trivially the same as the initial model class,
    but there are special cases when model inheritance is in use:
    * If the model extends another via multi-table inheritance, we need to
      use whichever ancestor originally implemented the tree behaviour (i.e.
      the one which defines the 'parent' field). We can't use the
      subclass, because it's not guaranteed that the other nodes reachable
      from the current one will be instances of the same subclass.
    * If the model is a proxy model, the returned nodes should also use
      the proxy class.
    """
    # The model that declares the 'parent' field is the tree's "owner".
    tree_owner = cls._meta.get_field('parent').model
    return cls if cls._meta.proxy_for_model == tree_owner else tree_owner
class AL_NodeManager(models.Manager):
    """Custom manager for nodes in an Adjacency List tree."""
    def get_queryset(self):
        """Return the default queryset, ordered for tree traversal.

        Siblings are grouped under their parent and sorted either by the
        model's declared ``node_order_by`` fields or, for unsorted trees,
        by their explicit ``sib_order``.
        """
        ordering = self.model.node_order_by
        if ordering:
            fields = ['parent', *ordering]
        else:
            fields = ['parent', 'sib_order']
        return super().get_queryset().order_by(*fields)
class AL_Node(Node):
    """Abstract model to create your own Adjacency List Trees."""
    # Default manager; orders nodes by parent then sibling position.
    objects = AL_NodeManager()
    # Subclasses may set this to a list of field names to keep siblings
    # sorted by those fields instead of an explicit ``sib_order``.
    node_order_by = None
    @classmethod
    def add_root(cls, **kwargs):
        """Adds a root node to the tree.

        Accepts either model field values as keyword arguments, or a
        single ``instance`` keyword holding an unsaved model instance.
        :raises NodeAlreadySaved: if the passed instance is already saved.
        """
        if len(kwargs) == 1 and 'instance' in kwargs:
            # adding the passed (unsaved) instance to the tree
            newobj = kwargs['instance']
            if not newobj._state.adding:
                raise NodeAlreadySaved("Attempted to add a tree node that is "\
                                       "already in the database")
        else:
            newobj = cls(**kwargs)
        # roots are at depth 1 by definition
        newobj._cached_depth = 1
        if not cls.node_order_by:
            # unsorted tree: append after the currently-last root
            # NOTE: ``max`` shadows the builtin here (kept as-is).
            try:
                max = get_result_class(cls).objects.filter(
                    parent__isnull=True).order_by(
                    'sib_order').reverse()[0].sib_order
            except IndexError:
                # no roots exist yet
                max = 0
            newobj.sib_order = max + 1
        newobj.save()
        return newobj
    @classmethod
    def get_root_nodes(cls):
        """:returns: A queryset containing the root nodes in the tree."""
        return get_result_class(cls).objects.filter(parent__isnull=True)
    def get_depth(self, update=False):
        """
        :returns: the depth (level) of the node
        Caches the result in the object itself to help in loops.
        :param update: Updates the cached value.
        """
        if self.parent_id is None:
            return 1
        try:
            if update:
                del self._cached_depth
            else:
                return self._cached_depth
        except AttributeError:
            # nothing cached yet; fall through and compute it
            pass
        # walk up the parent chain, counting hops (self included)
        depth = 0
        node = self
        while node:
            node = node.parent
            depth += 1
        self._cached_depth = depth
        return depth
    def get_children(self):
        """:returns: A queryset of all the node's children"""
        return get_result_class(self.__class__).objects.filter(parent=self)
    def get_parent(self, update=False):
        """:returns: the parent node of the current node object."""
        if self._meta.proxy_for_model:
            # the current node is a proxy model; the returned parent
            # should be the same proxy model, so we need to explicitly
            # fetch it as an instance of that model rather than simply
            # following the 'parent' relation
            if self.parent_id is None:
                return None
            else:
                return self.__class__.objects.get(pk=self.parent_id)
        else:
            return self.parent
    def get_ancestors(self):
        """
        :returns: A *list* containing the current node object's ancestors,
            starting by the root node and descending to the parent.
        """
        ancestors = []
        if self._meta.proxy_for_model:
            # the current node is a proxy model; our result set
            # should use the same proxy model, so we need to
            # explicitly fetch instances of that model
            # when following the 'parent' relation
            cls = self.__class__
            node = self
            while node.parent_id:
                node = cls.objects.get(pk=node.parent_id)
                ancestors.insert(0, node)
        else:
            node = self.parent
            while node:
                ancestors.insert(0, node)
                node = node.parent
        return ancestors
    def get_root(self):
        """:returns: the root node for the current node object."""
        ancestors = self.get_ancestors()
        if ancestors:
            return ancestors[0]
        # no ancestors means this node is itself a root
        return self
    def is_descendant_of(self, node):
        """
        :returns: ``True`` if the node is a descendant of another node given
            as an argument, else, returns ``False``
        """
        return self.pk in [obj.pk for obj in node.get_descendants()]
    @classmethod
    def dump_bulk(cls, parent=None, keep_ids=True):
        """Dumps a tree branch to a python data structure."""
        serializable_cls = cls._get_serializable_model()
        if (
            parent and serializable_cls != cls and
            parent.__class__ != serializable_cls
        ):
            # refetch the parent as the serializable model class
            parent = serializable_cls.objects.get(pk=parent.pk)
        # a list of nodes: not really a queryset, but it works
        objs = serializable_cls.get_tree(parent)
        # ret: top-level entries; lnk: pk -> serialized dict, used to
        # attach each node under its already-serialized parent
        ret, lnk = [], {}
        pk_field = cls._meta.pk.attname
        for node, pyobj in zip(objs, serializers.serialize('python', objs)):
            depth = node.get_depth()
            # django's serializer stores the attributes in 'fields'
            fields = pyobj['fields']
            del fields['parent']
            # non-sorted trees have this
            if 'sib_order' in fields:
                del fields['sib_order']
            if pk_field in fields:
                del fields[pk_field]
            newobj = {'data': fields}
            if keep_ids:
                newobj[pk_field] = pyobj['pk']
            if (not parent and depth == 1) or\
                    (parent and depth == parent.get_depth()):
                # node sits at the top of the dumped branch
                ret.append(newobj)
            else:
                parentobj = lnk[node.parent_id]
                if 'children' not in parentobj:
                    parentobj['children'] = []
                parentobj['children'].append(newobj)
            lnk[node.pk] = newobj
        return ret
    def add_child(self, **kwargs):
        """Adds a child to the node.

        Accepts either field values as keyword arguments or a single
        ``instance`` keyword with an unsaved model instance.
        :raises NodeAlreadySaved: if the passed instance is already saved.
        """
        cls = get_result_class(self.__class__)
        if len(kwargs) == 1 and 'instance' in kwargs:
            # adding the passed (unsaved) instance to the tree
            newobj = kwargs['instance']
            if not newobj._state.adding:
                raise NodeAlreadySaved("Attempted to add a tree node that is "\
                                       "already in the database")
        else:
            newobj = cls(**kwargs)
        try:
            # propagate the cached depth if the parent has one
            newobj._cached_depth = self._cached_depth + 1
        except AttributeError:
            pass
        if not cls.node_order_by:
            # unsorted tree: append after the currently-last child
            try:
                max = cls.objects.filter(parent=self).reverse(
                )[0].sib_order
            except IndexError:
                # first child of this node
                max = 0
            newobj.sib_order = max + 1
        newobj.parent = self
        newobj.save()
        return newobj
    @classmethod
    def _get_tree_recursively(cls, results, parent, depth):
        # Depth-first accumulation of nodes into ``results``; one query
        # per node's children.
        if parent:
            nodes = parent.get_children()
        else:
            nodes = cls.get_root_nodes()
        for node in nodes:
            node._cached_depth = depth
            results.append(node)
            cls._get_tree_recursively(results, node, depth + 1)
    @classmethod
    def get_tree(cls, parent=None):
        """
        :returns: A list of nodes ordered as DFS, including the parent. If
            no parent is given, the entire tree is returned.
        """
        if parent:
            depth = parent.get_depth() + 1
            results = [parent]
        else:
            depth = 1
            results = []
        cls._get_tree_recursively(results, parent, depth)
        return results
    def get_descendants(self):
        """
        :returns: A *list* of all the node's descendants, doesn't
            include the node itself
        """
        # get_tree includes self as the first element, hence the slice
        return self.__class__.get_tree(parent=self)[1:]
    def get_descendant_count(self):
        """:returns: the number of descendants of a node"""
        return len(self.get_descendants())
    def get_siblings(self):
        """
        :returns: A queryset of all the node's siblings, including the node
            itself.
        """
        if self.parent:
            return get_result_class(self.__class__).objects.filter(
                parent=self.parent)
        # root nodes are each other's siblings
        return self.__class__.get_root_nodes()
    def add_sibling(self, pos=None, **kwargs):
        """Adds a new node as a sibling to the current node object."""
        pos = self._prepare_pos_var_for_add_sibling(pos)
        if len(kwargs) == 1 and 'instance' in kwargs:
            # adding the passed (unsaved) instance to the tree
            newobj = kwargs['instance']
            if not newobj._state.adding:
                raise NodeAlreadySaved("Attempted to add a tree node that is "\
                                       "already in the database")
        else:
            # creating a new object
            newobj = get_result_class(self.__class__)(**kwargs)
        if not self.node_order_by:
            # unsorted tree: make room at the requested position
            newobj.sib_order = self.__class__._get_new_sibling_order(pos,
                                                                     self)
        newobj.parent_id = self.parent_id
        newobj.save()
        return newobj
    @classmethod
    def _is_target_pos_the_last_sibling(cls, pos, target):
        # True when the new node would end up after every current sibling,
        # in which case no reordering of existing rows is needed.
        return pos == 'last-sibling' or (
            pos == 'right' and target == target.get_last_sibling())
    @classmethod
    def _make_hole_in_db(cls, min, target_node):
        # Shift every sibling at position >= ``min`` one slot to the right
        # to open a gap for the incoming node.
        qset = get_result_class(cls).objects.filter(sib_order__gte=min)
        if target_node.is_root():
            qset = qset.filter(parent__isnull=True)
        else:
            qset = qset.filter(parent=target_node.parent)
        qset.update(sib_order=models.F('sib_order') + 1)
    @classmethod
    def _make_hole_and_get_sibling_order(cls, pos, target_node):
        # Determine which siblings must shift for the given position, open
        # the gap, and return the sib_order the new node should take.
        siblings = target_node.get_siblings()
        siblings = {
            'left': siblings.filter(sib_order__gte=target_node.sib_order),
            'right': siblings.filter(sib_order__gt=target_node.sib_order),
            'first-sibling': siblings
        }[pos]
        sib_order = {
            'left': target_node.sib_order,
            'right': target_node.sib_order + 1,
            'first-sibling': 1
        }[pos]
        try:
            min = siblings.order_by('sib_order')[0].sib_order
        except IndexError:
            # nothing to shift
            min = 0
        if min:
            cls._make_hole_in_db(min, target_node)
        return sib_order
    @classmethod
    def _get_new_sibling_order(cls, pos, target_node):
        # Appending at the end needs no hole; otherwise shift siblings.
        if cls._is_target_pos_the_last_sibling(pos, target_node):
            sib_order = target_node.get_last_sibling().sib_order + 1
        else:
            sib_order = cls._make_hole_and_get_sibling_order(pos, target_node)
        return sib_order
    def move(self, target, pos=None):
        """
        Moves the current node and all it's descendants to a new position
        relative to another node.

        :raises InvalidMoveToDescendant: when ``target`` is a descendant
            of the node being moved.
        """
        pos = self._prepare_pos_var_for_move(pos)
        sib_order = None
        parent = None
        if pos in ('first-child', 'last-child', 'sorted-child'):
            # moving to a child
            if not target.is_leaf():
                # re-express the child move as a sibling move relative to
                # the target's last child
                target = target.get_last_child()
                pos = {'first-child': 'first-sibling',
                       'last-child': 'last-sibling',
                       'sorted-child': 'sorted-sibling'}[pos]
            else:
                # target has no children: become its first child
                parent = target
                if pos == 'sorted-child':
                    pos = 'sorted-sibling'
                else:
                    pos = 'first-sibling'
                    sib_order = 1
        if target.is_descendant_of(self):
            raise InvalidMoveToDescendant(
                _("Can't move node to a descendant."))
        if self == target and (
                (pos == 'left') or
                (pos in ('right', 'last-sibling') and
                 target == target.get_last_sibling()) or
                (pos == 'first-sibling' and
                 target == target.get_first_sibling())):
            # special cases, not actually moving the node so no need to UPDATE
            return
        if pos == 'sorted-sibling':
            # sorted trees: ordering is handled by node_order_by, so only
            # the parent pointer changes
            if parent:
                self.parent = parent
            else:
                self.parent = target.parent
        else:
            if sib_order:
                self.sib_order = sib_order
            else:
                self.sib_order = self.__class__._get_new_sibling_order(pos,
                                                                       target)
            if parent:
                self.parent = parent
            else:
                self.parent = target.parent
        self.save()
    class Meta:
        """Abstract model."""
        abstract = True
| |
from django.conf import settings
from reportlab.graphics.charts.barcharts import HorizontalBarChart
from reportlab.graphics.charts.legends import Legend
from reportlab.graphics.shapes import Drawing
from reportlab.lib import colors
from reportlab.lib.colors import HexColor
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import inch, mm
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
from reportlab.platypus import Flowable, PageBreak, Paragraph, KeepTogether, Table, TableStyle
from reportlab.platypus.doctemplate import SimpleDocTemplate
from itertools import groupby
import os
# Register the Bootstrap glyphicon font so its symbols (arrows, warning
# signs) can be used inside report tables. Runs at import time.
fontPath = os.path.join(settings.PROJECT_ROOT,
                        'myvoice/static/lib/bootstrap-3.2.0/fonts/glyphicons-halflings-regular.ttf')
pdfmetrics.registerFont(TTFont('Glyphicons Halflings', fontPath))
# Greyscale palette used by the facility bar chart (darkest to lightest).
barColors = [colors.black,
             colors.black.clone(alpha=0.5),
             colors.black.clone(alpha=0.2)]
# Shared paragraph styles: a shaded section heading and a 9pt body style,
# registered into the sample stylesheet under their clone names.
styles = getSampleStyleSheet()
sectionHeadStyle = styles['Normal'].clone(
    'SectionHead',
    backColor=colors.black.clone(alpha=0.4),
    borderPadding=(6, 10, 8),
    fontSize=11,
    spaceBefore=35,
    spaceAfter=15)
bodyText9pt = styles['BodyText'].clone('BodyText9pt', fontSize=9)
styles.add(sectionHeadStyle)
styles.add(bodyText9pt)
def p(text, style=styles['BodyText9pt']):
    """Wrap *text* in a Paragraph flowable using the given *style*
    (defaults to the module's 9pt body style)."""
    paragraph = Paragraph(text, style)
    return paragraph
def heading(text):
    """Render *text* as an upper-cased, white-on-shaded section heading."""
    markup = '<font color=white>%s</font>' % text.upper()
    return p(markup, styles['SectionHead'])
def page_break():
    """Renders a page break.

    Thin wrapper kept so report sections can be composed from this
    module's helpers alone.
    """
    return PageBreak()
def add_page_number(canvas, doc):
    """Draw a centred 'Page N' footer; used as a SimpleDocTemplate
    page callback."""
    canvas.setFont('Helvetica', 8)
    canvas.drawCentredString(100*mm, 10*mm, 'Page %s' % doc.page)
class FacilityChart(Drawing):
    """Horizontal bar chart comparing survey participation per facility,
    with a single-row legend for sent/started/completed series."""
    def __init__(self, width=480, height=480, *args, **kwargs):
        Drawing.__init__(self, width, height, *args, **kwargs)
        # Chart area, inset to leave room for axis labels and the legend.
        self.add(HorizontalBarChart(), name='chart')
        self.chart.width = self.width - 100
        self.chart.height = self.height - 80
        self.chart.x = 60
        self.chart.y = 60
        self.chart.barSpacing = 1
        self.chart.groupSpacing = 6
        # Series colors: data rows are [completed, started, sent], shaded
        # lightest to darkest.
        self.chart.bars[0].fillColor = barColors[2]
        self.chart.bars[1].fillColor = barColors[1]
        self.chart.bars[2].fillColor = barColors[0]
        self.chart.bars.strokeWidth = 0
        # Integer value labels drawn just right of each bar.
        self.chart.barLabelFormat = '%d'
        self.chart.barLabels.boxAnchor = 'w'
        self.chart.barLabels.fontSize = 8
        self.chart.barLabels.leftPadding = 3
        self.chart.barLabels.textAnchor = 'middle'
        self.chart.categoryAxis.strokeColor = barColors[1]
        self.chart.categoryAxis.labels.fontSize = 9
        self.chart.categoryAxis.labels.textAnchor = 'end'
        self.chart.valueAxis.valueMin = 0
        self.chart.valueAxis.strokeColor = barColors[1]
        self.chart.valueAxis.labels.fontSize = 9
        # Legend below the chart, one row, labels matching the series.
        self.add(Legend(), name='legend')
        self.legend.alignment = 'right'
        self.legend.fontSize = 10
        self.legend.x = int(0.24 * self.width)
        self.legend.y = 25
        self.legend.boxAnchor = 'nw'
        self.legend.colorNamePairs = [
            (barColors[0], 'Surveys Sent'),
            (barColors[1], 'Surveys Started'),
            (barColors[2], 'Surveys Completed')
        ]
        self.legend.dxTextSpace = 5
        self.legend.dy = 6
        self.legend.dx = 6
        self.legend.deltay = 5
        self.legend.columnMaximum = 1
        self.legend.strokeWidth = 0
class Bookmark(Flowable):
    """Zero-size flowable that registers a PDF bookmark plus a top-level
    outline entry at the page where it is drawn."""
    def __init__(self, title):
        Flowable.__init__(self)
        self.title = title
    def wrap(self, availWidth, availHeight):
        # Takes up no space in the document layout.
        return 0, 0
    def draw(self):
        canv = self.canv
        canv.bookmarkPage(self.title)
        canv.addOutlineEntry(self.title, self.title, 0, 0)
class ReportPdfRenderer(object):
def summary(self, clinic, sent, started, completed):
"""Renders the Facility Report Summary section."""
p_started = ((float(started) / sent) * 100) if sent else 0
p_completed = ((float(completed) / sent) * 100) if sent else 0
summary_tbl = Table([
(p('<b>SURVEY PARTICIPATION</b>'), '', ''),
(p('%s' % sent),
p('%s (%.0f%%)' % (started, p_started)),
p('%s (%.0f%%)' % (completed, p_completed))),
('Sent', 'Started', 'Completed')
], colWidths=[0.8*inch, 0.8*inch, 0.8*inch])
summary_tbl.setStyle(TableStyle([
('SPAN', (0, 0), (-1, 0)),
('TOPPADDING', (0, 1), (-1, 1), 8),
('BOTTOMPADDING', (0, 1), (-1, 1), 8),
('LINEBELOW', (0, 1), (-1, 1), 0.4, colors.black),
('FONTSIZE', (0, 1), (-1, 1), 11),
('FONTSIZE', (0, -1), (-1, -1), 8),
]))
tbl = Table([
(p('<font size=14><b>%s</b> Facility Report</font>' % clinic.name), ''),
(p("""The following document was generated through the
ICT4SA program, intended to provide trial period
reporting to selected %s Clinic Staff. The following
data was collected through SMS surveys of patients at
%s.""" % (clinic.lga, clinic.name)), summary_tbl)
], colWidths=[3.8*inch, 2.6*inch])
tbl.setStyle(TableStyle([
('SPAN', (0, 0), (-1, 0)),
('BOTTOMPADDING', (0, 0), (-1, 0), 8),
('RIGHTPADDING', (0, -1), (0, -1), 10),
('RIGHTPADDING', (-1, -1), (-1, -1), 10),
('LEFTPADDING', (-1, -1), (-1, -1), 10),
('BACKGROUND', (-1, -1), (-1, -1), colors.grey.clone(alpha=0.1)),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
]))
return tbl
def facility_chart(self, clinic, stats, clinics):
"""Renders the Facility Participation Chart."""
flowables = []
flowables.append(heading('Participation By Facility'))
flowables.append(p("""Number of patients who received, started,
and completed surveys across %s.""" % clinic.name))
d = FacilityChart()
data = [stats['completed'], stats['started'], stats['sent']]
d.chart.data = data
d.chart.categoryAxis.categoryNames = clinics
# Highlight the current facility (better usability):
ndx = clinics.index('\n'.join(clinic.name.split()))
d.chart.bars[(0, ndx)].fillColor = HexColor('#afe25b')
d.chart.bars[(1, ndx)].fillColor = HexColor('#92bd4b')
d.chart.bars[(2, ndx)].fillColor = HexColor('#6e8f37')
flowables.append(d)
return KeepTogether(flowables)
def feedback_responses(self, clinic, responses):
"""Renders the Feedback Responses section."""
cellBgColor = colors.grey.clone(alpha=0.2)
flowables = []
flowables.append(heading('Patient Feedback Responses'))
flowables.append(p("""<para spaceafter=12>Summary of patient feedback responses
compared to LGA-wide averages.</para>"""))
rows = [('Patients from %s said:' % clinic.name, '', '',
'Patients from other facilities said:', '', '')]
for r in responses:
if r[3] >= 10:
symbol = u'\ue013'
elif r[3] <= -10:
symbol = u'\ue014'
else:
symbol = u''
rows.append((
'%d (%.0f%%)' % r[1], r[0], u'\ue107' if r[1][1] < 30.0 else ' ',
'%d (%.0f%%)' % r[2], r[0], symbol
))
tbl = Table(rows, colWidths=[0.9*inch, 2*inch, 0.3*inch] * 2)
tbl.setStyle(TableStyle([
('SPAN', (0, 0), (2, 0)),
('SPAN', (3, 0), (5, 0)),
('BACKGROUND', (0, 1), (0, -1), cellBgColor),
('BACKGROUND', (3, 1), (3, -1), cellBgColor),
('FONTSIZE', (0, 0), (-1, -1), 9),
('FONTSIZE', (0, 0), (0, -1), 10),
('FONTSIZE', (3, 0), (3, -1), 10),
('LEFTPADDING', (0, 0), (-1, 0), 2),
('RIGHTPADDING', (0, 0), (-1, 0), 2),
('TOPPADDING', (0, 1), (-1, -1), 8),
('BOTTOMPADDING', (0, 1), (-1, -1), 8),
('ALIGN', (0, 0), (-1, -1), 'LEFT'),
('VALIGN', (0, 0), (-1, -1), 'MIDDLE'),
('FONT', (2, 0), (2, -1), 'Glyphicons Halflings'),
('FONT', (5, 0), (5, -1), 'Glyphicons Halflings'),
('GRID', (0, 0), (-1, -1), 3, colors.white),
]))
flowables.append(tbl)
legend = Table([
('KEY', '', '', '', '', ''),
(u'\ue107', 'Problem area;\nrequires attention',
u'\ue014', '%s performed worse\nthan the LGA average' % clinic.name,
u'\ue013', '%s performed better\nthan the LGA average' % clinic.name)
])
legend.setStyle(TableStyle([
('SPAN', (0, 0), (-1, 0)),
('TOPPADDING', (0, 0), (-1, 0), 15),
('TOPPADDING', (0, 1), (-1, -1), 6),
('LINEBELOW', (0, 0), (-1, 0), 1, cellBgColor),
('FONT', (0, 1), (0, 1), 'Glyphicons Halflings'),
('FONT', (2, 1), (2, 1), 'Glyphicons Halflings'),
('FONT', (4, 1), (4, 1), 'Glyphicons Halflings'),
('FONTSIZE', (1, 1), (1, 1), 7),
('FONTSIZE', (3, 1), (3, 1), 7),
('FONTSIZE', (5, 1), (5, 1), 7),
('ALIGN', (0, 0), (-1, -1), 'LEFT'),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
]))
flowables.append(legend)
return KeepTogether(flowables)
def feedback_on_servies(self, min_date, max_date, data):
"""Renders the Feedback on Services section."""
flowables = []
flowables.append(heading('Feedback On Services'))
flowables.append(p("""<para spaceafter=12>Number of patients with this service,
who reported this feedback.</para>"""))
if data:
service, feedback = data[0]
if min_date and max_date:
rows = [['%s to %s' % (min_date.strftime('%B %d, %Y'),
max_date.strftime('%B %d, %Y'))
] + ['\n'.join(x[0].split()) for x in feedback]]
else:
rows = [[''] + ['\n'.join(x[0].split()) for x in feedback]]
for service, feedback in data:
row = [service] + ['%s (%s)' % ('0' if not x else x, 0 if y is None else y)
for _, x, y in feedback[:-1]] + [
'%s (%s)' % ('N/A' if not x else x, 0 if y is None else y)
for _, x, y in feedback[-1:]]
rows.append(row)
width = 4.6 / (len(rows[0]) - 1)
tbl = Table(rows, colWidths=[1.8*inch] + [width*inch] * (len(rows[0]) - 1))
tbl.setStyle(TableStyle([
('GRID', (0, 0), (-1, -1), 0.3, colors.grey),
('FONTSIZE', (0, 0), (-1, -1), 9),
('FONTSIZE', (0, 0), (-1, 0), 7),
('ROWBACKGROUNDS', (0, 0), (-1, -1), (0xF9F9F9, None)),
('BACKGROUND', (0, 0), (0, 0), 0xE5E6E7),
('TOPPADDING', (0, 1), (-1, -1), 5),
('BOTTOMPADDING', (0, 1), (-1, -1), 5),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
]))
flowables.append(tbl)
return KeepTogether(flowables)
def detailed_comments(self, min_date, max_date, data):
"""Renders the Detailed Comments section."""
flowables = []
flowables.append(heading('Detailed Comments'))
tblStyle = TableStyle([
('SPAN', (0, 0), (-1, 0)),
('GRID', (0, 0), (-1, -1), 0.3, colors.grey),
('FONTSIZE', (0, 0), (-1, -1), 8),
('FONTSIZE', (0, 0), (-1, 0), 5),
('ROWBACKGROUNDS', (0, 0), (-1, -1), (0xF9F9F9, None)),
('BACKGROUND', (0, 0), (-1, 0), colors.black.clone(alpha=0.4)),
('TOPPADDING', (0, 1), (-1, -1), 6),
('BOTTOMPADDING', (0, 1), (-1, -1), 6),
('TOPPADDING', (0, 1), (-1, 0), 3),
('BOTTOMPADDING', (0, 1), (-1, 0), 3),
('VALIGN', (0, 0), (-1, -1), 'TOP'),
])
tbl = Table([('Date', 'Comments')], colWidths=[0.8*inch, 5.6*inch])
tbl.setStyle(TableStyle([
('GRID', (0, 0), (-1, -1), 0.3, colors.grey),
('FONTSIZE', (0, 0), (-1, -1), 8),
('TOPPADDING', (0, 1), (-1, -1), 8),
('BOTTOMPADDING', (0, 1), (-1, -1), 8),
]))
flowables.append(tbl)
if min_date and max_date:
# Filter out comments outside the displayed date range:
data = (x for x in data if min_date <= x['datetime'] <= max_date)
grouped_comments = groupby(data, lambda x: x['question'])
for question, comments in grouped_comments:
tbl = Table([(p('<font color=white>%s</font>' % question.upper()), '')] + [
(x['datetime'].strftime('%d/%m/%Y'), p(x['response'])) for x in comments
], colWidths=[0.8*inch, 5.6*inch])
tbl.setStyle(tblStyle)
flowables.append(tbl)
return flowables
def render_to_list(self, ctx):
"""Renders all report sections, returning them as a list of flowables."""
elements = []
elements.append(Bookmark(str(ctx['clinic'])))
elements.append(self.summary(
ctx['clinic'], ctx['num_registered'], ctx['num_started'], ctx['num_completed']))
elements.append(self.facility_chart(
ctx['clinic'], ctx['feedback_stats'], ctx['feedback_clinics']))
elements.append(self.feedback_responses(ctx['clinic'], ctx['response_stats']))
elements.append(self.feedback_on_servies(
ctx['min_date'], ctx['max_date'], ctx['feedback_by_service']))
elements.extend(self.detailed_comments(
ctx['min_date'], ctx['max_date'], ctx['detailed_comments']))
elements.append(page_break())
return elements
def render_to_response(self, flowables, outfile):
    """Renders a list of flowables as a PDF to the specified `outfile`.

    :param flowables: list of reportlab flowables to lay out.
    :param outfile: file-like target for the generated PDF.
    :returns: the same `outfile`, after the PDF has been written to it.
    """
    document = SimpleDocTemplate(outfile)
    # Stamp a page number on every page, first and subsequent alike.
    document.build(flowables,
                   onFirstPage=add_page_number,
                   onLaterPages=add_page_number)
    return outfile
| |
# -*- coding: utf-8 -*-
"""
Provides step definitions to perform tests with the Python logging subsystem.
.. code-block:: gherkin
Given I create log records with:
| category | level | message |
| foo.bar | WARN | Hello LogRecord |
| bar | CURRENT | Hello LogRecord |
And I create a log record with:
| category | level | message |
| foo | ERROR | Hello Foo |
Then the command output should contain the following log records:
| category | level | message |
| bar | CURRENT | xxx |
Then the command output should not contain the following log records:
| category | level | message |
| bar | CURRENT | xxx |
Then the file "behave.log" should contain the log records:
| category | level | message |
| bar | CURRENT | xxx |
Then the file "behave.log" should not contain the log records:
| category | level | message |
| bar | CURRENT | xxx |
Given I define the log record schema:
| category | level | message |
| root | INFO | Hello LogRecord |
And I create log records with:
| category | level | message |
| foo.bar | INFO | Hello LogRecord |
| bar | INFO | Hello LogRecord |
Then the command output should contain log records from categories
| category |
| foo.bar |
| bar |
Given I use the log record configuration:
| property | value |
| format | LOG.%(levelname)-8s %(name)s %(message)s |
| datefmt | |
IDEA:
.. code-block:: gherkin
Given I capture log records
When I create log records with:
| category | level | message |
| foo.bar | WARN | Hello LogRecord |
Then the captured log should contain the following log records:
| category | level | message |
| bar | CURRENT | xxx |
And the captured log should not contain the following log records:
| category | level | message |
| bar | CURRENT | xxx |
"""
from __future__ import absolute_import
from behave import given, when, then, step
from behave4cmd0.command_steps import \
step_file_should_contain_multiline_text, \
step_file_should_not_contain_multiline_text
from behave.configuration import LogLevel
from behave.log_capture import LoggingCapture
import logging
# -----------------------------------------------------------------------------
# STEP UTILS:
# -----------------------------------------------------------------------------
def make_log_record(category, level, message):
    """Emit one log record through the logger for *category*.

    :param category: Logger name; "root"/"__ROOT__" are aliases for the
        root logger (``getLogger(None)``).
    :param level: Log level (as number).
    :param message: Log message to emit.
    """
    normalized = None if category in ("root", "__ROOT__") else category
    logging.getLogger(normalized).log(level, message)
def make_log_record_output(category, level, message,
                           format=None, datefmt=None, **kwargs):
    """
    Create the output for a log record, like performed by :mod:`logging` module.

    :param category: Name of the logger (as string or None; None and
        "__ROOT__" map to "root").
    :param level: Log level (as number).
    :param message: Log message to use.
    :param format: Log format string (``None`` means the logging default,
        i.e. just the message).
    :param datefmt: Date format string for ``%(asctime)s``.
    :returns: Log record output (as string)
    """
    record_data = {
        "name": "root" if (not category or category == "__ROOT__") else category,
        "levelname": logging.getLevelName(level),
        "msg": message,
    }
    record_data.update(kwargs)
    formatter = logging.Formatter(format, datefmt=datefmt)
    return formatter.format(logging.makeLogRecord(record_data))
class LogRecordTable(object):
    """Helpers for behave tables whose rows describe log records."""

    @classmethod
    def make_output_for_row(cls, row, format=None, datefmt=None, **kwargs):
        """Render one table row as formatted log-record output.

        Missing cells fall back to defaults (level "INFO",
        message "__UNDEFINED__", category None == root).
        """
        return make_log_record_output(
            row.get("category", None),
            LogLevel.parse_type(row.get("level", "INFO")),
            row.get("message", "__UNDEFINED__"),
            format, datefmt, **kwargs)

    @staticmethod
    def annotate_with_row_schema(table, row_schema):
        """
        Annotate/extend a table of log-records with additional columns from
        the log-record schema if columns are missing.

        :param table: Table w/ log-records (as :class:`behave.model.Table`)
        :param row_schema: Log-record row schema (as dict).
        """
        for column in row_schema:
            if column not in table.headings:
                table.add_column(column, default_value=row_schema[column])
# -----------------------------------------------------------------------------
# STEP DEFINITIONS:
# -----------------------------------------------------------------------------
# @step('I create log records for the following categories')
# def step_I_create_logrecords_for_categories_with_text(context):
# assert context.text is not None, "REQUIRE: context.text"
# current_level = context.config.logging_level
# categories = context.text.split()
# for category_name in categories:
# logger = logging.getLogger(category_name)
# logger.log(current_level, "__LOG_RECORD__")
@step('I create log records with')
def step_I_create_logrecords_with_table(context):
    """
    Step definition that creates one or more log records from a table.

    .. code-block:: gherkin

        When I create log records with:
            | category | level | message |
            | foo      | ERROR | Hello Foo |
            | foo.bar  | WARN  | Hello Foo.Bar |

    Table description
    ------------------

    | Column   | Type     | Required | Description |
    | category | string   | yes      | Category (or logger) to use; "__ROOT__" means root logger. |
    | level    | LogLevel | yes      | Log level to use.   |
    | message  | string   | yes      | Log message to use. |
    """
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["category", "level", "message"])
    for row in context.table.rows:
        raw_category = row["category"]
        make_log_record(
            None if raw_category == "__ROOT__" else raw_category,
            LogLevel.parse_type(row["level"]),
            row["message"])
@step('I create a log record with')
def step_I_create_logrecord_with_table(context):
    """
    Create exactly one log record by using a one-row table for the parts.

    .. seealso: :func:`step_I_create_logrecords_with_table()`
    """
    assert context.table, "REQUIRE: context.table"
    row_count = len(context.table.rows)
    assert row_count == 1, "REQUIRE: table.row.size == 1"
    # Delegate to the plural variant, which handles the single row.
    step_I_create_logrecords_with_table(context)
@step('I define the log record schema')
def step_I_define_logrecord_schema_with_table(context):
    """Store a one-row table as the default log-record schema on the context.

    Later "log records from categories" steps fill in the missing
    level/message columns from this schema.
    """
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["category", "level", "message"])
    assert len(context.table.rows) == 1, \
        "REQUIRE: context.table.rows.size(%s) == 1" % (len(context.table.rows))
    row = context.table.rows[0]
    context.log_record_row_schema = {
        "category": row["category"],
        "level": row["level"],
        "message": row["message"],
    }
@then('the command output should contain the following log records')
def step_command_output_should_contain_log_records(context):
    """
    Verifies that the command output contains the specified log records
    (in any order).

    .. code-block:: gherkin

        Then the command output should contain the following log records:
            | category | level   | message |
            | bar      | CURRENT | xxx     |
    """
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["category", "level", "message"])
    # A per-scenario format override wins over the global logging format.
    log_format = getattr(context, "log_record_format", context.config.logging_format)
    for row in context.table.rows:
        expected_output = LogRecordTable.make_output_for_row(row, log_format)
        context.execute_steps(u'''
            Then the command output should contain:
                """
                {expected_output}
                """
            '''.format(expected_output=expected_output))
@then('the command output should not contain the following log records')
def step_command_output_should_not_contain_log_records(context):
    """
    Verifies that the command output does not contain the specified
    log records (in any order).

    .. code-block:: gherkin

        Then the command output should not contain the following log records:
            | category | level   | message |
            | bar      | CURRENT | xxx     |
    """
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["category", "level", "message"])
    # A per-scenario format override wins over the global logging format.
    format = getattr(context, "log_record_format", context.config.logging_format)
    for row in context.table.rows:
        output = LogRecordTable.make_output_for_row(row, format)
        context.execute_steps(u'''
            Then the command output should not contain:
                """
                {expected_output}
                """
            '''.format(expected_output=output))
@then('the command output should contain the following log record')
def step_command_output_should_contain_log_record(context):
    """Singular variant: requires a one-row table of log-record data."""
    assert context.table, "REQUIRE: context.table"
    row_count = len(context.table.rows)
    assert row_count == 1, "REQUIRE: table.row.size == 1"
    step_command_output_should_contain_log_records(context)
@then('the command output should not contain the following log record')
def step_command_output_should_not_contain_log_record(context):
    """Singular variant: requires a one-row table of log-record data."""
    assert context.table, "REQUIRE: context.table"
    row_count = len(context.table.rows)
    assert row_count == 1, "REQUIRE: table.row.size == 1"
    step_command_output_should_not_contain_log_records(context)
@then('the command output should contain log records from categories')
def step_command_output_should_contain_log_records_from_categories(context):
    """
    Verifies that the command output contains log records from the given
    categories (in any order), using the previously defined log-record
    schema for the level/message parts.

    .. code-block:: gherkin

        Given I define the log record schema:
            | category | level | message         |
            | root     | ERROR | __LOG_MESSAGE__ |
        Then the command output should contain log records from categories:
            | category |
            | bar      |
    """
    assert context.table, "REQUIRE: context.table"
    context.table.require_column("category")
    record_schema = context.log_record_row_schema
    # Temporarily extend the table with level/message columns from the
    # schema, delegate to the full-record step, then restore the table
    # to its original single-column shape.
    LogRecordTable.annotate_with_row_schema(context.table, record_schema)
    step_command_output_should_contain_log_records(context)
    context.table.remove_columns(["level", "message"])
@then('the command output should not contain log records from categories')
def step_command_output_should_not_contain_log_records_from_categories(context):
    """
    Verifies that the command output does not contain log records from
    the provided log categories (in any order).

    .. code-block:: gherkin

        Given I define the log record schema:
            | category | level | message         |
            | root     | ERROR | __LOG_MESSAGE__ |
        Then the command output should not contain log records from categories:
            | category |
            | bar      |
    """
    assert context.table, "REQUIRE: context.table"
    context.table.require_column("category")
    record_schema = context.log_record_row_schema
    # Temporarily extend the table with level/message columns from the
    # schema, delegate to the full-record step, then restore the table
    # to its original single-column shape.
    LogRecordTable.annotate_with_row_schema(context.table, record_schema)
    step_command_output_should_not_contain_log_records(context)
    context.table.remove_columns(["level", "message"])
# BUG FIX: the step pattern must declare the "{filename}" parameter --
# otherwise behave cannot supply the `filename` argument of the function.
@then('the file "{filename}" should contain the log records')
def step_file_should_contain_log_records(context, filename):
    """
    Verifies that the given file contains the specified log records
    (in any order).

    .. code-block:: gherkin

        Then the file "xxx.log" should contain the log records:
            | category | level   | message |
            | bar      | CURRENT | xxx     |

    :param filename: Name of the file to inspect (from the step pattern).
    """
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["category", "level", "message"])
    # A per-scenario format override wins over the global logging format.
    format = getattr(context, "log_record_format", context.config.logging_format)
    for row in context.table.rows:
        output = LogRecordTable.make_output_for_row(row, format)
        # The delegated step reads the expected text from context.text.
        context.text = output
        step_file_should_contain_multiline_text(context, filename)
# BUG FIX: the step pattern must declare the "{filename}" parameter --
# otherwise behave cannot supply the `filename` argument of the function.
@then('the file "{filename}" should not contain the log records')
def step_file_should_not_contain_log_records(context, filename):
    """
    Verifies that the given file does not contain the specified log records
    (in any order).

    .. code-block:: gherkin

        Then the file "xxx.log" should not contain the log records:
            | category | level   | message |
            | bar      | CURRENT | xxx     |

    :param filename: Name of the file to inspect (from the step pattern).
    """
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["category", "level", "message"])
    # A per-scenario format override wins over the global logging format.
    format = getattr(context, "log_record_format", context.config.logging_format)
    for row in context.table.rows:
        output = LogRecordTable.make_output_for_row(row, format)
        # The delegated step reads the expected text from context.text.
        context.text = output
        step_file_should_not_contain_multiline_text(context, filename)
@step('I use "{log_record_format}" as log record format')
def step_use_log_record_format_text(context, log_record_format):
    """Remember the log-record format string for subsequent steps."""
    context.log_record_format = log_record_format
@step('I use the log record configuration')
def step_use_log_record_configuration(context):
    """
    Define log record configuration parameters.

    .. code-block:: gherkin

        Given I use the log record configuration:
            | property | value |
            | format   |       |
            | datefmt  |       |
    """
    assert context.table, "REQUIRE: context.table"
    context.table.require_columns(["property", "value"])
    # Map each known property to the context attribute it configures.
    known_properties = {
        "format": "log_record_format",
        "datefmt": "log_record_datefmt",
    }
    for row in context.table.rows:
        property_name = row["property"]
        context_attribute = known_properties.get(property_name)
        if context_attribute is None:
            raise KeyError("Unknown property=%s" % property_name)
        setattr(context, context_attribute, row["value"])
# -----------------------------------------------------------------------------
# TODO: STEP DEFINITIONS:
# -----------------------------------------------------------------------------
@step('I capture log records with level "{level}" or above')
def step_I_capture_logrecords_with_level(context, level):
    """Capture log records at or above the given level.

    BUG FIX: renamed from ``step_I_capture_logrecords`` -- the following
    step definition reused the same function name, silently shadowing
    this one at module level (flake8 F811).

    .. todo:: Not implemented yet.
    """
    raise NotImplementedError()
@step('I capture log records')
def step_I_capture_logrecords(context):
    """Capture log records (not implemented yet).

    .. code-block:: gherkin

        Given I capture log records
        When I capture log records

    :param context: behave context object.
    """
    raise NotImplementedError()
    # BUG FIX: the statements below were unreachable dead code after the
    # raise; kept here as the intended implementation sketch.
    # logcapture = getattr(context, "logcapture", None)
    # if not logcapture:
    #     context.logcapture = LoggingCapture()
| |
# coding: utf-8
"""
Cloudbreak API
Cloudbreak is a powerful left surf that breaks over a coral reef, a mile off southwest the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. Abstracts the provisioning and ease management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing to span up Hadoop clusters of arbitary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>
OpenAPI spec version: 2.9.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class HostGroupResponse(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> swagger type (used by to_dict() introspection).
    swagger_types = {
        'name': 'str',
        'constraint': 'Constraint',
        'recipe_ids': 'list[int]',
        'recovery_mode': 'str',
        'id': 'int',
        'recipes': 'list[RecipeResponse]',
        'extended_recipes': 'list[str]',
        'metadata': 'list[HostMetadata]'
    }

    # Python attribute name -> JSON key in the API definition.
    attribute_map = {
        'name': 'name',
        'constraint': 'constraint',
        'recipe_ids': 'recipeIds',
        'recovery_mode': 'recoveryMode',
        'id': 'id',
        'recipes': 'recipes',
        'extended_recipes': 'extendedRecipes',
        'metadata': 'metadata'
    }

    def __init__(self, name=None, constraint=None, recipe_ids=None, recovery_mode=None, id=None, recipes=None, extended_recipes=None, metadata=None):
        """
        HostGroupResponse - a model defined in Swagger
        """
        self._name = None
        self._constraint = None
        self._recipe_ids = None
        self._recovery_mode = None
        self._id = None
        self._recipes = None
        self._extended_recipes = None
        self._metadata = None

        # name and constraint are required: assigning through the property
        # setters raises ValueError when they are None.
        self.name = name
        self.constraint = constraint
        # All remaining attributes are optional and only set when provided.
        if recipe_ids is not None:
            self.recipe_ids = recipe_ids
        if recovery_mode is not None:
            self.recovery_mode = recovery_mode
        if id is not None:
            self.id = id
        if recipes is not None:
            self.recipes = recipes
        if extended_recipes is not None:
            self.extended_recipes = extended_recipes
        if metadata is not None:
            self.metadata = metadata

    @property
    def name(self):
        """
        Gets the name of this HostGroupResponse.
        name of the resource

        :return: The name of this HostGroupResponse.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """
        Sets the name of this HostGroupResponse.
        name of the resource

        :param name: The name of this HostGroupResponse.
        :type: str
        :raises ValueError: if `name` is None (required field).
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._name = name

    @property
    def constraint(self):
        """
        Gets the constraint of this HostGroupResponse.
        instance group or resource constraint for a hostgroup

        :return: The constraint of this HostGroupResponse.
        :rtype: Constraint
        """
        return self._constraint

    @constraint.setter
    def constraint(self, constraint):
        """
        Sets the constraint of this HostGroupResponse.
        instance group or resource constraint for a hostgroup

        :param constraint: The constraint of this HostGroupResponse.
        :type: Constraint
        :raises ValueError: if `constraint` is None (required field).
        """
        if constraint is None:
            raise ValueError("Invalid value for `constraint`, must not be `None`")
        self._constraint = constraint

    @property
    def recipe_ids(self):
        """
        Gets the recipe_ids of this HostGroupResponse.
        referenced recipe ids

        :return: The recipe_ids of this HostGroupResponse.
        :rtype: list[int]
        """
        return self._recipe_ids

    @recipe_ids.setter
    def recipe_ids(self, recipe_ids):
        """
        Sets the recipe_ids of this HostGroupResponse.
        referenced recipe ids

        :param recipe_ids: The recipe_ids of this HostGroupResponse.
        :type: list[int]
        """
        self._recipe_ids = recipe_ids

    @property
    def recovery_mode(self):
        """
        Gets the recovery_mode of this HostGroupResponse.
        recovery mode of the hostgroup's nodes

        :return: The recovery_mode of this HostGroupResponse.
        :rtype: str
        """
        return self._recovery_mode

    @recovery_mode.setter
    def recovery_mode(self, recovery_mode):
        """
        Sets the recovery_mode of this HostGroupResponse.
        recovery mode of the hostgroup's nodes

        :param recovery_mode: The recovery_mode of this HostGroupResponse.
        :type: str
        :raises ValueError: if the value is not one of "MANUAL"/"AUTO"
            (the enum values from the swagger spec).
        """
        allowed_values = ["MANUAL", "AUTO"]
        if recovery_mode not in allowed_values:
            raise ValueError(
                "Invalid value for `recovery_mode` ({0}), must be one of {1}"
                .format(recovery_mode, allowed_values)
            )
        self._recovery_mode = recovery_mode

    @property
    def id(self):
        """
        Gets the id of this HostGroupResponse.
        id of the resource

        :return: The id of this HostGroupResponse.
        :rtype: int
        """
        return self._id

    @id.setter
    def id(self, id):
        """
        Sets the id of this HostGroupResponse.
        id of the resource

        :param id: The id of this HostGroupResponse.
        :type: int
        """
        self._id = id

    @property
    def recipes(self):
        """
        Gets the recipes of this HostGroupResponse.
        referenced recipes

        :return: The recipes of this HostGroupResponse.
        :rtype: list[RecipeResponse]
        """
        return self._recipes

    @recipes.setter
    def recipes(self, recipes):
        """
        Sets the recipes of this HostGroupResponse.
        referenced recipes

        :param recipes: The recipes of this HostGroupResponse.
        :type: list[RecipeResponse]
        """
        self._recipes = recipes

    @property
    def extended_recipes(self):
        """
        Gets the extended_recipes of this HostGroupResponse.
        referenced extended recipes

        :return: The extended_recipes of this HostGroupResponse.
        :rtype: list[str]
        """
        return self._extended_recipes

    @extended_recipes.setter
    def extended_recipes(self, extended_recipes):
        """
        Sets the extended_recipes of this HostGroupResponse.
        referenced extended recipes

        :param extended_recipes: The extended_recipes of this HostGroupResponse.
        :type: list[str]
        """
        self._extended_recipes = extended_recipes

    @property
    def metadata(self):
        """
        Gets the metadata of this HostGroupResponse.
        metadata of hosts

        :return: The metadata of this HostGroupResponse.
        :rtype: list[HostMetadata]
        """
        return self._metadata

    @metadata.setter
    def metadata(self, metadata):
        """
        Sets the metadata of this HostGroupResponse.
        metadata of hosts

        :param metadata: The metadata of this HostGroupResponse.
        :type: list[HostMetadata]
        """
        self._metadata = metadata

    def to_dict(self):
        """
        Returns the model properties as a dict, recursively converting
        nested models (anything with a to_dict method) and containers.
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, HostGroupResponse):
            return False

        # Compares all instance attributes (the private backing fields).
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import absolute_import
import os
import time
import struct
import sys
import re
import base64
import math
import random
from datetime import datetime, timedelta
from pprint import pformat
__all__ = ("utcnow",
"parseutc",
"utcstr",
"id",
"rid",
"newid",
"rtime",
"Stopwatch",
"Tracker",
"EqualityMixin",
"IdGenerator")
def utcnow():
    """
    Get the current time in UTC as an ISO 8601 string.

    :returns: Current time as string in ISO 8601 format, with millisecond
        precision (e.g. ``"2014-05-23T13:03:44.123Z"``).
    :rtype: unicode
    """
    # strftime's %f yields microseconds (6 digits); drop the last three
    # to report milliseconds.
    stamp = datetime.utcnow().strftime(u"%Y-%m-%dT%H:%M:%S.%f")
    return u"{0}Z".format(stamp[:-3])
def utcstr(ts):
    """
    Format a UTC timestamp in ISO 8601 format with millisecond precision.

    :param ts: The timestamp to format (or a falsy value such as ``None``).
    :type ts: instance of :py:class:`datetime.datetime` or None

    :returns: Timestamp formatted in ISO 8601 format, or ``ts`` unchanged
        when it is falsy.
    :rtype: unicode
    """
    if not ts:
        # Pass through None (and other falsy values) unchanged.
        return ts
    return u"{0}Z".format(ts.strftime(u"%Y-%m-%dT%H:%M:%S.%f")[:-3])
def parseutc(datestr):
    """
    Parse an ISO 8601 combined date and time string, like i.e. ``"2011-11-23T12:23:00Z"``
    into a UTC datetime instance.

    .. deprecated:: 0.8.12
        Use the **iso8601** module instead (e.g. ``iso8601.parse_date("2014-05-23T13:03:44.123Z")``)

    :param datestr: The datetime string to parse.
    :type datestr: unicode

    :returns: The converted datetime object, or ``None`` when the string
        does not match the expected format.
    :rtype: instance of :py:class:`datetime.datetime` or None
    """
    iso_format = u"%Y-%m-%dT%H:%M:%SZ"
    try:
        parsed = datetime.strptime(datestr, iso_format)
    except ValueError:
        parsed = None
    return parsed
class IdGenerator(object):
    """
    ID generator for WAMP request IDs.

    WAMP request IDs are sequential per WAMP session, starting at 0 and
    wrapping around at 2**53 (both values are inclusive [0, 2**53]).

    The upper bound **2**53** is chosen since it is the maximum integer that can be
    represented as a IEEE double such that all smaller integers are representable as well.

    Hence, IDs can be safely used with languages that use IEEE double as their
    main (or only) number type (JavaScript, Lua, etc).

    See https://github.com/tavendo/WAMP/blob/master/spec/basic.md#ids
    """

    # Largest representable ID before wrap-around (2**53).
    _MAX_ID = 9007199254740992

    def __init__(self):
        # Start below 0 so the first next() returns 0.
        self._next = -1

    def next(self):
        """
        Return the next sequential ID, wrapping to 0 past 2**53.

        :returns: The next ID.
        :rtype: int
        """
        self._next += 1
        if self._next > IdGenerator._MAX_ID:
            self._next = 0
        return self._next

    def __next__(self):
        # Iterator protocol (Python 3).
        return self.next()
#
# Performance comparison of IdGenerator.next(), id() and rid().
#
# All tests were performed on:
#
# - Ubuntu 14.04 LTS x86-64
# - Intel Core i7 920 @ 3.3GHz
#
# The tests generated 100 mio. IDs and run-time was measured
# as wallclock from Unix "time" command. In each run, a single CPU
# core was essentially at 100% load all the time (though the sys/usr
# ratio was different).
#
# PyPy 2.6.1:
#
# IdGenerator.next() 0.5s
# id() 29.4s
# rid() 106.1s
#
# CPython 2.7.10:
#
# IdGenerator.next() 49.0s
# id() 370.5s
# rid() 196.4s
#
#
# Note on the ID range [0, 2**53]. We once reduced the range to [0, 2**31].
# This lead to extremely hard to track down issues due to ID collisions!
# Here: https://github.com/tavendo/AutobahnPython/issues/419#issue-90483337
#
# 8 byte mask with 53 LSBs set (WAMP requires IDs from [0, 2**53]
_WAMP_ID_MASK = struct.unpack(">Q", b"\x00\x1f\xff\xff\xff\xff\xff\xff")[0]
def rid():
"""
Generate a new random integer ID from range **[0, 2**53]**.
The generated ID is uniformly distributed over the whole range, doesn't have
a period (no pseudo-random generator is used) and cryptographically strong.
The upper bound **2**53** is chosen since it is the maximum integer that can be
represented as a IEEE double such that all smaller integers are representable as well.
Hence, IDs can be safely used with languages that use IEEE double as their
main (or only) number type (JavaScript, Lua, etc).
:returns: A random integer ID.
:rtype: int
"""
return struct.unpack("@Q", os.urandom(8))[0] & _WAMP_ID_MASK
# noinspection PyShadowingBuiltins
# noinspection PyShadowingBuiltins
def id():
    """
    Generate a new random integer ID from range **[0, 2**53]**.

    The generated ID is based on a pseudo-random number generator (Mersenne Twister,
    which has a period of 2**19937-1). It is NOT cryptographically strong, and
    hence NOT suitable to generate e.g. secret keys or access tokens.

    The upper bound **2**53** is chosen since it is the maximum integer that can be
    represented as a IEEE double such that all smaller integers are representable as well.

    Hence, IDs can be safely used with languages that use IEEE double as their
    main (or only) number type (JavaScript, Lua, etc).

    :returns: A random integer ID.
    :rtype: int
    """
    # randint is inclusive on both ends: [0, 2**53].
    return random.randint(0, 2 ** 53)
def newid(length=16):
    """
    Generate a new random string ID.

    The generated ID is uniformly distributed and cryptographically strong. It is
    hence usable for things like secret keys and access tokens.

    :param length: The length (in chars) of the ID to generate.
    :type length: int

    :returns: A random string ID.
    :rtype: unicode
    """
    # Each base64 char encodes 6 bits, so request enough random bytes
    # that the encoding yields at least `length` characters.
    num_bytes = int(math.ceil(length * 6.0 / 8.0))
    encoded = base64.b64encode(os.urandom(num_bytes))
    return encoded[:length].decode('ascii')
# Select the most precise walltime measurement function available
# on the platform
#
if hasattr(time, 'perf_counter'):
    # Python >= 3.3: monotonic, highest-resolution clock on all platforms.
    # BUG FIX: the old Windows branch used time.clock, which was removed
    # in Python 3.8 -- prefer perf_counter whenever it exists.
    _rtime = time.perf_counter
elif sys.platform.startswith('win'):
    # On (legacy) Windows, this function returns wall-clock seconds elapsed since the
    # first call to this function, as a floating point number, based on the
    # Win32 function QueryPerformanceCounter(). The resolution is typically
    # better than one microsecond
    _rtime = time.clock
    _ = _rtime()  # this starts wallclock
else:
    # On Unix-like platforms, this used the first available from this list:
    # (1) gettimeofday() -- resolution in microseconds
    # (2) ftime() -- resolution in milliseconds
    # (3) time() -- resolution in seconds
    _rtime = time.time

rtime = _rtime
"""
Precise wallclock time.

:returns: The current wallclock in seconds. Returned values are only guaranteed
   to be meaningful relative to each other.
:rtype: float
"""
class Stopwatch(object):
    """
    Stopwatch based on walltime.

    This can be used to do code timing and uses the most precise walltime measurement
    available on the platform. This is a very light-weight object,
    so create/dispose is very cheap.
    """

    def __init__(self, start=True):
        """
        :param start: If ``True``, immediately start the stopwatch.
        :type start: bool
        """
        # Accumulated running time from completed run intervals.
        self._elapsed = 0
        self._running = bool(start)
        # Wallclock at which the current run interval began (None when stopped).
        self._started = rtime() if start else None

    def elapsed(self):
        """
        Return total time elapsed in seconds during which the stopwatch was running.

        :returns: The elapsed time in seconds.
        :rtype: float
        """
        if not self._running:
            return self._elapsed
        return self._elapsed + (rtime() - self._started)

    def pause(self):
        """
        Pauses the stopwatch and returns total time elapsed in seconds during which
        the stopwatch was running.

        :returns: The elapsed time in seconds.
        :rtype: float
        """
        if self._running:
            # Fold the current run interval into the accumulator.
            self._elapsed += rtime() - self._started
            self._running = False
        return self._elapsed

    def resume(self):
        """
        Resumes a paused stopwatch and returns total elapsed time in seconds
        during which the stopwatch was running.

        :returns: The elapsed time in seconds.
        :rtype: float
        """
        if self._running:
            return self._elapsed + (rtime() - self._started)
        self._started = rtime()
        self._running = True
        return self._elapsed

    def stop(self):
        """
        Stops the stopwatch and returns total time elapsed in seconds during which
        the stopwatch was (previously) running.

        :returns: The elapsed time in seconds.
        :rtype: float
        """
        total = self.pause()
        # Reset to a pristine, stopped state.
        self._elapsed = 0
        self._started = None
        self._running = False
        return total
class Tracker(object):
    """
    A key-based statistics tracker.

    Records a wallclock timestamp per key and can report differences
    between keys, or a key's absolute UTC time.
    """

    def __init__(self, tracker, tracked):
        """
        :param tracker: Identifier of the tracking entity.
        :param tracked: Identifier of the tracked entity.
        """
        self.tracker = tracker
        self.tracked = tracked
        self._timings = {}                    # key -> rtime() timestamp
        self._offset = rtime()                # wallclock reference point
        self._dt_offset = datetime.utcnow()   # UTC datetime matching _offset

    def track(self, key):
        """
        Track elapsed for key.

        :param key: Key under which to track the timing.
        :type key: str
        """
        self._timings[key] = rtime()

    def diff(self, start_key, end_key, formatted=True):
        """
        Get elapsed difference between two previously tracked keys.

        :param start_key: First key for interval (older timestamp).
        :type start_key: str
        :param end_key: Second key for interval (younger timestamp).
        :type end_key: str
        :param formatted: If ``True``, format computed time period and return string.
        :type formatted: bool

        :returns: Computed time period in seconds (or formatted string).
            If either key is unknown: ``"n.a."`` (formatted) or ``None``.
        :rtype: float or str
        """
        if end_key in self._timings and start_key in self._timings:
            d = self._timings[end_key] - self._timings[start_key]
            if formatted:
                # Pick a unit so the value stays a small integer.
                if d < 0.00001:  # 10us
                    s = "%d ns" % round(d * 1000000000.)
                elif d < 0.01:  # 10ms
                    s = "%d us" % round(d * 1000000.)
                elif d < 10:  # 10s
                    s = "%d ms" % round(d * 1000.)
                else:
                    s = "%d s" % round(d)
                return s.rjust(8)
            else:
                return d
        else:
            if formatted:
                return "n.a.".rjust(8)
            else:
                return None

    def absolute(self, key):
        """
        Return the UTC wall-clock time at which a tracked event occurred.

        :param key: The key
        :type key: str

        :returns: Timezone-naive datetime.
        :rtype: instance of :py:class:`datetime.datetime`
        :raises KeyError: if `key` was never tracked.
        """
        elapsed = self[key]
        if elapsed is None:
            # BUG FIX: the error message previously interpolated `elapsed`
            # (always None here) instead of the offending key.
            raise KeyError("No such key \"%s\"." % key)
        return self._dt_offset + timedelta(seconds=elapsed)

    def __getitem__(self, key):
        # Seconds since construction for a tracked key, else None.
        if key in self._timings:
            return self._timings[key] - self._offset
        else:
            return None

    def __iter__(self):
        return self._timings.__iter__()

    def __str__(self):
        return pformat(self._timings)
class EqualityMixin(object):
    """
    Mixin to add equality comparison operators to a class.

    Two objects are identical under this mixin, if and only if:

    1. both objects have the same class
    2. all non-private object attributes are equal
    """

    def __eq__(self, other):
        """
        Compare this object to another object for equality.

        :param other: The other object to compare with.
        :type other: obj

        :returns: ``True`` iff the objects are equal.
        :rtype: bool
        """
        if not isinstance(other, self.__class__):
            return False
        # we only want the actual message data attributes (not eg _serialize)
        for k in self.__dict__:
            if k.startswith('_'):
                continue
            # BUG FIX: an attribute present on `self` but missing on `other`
            # previously raised KeyError; treat it as "not equal" instead.
            if k not in other.__dict__ or self.__dict__[k] != other.__dict__[k]:
                return False
        return True

    def __ne__(self, other):
        """
        Compare this object to another object for inequality.

        :param other: The other object to compare with.
        :type other: obj

        :returns: ``True`` iff the objects are not equal.
        :rtype: bool
        """
        return not self.__eq__(other)
def wildcards2patterns(wildcards):
    """
    Compute a list of regular expression patterns from a list of
    wildcard strings. A wildcard string uses '*' as a wildcard character
    matching anything.

    :param wildcards: List of wildcard strings to compute regular expression patterns for.
    :type wildcards: list of str
    :returns: Computed regular expressions.
    :rtype: list of obj
    """
    # Bug fix: the old code only escaped '.' and let every other regex
    # metacharacter ('+', '(', '?', ...) leak into the pattern. Escape each
    # literal piece with re.escape and stitch the pieces back together with
    # '.*' so '*' keeps its wildcard meaning.
    return [re.compile('.*'.join(map(re.escape, wc.split('*')))) for wc in wildcards]
| |
#!/usr/bin/python
# $Id:$
import ctypes
import pyglet
from pyglet import com
from pyglet.window.win32 import _kernel32
# Handle to the DirectInput 8 DLL (only resolvable on Windows).
lib = ctypes.oledll.dinput8
# --- ctypes aliases for the Win32 primitive types used below ---
LPVOID = ctypes.c_void_p
WORD = ctypes.c_uint16
DWORD = ctypes.c_uint32
LPDWORD = ctypes.POINTER(DWORD)
BOOL = ctypes.c_int
WCHAR = ctypes.c_wchar
UINT = ctypes.c_uint
HWND = ctypes.c_uint32
MAX_PATH = 260
# --- Return values for DirectInput enumeration callbacks ---
DIENUM_STOP = 0
DIENUM_CONTINUE = 1
# --- IDirectInput8::EnumDevices filter flags (DIEDFL_*) ---
DIEDFL_ALLDEVICES = 0x00000000
DIEDFL_ATTACHEDONLY = 0x00000001
DIEDFL_FORCEFEEDBACK = 0x00000100
DIEDFL_INCLUDEALIASES = 0x00010000
DIEDFL_INCLUDEPHANTOMS = 0x00020000
DIEDFL_INCLUDEHIDDEN = 0x00040000
# --- Device classes (DI8DEVCLASS_*) ---
DI8DEVCLASS_ALL = 0
DI8DEVCLASS_DEVICE = 1
DI8DEVCLASS_POINTER = 2
DI8DEVCLASS_KEYBOARD = 3
DI8DEVCLASS_GAMECTRL = 4
# --- Primary device types (low byte of dwDevType) ---
DI8DEVTYPE_DEVICE = 0x11
DI8DEVTYPE_MOUSE = 0x12
DI8DEVTYPE_KEYBOARD = 0x13
DI8DEVTYPE_JOYSTICK = 0x14
DI8DEVTYPE_GAMEPAD = 0x15
DI8DEVTYPE_DRIVING = 0x16
DI8DEVTYPE_FLIGHT = 0x17
DI8DEVTYPE_1STPERSON = 0x18
DI8DEVTYPE_DEVICECTRL = 0x19
DI8DEVTYPE_SCREENPOINTER = 0x1A
DI8DEVTYPE_REMOTE = 0x1B
DI8DEVTYPE_SUPPLEMENTAL = 0x1C
# --- Device sub-types, grouped by primary type ---
DI8DEVTYPEMOUSE_UNKNOWN = 1
DI8DEVTYPEMOUSE_TRADITIONAL = 2
DI8DEVTYPEMOUSE_FINGERSTICK = 3
DI8DEVTYPEMOUSE_TOUCHPAD = 4
DI8DEVTYPEMOUSE_TRACKBALL = 5
DI8DEVTYPEMOUSE_ABSOLUTE = 6
DI8DEVTYPEKEYBOARD_UNKNOWN = 0
DI8DEVTYPEKEYBOARD_PCXT = 1
DI8DEVTYPEKEYBOARD_OLIVETTI = 2
DI8DEVTYPEKEYBOARD_PCAT = 3
DI8DEVTYPEKEYBOARD_PCENH = 4
DI8DEVTYPEKEYBOARD_NOKIA1050 = 5
DI8DEVTYPEKEYBOARD_NOKIA9140 = 6
DI8DEVTYPEKEYBOARD_NEC98 = 7
DI8DEVTYPEKEYBOARD_NEC98LAPTOP = 8
DI8DEVTYPEKEYBOARD_NEC98106 = 9
DI8DEVTYPEKEYBOARD_JAPAN106 = 10
DI8DEVTYPEKEYBOARD_JAPANAX = 11
DI8DEVTYPEKEYBOARD_J3100 = 12
DI8DEVTYPE_LIMITEDGAMESUBTYPE = 1
DI8DEVTYPEJOYSTICK_LIMITED = DI8DEVTYPE_LIMITEDGAMESUBTYPE
DI8DEVTYPEJOYSTICK_STANDARD = 2
DI8DEVTYPEGAMEPAD_LIMITED = DI8DEVTYPE_LIMITEDGAMESUBTYPE
DI8DEVTYPEGAMEPAD_STANDARD = 2
DI8DEVTYPEGAMEPAD_TILT = 3
DI8DEVTYPEDRIVING_LIMITED = DI8DEVTYPE_LIMITEDGAMESUBTYPE
DI8DEVTYPEDRIVING_COMBINEDPEDALS = 2
DI8DEVTYPEDRIVING_DUALPEDALS = 3
DI8DEVTYPEDRIVING_THREEPEDALS = 4
DI8DEVTYPEDRIVING_HANDHELD = 5
DI8DEVTYPEFLIGHT_LIMITED = DI8DEVTYPE_LIMITEDGAMESUBTYPE
DI8DEVTYPEFLIGHT_STICK = 2
DI8DEVTYPEFLIGHT_YOKE = 3
DI8DEVTYPEFLIGHT_RC = 4
DI8DEVTYPE1STPERSON_LIMITED = DI8DEVTYPE_LIMITEDGAMESUBTYPE
DI8DEVTYPE1STPERSON_UNKNOWN = 2
DI8DEVTYPE1STPERSON_SIXDOF = 3
DI8DEVTYPE1STPERSON_SHOOTER = 4
DI8DEVTYPESCREENPTR_UNKNOWN = 2
DI8DEVTYPESCREENPTR_LIGHTGUN = 3
DI8DEVTYPESCREENPTR_LIGHTPEN = 4
DI8DEVTYPESCREENPTR_TOUCH = 5
DI8DEVTYPEREMOTE_UNKNOWN = 2
DI8DEVTYPEDEVICECTRL_UNKNOWN = 2
DI8DEVTYPEDEVICECTRL_COMMSSELECTION = 3
DI8DEVTYPEDEVICECTRL_COMMSSELECTION_HARDWIRED = 4
DI8DEVTYPESUPPLEMENTAL_UNKNOWN = 2
DI8DEVTYPESUPPLEMENTAL_2NDHANDCONTROLLER = 3
DI8DEVTYPESUPPLEMENTAL_HEADTRACKER = 4
DI8DEVTYPESUPPLEMENTAL_HANDTRACKER = 5
DI8DEVTYPESUPPLEMENTAL_SHIFTSTICKGATE = 6
DI8DEVTYPESUPPLEMENTAL_SHIFTER = 7
DI8DEVTYPESUPPLEMENTAL_THROTTLE = 8
DI8DEVTYPESUPPLEMENTAL_SPLITTHROTTLE = 9
DI8DEVTYPESUPPLEMENTAL_COMBINEDPEDALS = 10
DI8DEVTYPESUPPLEMENTAL_DUALPEDALS = 11
DI8DEVTYPESUPPLEMENTAL_THREEPEDALS = 12
DI8DEVTYPESUPPLEMENTAL_RUDDERPEDALS = 13
# --- Device capability flags (DIDC_*) ---
DIDC_ATTACHED = 0x00000001
DIDC_POLLEDDEVICE = 0x00000002
DIDC_EMULATED = 0x00000004
DIDC_POLLEDDATAFORMAT = 0x00000008
DIDC_FORCEFEEDBACK = 0x00000100
DIDC_FFATTACK = 0x00000200
DIDC_FFFADE = 0x00000400
DIDC_SATURATION = 0x00000800
DIDC_POSNEGCOEFFICIENTS = 0x00001000
DIDC_POSNEGSATURATION = 0x00002000
DIDC_DEADBAND = 0x00004000
DIDC_STARTDELAY = 0x00008000
DIDC_ALIAS = 0x00010000
DIDC_PHANTOM = 0x00020000
DIDC_HIDDEN = 0x00040000
# --- Object (input element) type flags (DIDFT_*), used with EnumObjects ---
DIDFT_ALL = 0x00000000
DIDFT_RELAXIS = 0x00000001
DIDFT_ABSAXIS = 0x00000002
DIDFT_AXIS = 0x00000003
DIDFT_PSHBUTTON = 0x00000004
DIDFT_TGLBUTTON = 0x00000008
DIDFT_BUTTON = 0x0000000C
DIDFT_POV = 0x00000010
DIDFT_COLLECTION = 0x00000040
DIDFT_NODATA = 0x00000080
DIDFT_ANYINSTANCE = 0x00FFFF00
DIDFT_INSTANCEMASK = DIDFT_ANYINSTANCE
DIDFT_FFACTUATOR = 0x01000000
DIDFT_FFEFFECTTRIGGER = 0x02000000
DIDFT_OUTPUT = 0x10000000
DIDFT_VENDORDEFINED = 0x04000000
DIDFT_ALIAS = 0x08000000
DIDFT_OPTIONAL = 0x80000000
DIDFT_NOCOLLECTION = 0x00FFFF00
# --- Action-mapping flags (DIA_*, DIAH_*, DIAFTS_*, DIDBAM_*, DIDSAM_*, DICD_*) ---
DIA_FORCEFEEDBACK = 0x00000001
DIA_APPMAPPED = 0x00000002
DIA_APPNOMAP = 0x00000004
DIA_NORANGE = 0x00000008
DIA_APPFIXED = 0x00000010
DIAH_UNMAPPED = 0x00000000
DIAH_USERCONFIG = 0x00000001
DIAH_APPREQUESTED = 0x00000002
DIAH_HWAPP = 0x00000004
DIAH_HWDEFAULT = 0x00000008
DIAH_DEFAULT = 0x00000020
DIAH_ERROR = 0x80000000
DIAFTS_NEWDEVICELOW = 0xFFFFFFFF
DIAFTS_NEWDEVICEHIGH = 0xFFFFFFFF
DIAFTS_UNUSEDDEVICELOW = 0x00000000
DIAFTS_UNUSEDDEVICEHIGH = 0x00000000
DIDBAM_DEFAULT = 0x00000000
DIDBAM_PRESERVE = 0x00000001
DIDBAM_INITIALIZE = 0x00000002
DIDBAM_HWDEFAULTS = 0x00000004
DIDSAM_DEFAULT = 0x00000000
DIDSAM_NOUSER = 0x00000001
DIDSAM_FORCESAVE = 0x00000002
DICD_DEFAULT = 0x00000000
DICD_EDIT = 0x00000001
# --- Object instance flags and aspect codes (DIDOI_*) ---
DIDOI_FFACTUATOR = 0x00000001
DIDOI_FFEFFECTTRIGGER = 0x00000002
DIDOI_POLLED = 0x00008000
DIDOI_ASPECTPOSITION = 0x00000100
DIDOI_ASPECTVELOCITY = 0x00000200
DIDOI_ASPECTACCEL = 0x00000300
DIDOI_ASPECTFORCE = 0x00000400
DIDOI_ASPECTMASK = 0x00000F00
DIDOI_GUIDISUSAGE = 0x00010000
# --- DIPROPHEADER.dwHow values (DIPH_*) ---
DIPH_DEVICE = 0
DIPH_BYOFFSET = 1
DIPH_BYID = 2
DIPH_BYUSAGE = 3
# --- SetCooperativeLevel flags (DISCL_*) ---
DISCL_EXCLUSIVE = 0x00000001
DISCL_NONEXCLUSIVE = 0x00000002
DISCL_FOREGROUND = 0x00000004
DISCL_BACKGROUND = 0x00000008
DISCL_NOWINKEY = 0x00000010
# Property identifier for the buffered-data queue size.
DIPROP_BUFFERSIZE = 1
class DIDEVICEINSTANCE(ctypes.Structure):
    """Mirror of the Win32 DIDEVICEINSTANCEW structure (Unicode variant)."""
    _fields_ = (
        ('dwSize', DWORD),
        ('guidInstance', com.GUID),
        ('guidProduct', com.GUID),
        ('dwDevType', DWORD),
        ('tszInstanceName', WCHAR * MAX_PATH),
        ('tszProductName', WCHAR * MAX_PATH),
        ('guidFFDriver', com.GUID),
        ('wUsagePage', WORD),
        ('wUsage', WORD)
    )
LPDIDEVICEINSTANCE = ctypes.POINTER(DIDEVICEINSTANCE)
# Callback signature for IDirectInput8::EnumDevices.
LPDIENUMDEVICESCALLBACK = ctypes.WINFUNCTYPE(BOOL, LPDIDEVICEINSTANCE, LPVOID)
class DIDEVICEOBJECTINSTANCE(ctypes.Structure):
    """Mirror of the Win32 DIDEVICEOBJECTINSTANCEW structure.

    Describes one input element (axis, button, POV, ...) on a device.
    """
    _fields_ = (
        ('dwSize', DWORD),
        ('guidType', com.GUID),
        ('dwOfs', DWORD),
        ('dwType', DWORD),
        ('dwFlags', DWORD),
        ('tszName', WCHAR * MAX_PATH),
        ('dwFFMaxForce', DWORD),
        ('dwFFForceResolution', DWORD),
        ('wCollectionNumber', WORD),
        ('wDesignatorIndex', WORD),
        ('wUsagePage', WORD),
        ('wUsage', WORD),
        ('dwDimension', DWORD),
        ('wExponent', WORD),
        ('wReportId', WORD)
    )
LPDIDEVICEOBJECTINSTANCE = ctypes.POINTER(DIDEVICEOBJECTINSTANCE)
# Callback signature for IDirectInputDevice8::EnumObjects.
LPDIENUMDEVICEOBJECTSCALLBACK = \
    ctypes.WINFUNCTYPE( BOOL, LPDIDEVICEOBJECTINSTANCE, LPVOID)
class DIOBJECTDATAFORMAT(ctypes.Structure):
    """Mirror of the Win32 DIOBJECTDATAFORMAT structure.

    Maps one device object to an offset inside the application's
    data-format buffer (see DIDATAFORMAT.rgodf).
    """
    _fields_ = (
        ('pguid', ctypes.POINTER(com.GUID)),
        ('dwOfs', DWORD),
        ('dwType', DWORD),
        ('dwFlags', DWORD)
    )
    __slots__ = [n for n, t in _fields_]
LPDIOBJECTDATAFORMAT = ctypes.POINTER(DIOBJECTDATAFORMAT)
class DIDATAFORMAT(ctypes.Structure):
    """Mirror of the Win32 DIDATAFORMAT structure.

    Passed to IDirectInputDevice8::SetDataFormat to describe how the
    device's objects map into the application's state buffer.
    """
    _fields_ = (
        ('dwSize', DWORD),
        ('dwObjSize', DWORD),
        ('dwFlags', DWORD),
        ('dwDataSize', DWORD),
        ('dwNumObjs', DWORD),
        ('rgodf', LPDIOBJECTDATAFORMAT)
    )
    __slots__ = [n for n, t in _fields_]
LPDIDATAFORMAT = ctypes.POINTER(DIDATAFORMAT)
class DIDEVICEOBJECTDATA(ctypes.Structure):
    """Mirror of the Win32 DIDEVICEOBJECTDATA structure.

    One buffered input event as returned by
    IDirectInputDevice8::GetDeviceData.
    """
    _fields_ = (
        ('dwOfs', DWORD),
        ('dwData', DWORD),
        ('dwTimeStamp', DWORD),
        ('dwSequence', DWORD),
        ('uAppData', ctypes.POINTER(UINT))
    )
LPDIDEVICEOBJECTDATA = ctypes.POINTER(DIDEVICEOBJECTDATA)
class DIPROPHEADER(ctypes.Structure):
    """Mirror of the Win32 DIPROPHEADER structure (common property header)."""
    _fields_ = (
        ('dwSize', DWORD),
        ('dwHeaderSize', DWORD),
        ('dwObj', DWORD),
        ('dwHow', DWORD)
    )
LPDIPROPHEADER = ctypes.POINTER(DIPROPHEADER)
class DIPROPDWORD(ctypes.Structure):
    """Mirror of the Win32 DIPROPDWORD structure (a DWORD-valued property)."""
    _fields_ = (
        ('diph', DIPROPHEADER),
        ('dwData', DWORD)
    )
# All method names in the interfaces are filled in, but unused (so far)
# methods have no parameters.. they'll crash when we try and use them, at
# which point we can go in and fill them in.
# IDirect* interfaces are all Unicode (e.g. IDirectInputDevice8W).
class IDirectInputDevice8(com.IUnknown):
    """COM vtable layout for IDirectInputDevice8W (Unicode variant).

    The order of entries must exactly match the native vtable. Only the
    methods this module actually calls carry full argument specs; the
    remaining names are slot placeholders with empty signatures and will
    crash if invoked before their parameters are filled in.
    """
    _methods_ = [
        ('GetCapabilities',
         com.STDMETHOD()),
        ('EnumObjects',
         com.STDMETHOD(LPDIENUMDEVICEOBJECTSCALLBACK, LPVOID, DWORD)),
        ('GetProperty',
         com.STDMETHOD()),
        ('SetProperty',
         com.STDMETHOD(LPVOID, LPDIPROPHEADER)),
        ('Acquire',
         com.STDMETHOD()),
        ('Unacquire',
         com.STDMETHOD()),
        ('GetDeviceState',
         com.STDMETHOD()),
        ('GetDeviceData',
         com.STDMETHOD(DWORD, LPDIDEVICEOBJECTDATA, LPDWORD, DWORD)),
        ('SetDataFormat',
         com.STDMETHOD(LPDIDATAFORMAT)),
        ('SetEventNotification',
         com.STDMETHOD()),
        ('SetCooperativeLevel',
         com.STDMETHOD(HWND, DWORD)),
        ('GetObjectInfo',
         com.STDMETHOD()),
        ('GetDeviceInfo',
         com.STDMETHOD()),
        ('RunControlPanel',
         com.STDMETHOD()),
        ('Initialize',
         com.STDMETHOD()),
        ('CreateEffect',
         com.STDMETHOD()),
        ('EnumEffects',
         com.STDMETHOD()),
        ('GetEffectInfo',
         com.STDMETHOD()),
        ('GetForceFeedbackState',
         com.STDMETHOD()),
        ('SendForceFeedbackCommand',
         com.STDMETHOD()),
        ('EnumCreatedEffectObjects',
         com.STDMETHOD()),
        ('Escape',
         com.STDMETHOD()),
        ('Poll',
         com.STDMETHOD()),
        ('SendDeviceData',
         com.STDMETHOD()),
        ('EnumEffectsInFile',
         com.STDMETHOD()),
        ('WriteEffectToFile',
         com.STDMETHOD()),
        ('BuildActionMap',
         com.STDMETHOD()),
        ('SetActionMap',
         com.STDMETHOD()),
        ('GetImageInfo',
         com.STDMETHOD()),
    ]
class IDirectInput8(com.IUnknown):
    """COM vtable layout for IDirectInput8W (Unicode variant).

    Slot order must match the native vtable; unused methods are
    placeholders with empty signatures.
    """
    _methods_ = [
        ('CreateDevice',
         com.STDMETHOD(ctypes.POINTER(com.GUID),
                       ctypes.POINTER(IDirectInputDevice8),
                       ctypes.c_void_p)),
        ('EnumDevices',
         com.STDMETHOD(DWORD, LPDIENUMDEVICESCALLBACK, LPVOID, DWORD)),
        ('GetDeviceStatus',
         com.STDMETHOD()),
        ('RunControlPanel',
         com.STDMETHOD()),
        ('Initialize',
         com.STDMETHOD()),
        ('FindDevice',
         com.STDMETHOD()),
        ('EnumDevicesBySemantics',
         com.STDMETHOD()),
        ('ConfigureDevices',
         com.STDMETHOD()),
    ]
# Interface identifier for the Unicode IDirectInput8W interface.
IID_IDirectInput8W = \
    com.GUID(0xBF798031,0x483A,0x4DA2,0xAA,0x99,0x5D,0x64,0xED,0x36,0x97,0x00)
# Declare DirectInput8Create's argument types so ctypes marshals them correctly.
lib.DirectInput8Create.argtypes = \
    (ctypes.c_void_p, DWORD, com.LPGUID, ctypes.c_void_p, ctypes.c_void_p)
class Element(object):
    """A single input element (axis, button, ...) on a DirectInput device."""

    # Latest raw value reported for this element; updated by the owning
    # Device during event dispatch.
    value = None

    def __init__(self, object_instance):
        """Snapshot the relevant fields of a DIDEVICEOBJECTINSTANCE."""
        self.name = object_instance.tszName
        self._guid = object_instance.guidType
        self._flags = object_instance.dwFlags
        self._type = object_instance.dwType

    def get_value(self):
        """Return the most recently dispatched value for this element."""
        return self.value
class Device(object):
    """High-level wrapper around one enumerated DirectInput device.

    Builds the element list via EnumObjects, registers a data format so
    buffered events can be read, and translates those events back into
    per-element values in dispatch_events().
    """
    def __init__(self, device, device_instance):
        # device: an acquired IDirectInputDevice8 pointer
        # device_instance: the DIDEVICEINSTANCE it was enumerated from
        self.name = device_instance.tszInstanceName
        #print self.name, hex(device_instance.dwDevType & 0xff), \
        #    hex(device_instance.dwDevType & 0xff00)
        #print hex(device_instance.wUsagePage), hex(device_instance.wUsage)
        self._device = device
        self._init_elements()
        self._set_format()
    def _init_elements(self):
        """Populate self.elements with one Element per device object."""
        self.elements = []
        self._device.EnumObjects(
            LPDIENUMDEVICEOBJECTSCALLBACK(self._object_enum), None, DIDFT_ALL)
    def _object_enum(self, object_instance, arg):
        # EnumObjects callback. NOTE(review): 'type' shadows the builtin;
        # left as-is to keep the code byte-identical.
        type = object_instance.contents.dwType
        flags = object_instance.contents.dwFlags
        # Skip objects that carry no input data (e.g. collections).
        if type & DIDFT_NODATA:
            return DIENUM_CONTINUE
        element = Element(object_instance.contents)
        self.elements.append(element)
        return DIENUM_CONTINUE
    def _set_format(self):
        """Register a packed data format: one DWORD (4 bytes) per element."""
        if not self.elements:
            return
        object_formats = (DIOBJECTDATAFORMAT * len(self.elements))()
        offset = 0
        for object_format, element in zip(object_formats, self.elements):
            object_format.dwOfs = offset
            object_format.dwType = element._type
            # Each element occupies one 4-byte slot; dispatch_events()
            # relies on this when it computes index = dwOfs // 4.
            offset += 4
        format = DIDATAFORMAT()
        format.dwSize = ctypes.sizeof(format)
        format.dwObjSize = ctypes.sizeof(DIOBJECTDATAFORMAT)
        format.dwFlags = 0
        format.dwDataSize = offset
        format.dwNumObjs = len(object_formats)
        format.rgodf = ctypes.cast(ctypes.pointer(object_formats),
                                   LPDIOBJECTDATAFORMAT)
        self._device.SetDataFormat(format)
        # Enable buffered input. NOTE(review): DIPROP_BUFFERSIZE counts
        # *events*, so '64 * sizeof(DIDATAFORMAT)' looks oversized —
        # presumably 64 was intended; confirm against the DirectInput docs.
        prop = DIPROPDWORD()
        prop.diph.dwSize = ctypes.sizeof(prop)
        prop.diph.dwHeaderSize = ctypes.sizeof(prop.diph)
        prop.diph.dwObj = 0
        prop.diph.dwHow = DIPH_DEVICE
        prop.dwData = 64 * ctypes.sizeof(DIDATAFORMAT)
        self._device.SetProperty(DIPROP_BUFFERSIZE, ctypes.byref(prop.diph))
    def open(self, window=None):
        """Acquire the device and start polling it once per clock tick."""
        if not self.elements:
            return
        if window is None:
            # Pick any open window, or the shadow window if no windows
            # have been created yet.
            window = pyglet.gl._shadow_window
            for window in pyglet.app.windows:
                break
        self._device.SetCooperativeLevel(window._hwnd,
            DISCL_BACKGROUND | DISCL_NONEXCLUSIVE)
        self._device.Acquire()
        # XXX HACK
        pyglet.clock.schedule(self.dispatch_events)
    def close(self):
        """Release the device (no unschedule of dispatch_events here)."""
        if not self.elements:
            return
        self._device.Unacquire()
        # XXX HACK?
    def dispatch_events(self, dt): # dt HACK
        """Drain up to 64 buffered events and update element values."""
        if not self.elements:
            return
        events = (DIDEVICEOBJECTDATA * 64)()
        n_events = DWORD(len(events))
        self._device.GetDeviceData(ctypes.sizeof(DIDEVICEOBJECTDATA),
                                   ctypes.cast(ctypes.pointer(events),
                                               LPDIDEVICEOBJECTDATA),
                                   ctypes.byref(n_events),
                                   0)
        for event in events[:n_events.value]:
            # Reverse the 4-bytes-per-element mapping set up in _set_format.
            index = event.dwOfs // 4
            self.elements[index].value = event.dwData
def _device_enum(device_instance, arg):
    """EnumDevices callback: wrap each enumerated device in a Device.

    Creates the COM device via the module-level ``dinput`` interface,
    appends the wrapper to the module-level ``_devices`` list and always
    continues the enumeration.
    """
    device = IDirectInputDevice8()
    dinput.CreateDevice(device_instance.contents.guidInstance,
                        ctypes.byref(device),
                        None)
    _devices.append(Device(device, device_instance.contents))
    return DIENUM_CONTINUE
def get_devices():
    """Enumerate all attached DirectInput devices.

    Resets and returns the module-level ``_devices`` list, which the
    enumeration callback fills with Device wrappers.
    """
    global _devices
    _devices = []
    # EnumDevices synchronously invokes _device_enum once per attached
    # device; the callback appends each wrapper to _devices.
    callback = LPDIENUMDEVICESCALLBACK(_device_enum)
    dinput.EnumDevices(DI8DEVCLASS_ALL, callback, None, DIEDFL_ATTACHEDONLY)
    return _devices
def _init_directinput():
    """Create the module-level IDirectInput8 COM interface instance."""
    global dinput
    dinput = IDirectInput8()
    DIRECTINPUT_VERSION = 0x0800
    # DirectInput8Create needs the module handle of the calling process.
    hmodule = _kernel32.GetModuleHandleW(None)
    lib.DirectInput8Create(hmodule, DIRECTINPUT_VERSION,
                           IID_IDirectInput8W, ctypes.byref(dinput), None)
_init_directinput()
'''
#for device in get_devices():
device = get_devices()[0]
device.open(w)
print device.name
pyglet.app.run()
'''
| |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Creating batch predictions
"""
from world import world, setup_module, teardown_module
import create_source_steps as source_create
import create_dataset_steps as dataset_create
import create_model_steps as model_create
import create_ensemble_steps as ensemble_create
import create_cluster_steps as cluster_create
import create_anomaly_steps as anomaly_create
import create_batch_prediction_steps as batch_pred_create
import create_prediction_steps as prediction_create
class TestBatchPrediction(object):
    """Integration scenarios for batch predictions, centroids and anomaly scores.

    NOTE: written for Python 2 (``print`` statements) against the BigML
    world/steps helper modules imported at the top of this file.
    """
    def test_scenario1(self):
        """
        Scenario: Successfully creating a batch prediction:
            Given I create a data source uploading a "<data>" file
            And I wait until the source is ready less than <time_1> secs
            And I create a dataset
            And I wait until the dataset is ready less than <time_2> secs
            And I create a model
            And I wait until the model is ready less than <time_3> secs
            When I create a batch prediction for the dataset with the model
            And I wait until the batch prediction is ready less than <time_4> secs
            And I download the created predictions file to "<local_file>"
            Then the batch prediction file is like "<predictions_file>"

            Examples:
            | data             | time_1  | time_2 | time_3 | time_4 | local_file | predictions_file       |
            | ../data/iris.csv | 30      | 30     | 50     | 50     | ./tmp/batch_predictions.csv |./data/batch_predictions.csv |
        """
        print self.test_scenario1.__doc__
        examples = [
            ['data/iris.csv', '30', '30', '50', '50', 'tmp/batch_predictions.csv', 'data/batch_predictions.csv']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model(self)
            model_create.the_model_is_finished_in_less_than(self, example[3])
            batch_pred_create.i_create_a_batch_prediction(self)
            batch_pred_create.the_batch_prediction_is_finished_in_less_than(self, example[4])
            batch_pred_create.i_download_predictions_file(self, example[5])
            batch_pred_create.i_check_predictions(self, example[6])

    def test_scenario2(self):
        """
        Scenario: Successfully creating a batch prediction for an ensemble:
            Given I create a data source uploading a "<data>" file
            And I wait until the source is ready less than <time_1> secs
            And I create a dataset
            And I wait until the dataset is ready less than <time_2> secs
            And I create an ensemble of <number_of_models> models and <tlp> tlp
            And I wait until the ensemble is ready less than <time_3> secs
            When I create a batch prediction for the dataset with the ensemble
            And I wait until the batch prediction is ready less than <time_4> secs
            And I download the created predictions file to "<local_file>"
            Then the batch prediction file is like "<predictions_file>"

            Examples:
            | data             | time_1  | time_2 | number_of_models | tlp | time_3 | time_4 | local_file | predictions_file |
            | ../data/iris.csv | 30      | 30     | 5                | 1   | 80     | 50     | ./tmp/batch_predictions.csv | ./data/batch_predictions_e.csv |
        """
        print self.test_scenario2.__doc__
        examples = [
            ['data/iris.csv', '30', '30', '5', '1', '80', '50', 'tmp/batch_predictions.csv', 'data/batch_predictions_e.csv']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            ensemble_create.i_create_an_ensemble(self, example[3], example[4])
            ensemble_create.the_ensemble_is_finished_in_less_than(self, example[5])
            batch_pred_create.i_create_a_batch_prediction_ensemble(self)
            batch_pred_create.the_batch_prediction_is_finished_in_less_than(self, example[6])
            batch_pred_create.i_download_predictions_file(self, example[7])
            batch_pred_create.i_check_predictions(self, example[8])

    def test_scenario3(self):
        """
        Scenario: Successfully creating a batch centroid from a cluster:
            Given I create a data source uploading a "<data>" file
            And I wait until the source is ready less than <time_1> secs
            And I create a dataset
            And I wait until the dataset is ready less than <time_2> secs
            And I create a cluster
            And I wait until the cluster is ready less than <time_3> secs
            When I create a batch centroid for the dataset
            And I check the batch centroid is ok
            And I wait until the batch centroid is ready less than <time_4> secs
            And I download the created centroid file to "<local_file>"
            Then the batch centroid file is like "<predictions_file>"

            Examples:
            | data                 | time_1  | time_2 | time_3 | time_4 | local_file | predictions_file       |
            | ../data/diabetes.csv | 50      | 50     | 50     | 50     | ./tmp/batch_predictions.csv |./data/batch_predictions_c.csv |
        """
        print self.test_scenario3.__doc__
        examples = [
            ['data/diabetes.csv', '50', '50', '50', '50', 'tmp/batch_predictions.csv', 'data/batch_predictions_c.csv']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            cluster_create.i_create_a_cluster(self)
            cluster_create.the_cluster_is_finished_in_less_than(self, example[3])
            batch_pred_create.i_create_a_batch_prediction_with_cluster(self)
            batch_pred_create.the_batch_centroid_is_finished_in_less_than(self, example[4])
            batch_pred_create.i_download_centroid_file(self, example[5])
            batch_pred_create.i_check_predictions(self, example[6])

    def test_scenario4(self):
        """
        Scenario: Successfully creating a source from a batch prediction:
            Given I create a data source uploading a "<data>" file
            And I wait until the source is ready less than <time_1> secs
            And I create a dataset
            And I wait until the dataset is ready less than <time_2> secs
            And I create a model
            And I wait until the model is ready less than <time_3> secs
            When I create a batch prediction for the dataset with the model
            And I wait until the batch prediction is ready less than <time_4> secs
            Then I create a source from the batch prediction
            And I wait until the source is ready less than <time_1> secs

            Examples:
            | data             | time_1  | time_2 | time_3 | time_4 |
            | ../data/iris.csv | 30      | 30     | 50     | 50     |
        """
        print self.test_scenario4.__doc__
        # NOTE(review): the docstring's example row says iris.csv but the
        # data below uses diabetes.csv — confirm which dataset is intended.
        examples = [
            ['data/diabetes.csv', '30', '30', '50', '50']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            model_create.i_create_a_model(self)
            model_create.the_model_is_finished_in_less_than(self, example[3])
            batch_pred_create.i_create_a_batch_prediction(self)
            batch_pred_create.the_batch_prediction_is_finished_in_less_than(self, example[4])
            batch_pred_create.i_create_a_source_from_batch_prediction(self)
            source_create.the_source_is_finished(self, example[1])

    def test_scenario5(self):
        """
        Scenario: Successfully creating a batch anomaly score from an anomaly detector:
            Given I create a data source uploading a "<data>" file
            And I wait until the source is ready less than <time_1> secs
            And I create a dataset
            And I wait until the dataset is ready less than <time_2> secs
            And I create an anomaly detector
            And I wait until the anomaly detector is ready less than <time_3> secs
            When I create a batch anomaly score
            And I check the batch anomaly score is ok
            And I wait until the batch anomaly score is ready less than <time_4> secs
            And I download the created anomaly score file to "<local_file>"
            Then the batch anomaly score file is like "<predictions_file>"

            Examples:
            | data                 | time_1  | time_2 | time_3 | time_4 | local_file | predictions_file       |
            | ../data/tiny_kdd.csv | 30      | 30     | 50     | 50     | ./tmp/batch_predictions.csv |./data/batch_predictions_a.csv |
        """
        print self.test_scenario5.__doc__
        examples = [
            ['data/tiny_kdd.csv', '30', '30', '50', '50', 'tmp/batch_predictions.csv', 'data/batch_predictions_a.csv']]
        for example in examples:
            print "\nTesting with:\n", example
            source_create.i_upload_a_file(self, example[0])
            source_create.the_source_is_finished(self, example[1])
            dataset_create.i_create_a_dataset(self)
            dataset_create.the_dataset_is_finished_in_less_than(self, example[2])
            anomaly_create.i_create_an_anomaly(self)
            anomaly_create.the_anomaly_is_finished_in_less_than(self, example[3])
            batch_pred_create.i_create_a_batch_prediction_with_anomaly(self)
            batch_pred_create.the_batch_anomaly_score_is_finished_in_less_than(self, example[4])
            batch_pred_create.i_download_anomaly_score_file(self, example[5])
            batch_pred_create.i_check_predictions(self, example[6])
| |
#file : InMoov3.hand+arm+rockpaperscissors.py
from java.lang import String
import threading
import time
import random
# this will run with versions of MRL above 1695
# a very minimal script for InMoov
# although this script is very short you can still
# do voice control of a right hand or finger box
# It uses WebkitSpeechRecognition, so you need to use Chrome as your default browser for this script to work
# Start the webgui service without starting the browser
webgui = Runtime.create("WebGui","WebGui")
webgui.autoStartBrowser(False)
webgui.startService()
# Then start the browsers and show the WebkitSpeechRecognition service named i01.ear
webgui.startBrowser("http://localhost:8888/#/service/i01.ear")
# As an alternative you can use the line below to show all services in the browser. In that case you should comment out all lines above that starts with webgui.
# webgui = Runtime.createAndStart("webgui","WebGui")
# play rock paper scissors
# Module-level score counters for the rock-paper-scissors game
# (mutated via 'global' in the game functions below).
inmoov = 0
human = 0
# Change to the port that you use
rightPort = "COM7"
ear = Runtime.createAndStart("i01.ear", "WebkitSpeechRecognition")
# Route recognized text to the heard() logger below.
ear.addListener("publishText", python.name, "heard");
def heard(data):
    """Log the text published by the i01.ear speech-recognition service."""
    print "Speech Recognition Data:"+str(data)
######################################################################
#to tweak the default voice
Voice="cmu-slt-hsmm" # Default female for MarySpeech
#Voice="cmu-bdl" #Male US voice.You need to add the necessary file.jar to myrobotlab.1.0.XXXX/library/jar
#https://github.com/MyRobotLab/pyrobotlab/blob/ff6e2cef4d0642e47ee15e353ef934ac6701e713/home/hairygael/voice-cmu-bdl-5.2.jar
voiceType = Voice
mouth = Runtime.createAndStart("i01.mouth", "MarySpeech")
mouth.setVoice(voiceType)
##############
# starting parts
i01 = Runtime.createAndStart("i01", "InMoov")
i01.startEar()
i01.startMouth()
##############
i01.startRightHand(rightPort)
# tweaking defaults settings of right hand
#i01.rightHand.thumb.setMinMax(55,135)
#i01.rightHand.index.setMinMax(0,160)
#i01.rightHand.majeure.setMinMax(0,140)
#i01.rightHand.ringFinger.setMinMax(48,145)
#i01.rightHand.pinky.setMinMax(45,146)
#i01.rightHand.thumb.map(0,180,55,135)
#i01.rightHand.index.map(0,180,0,160)
#i01.rightHand.majeure.map(0,180,0,140)
#i01.rightHand.ringFinger.map(0,180,48,145)
#i01.rightHand.pinky.map(0,180,45,146)
#################
i01.startRightArm(rightPort)
# tweak default RightArm
#i01.rightArm.bicep.setMinMax(0,90)
#i01.rightArm.rotate.setMinMax(46,160)
#i01.rightArm.shoulder.setMinMax(30,100)
#i01.rightArm.omoplate.setMinMax(10,75)
# verbal commands: map spoken phrases to service methods
ear = i01.ear
ear.addCommand("attach your right hand", "i01.rightHand", "attach")
ear.addCommand("disconnect your right hand", "i01.rightHand", "detach")
ear.addCommand("rest", i01.getName(), "rest")
ear.addCommand("open your hand", "python", "handopen")
ear.addCommand("close your hand", "python", "handclose")
ear.addCommand("capture gesture", ear.getName(), "captureGesture")
ear.addCommand("manual", ear.getName(), "lockOutAllGrammarExcept", "voice control")
ear.addCommand("voice control", ear.getName(), "clearLock")
ear.addCommand("rock paper scissors", "python", "rockpaperscissors")
ear.addCommand("play", "python", "rockpaperscissors2")
ear.addCommand("ready", "python", "ready")
ear.addCommand("rock", "python", "rock")
ear.addCommand("paper", "python", "paper")
ear.addCommand("scissors", "python", "scissors")
# Confirmations and Negations are not supported yet in WebkitSpeechRecognition
# So commands will execute immediatley
# NOTE(review): "addComfirmations" is the spelling of the MRL API method —
# confirm against the installed MyRobotLab version before renaming.
ear.addComfirmations("yes","correct","yeah","ya")
ear.addNegations("no","wrong","nope","nah")
ear.addListener("recognized", "python", "heard")
#Direct verbal commands("yes | no | i have rock | i have paper | i have scissors")
ear.startListening()
def handopen():
    """Open the right hand fully (all fingers to 0) and confirm verbally."""
    i01.moveHand("right",0,0,0,0,0)
    i01.mouth.speak("ok I open my hand")
def handclose():
    """Close the right hand fully (all fingers to 180) and confirm verbally."""
    i01.moveHand("right",180,180,180,180,180)
    # Bug fix: this used to speak "a nice and wide open hand that is",
    # which describes the *open* gesture; say something matching the
    # closing motion instead.
    i01.mouth.speak("ok I close my hand")
def fullspeed():
    """Set right-hand and right-arm servo speeds to maximum (1.0)."""
    i01.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
    i01.setArmSpeed("right", 1.0, 1.0, 1.0, 1.0)
def rockpaperscissors():
    """Game entry point: announce the rules, then start the first round."""
    fullspeed()
    i01.mouth.speak("lets play first to 3 points win")
    sleep(4)
    rockpaperscissors2()
def rockpaperscissors2():
    """Play one round: pick a random gesture, hear the human's gesture,
    announce the outcome, update the score and recurse until 3 points.

    Relies on the module-level counters ``inmoov``/``human`` and on
    ``msg_i01_ear_recognized`` (MRL's last-recognized-speech message).
    """
    # x selects the robot's gesture: 1=rock, 2=paper, 3=scissors.
    # NOTE(review): x is reused below for the random speech choice, so a
    # later 'if x == 2/3' branch can also fire after the first branch
    # reassigns it — presumably unintended; confirm before changing.
    x = (random.randint(1, 3))
    global inmoov
    global human
    if x == 1:
        # Robot plays rock.
        ready()
        sleep(2)
        rock()
        sleep(2)
        data = msg_i01_ear_recognized.data[0]
        if (data == "i have rock"):
            # Tie: no points either way.
            x = (random.randint(1, 3))
            if x == 1:
                i01.mouth.speak("zero zero")
            if x == 2:
                i01.mouth.speak("no no")
            if x == 3:
                i01.mouth.speak("no points")
            sleep(1)
        if (data == "i have paper"):
            # Paper beats rock: human scores.
            x = (random.randint(1, 3))
            if x == 1:
                i01.mouth.speak("paper beats rock")
            if x == 2:
                i01.mouth.speak("your point")
            if x == 3:
                i01.mouth.speak("you got this one")
            human += 1
            sleep(1)
        if (data == "i have scissors"):
            # Rock beats scissors: robot scores.
            x = (random.randint(1, 3))
            if x == 1:
                i01.mouth.speak("1 point for me")
            if x == 2:
                i01.mouth.speak("going fine")
            if x == 3:
                i01.mouth.speak("rock beats scissors")
            inmoov += 1
            sleep(1)
    if x == 2:
        # Robot plays paper.
        ready()
        sleep(2)
        paper()
        sleep(2)
        data = msg_i01_ear_recognized.data[0]
        if (data == "i have rock"):
            # Paper beats rock: robot scores.
            x = (random.randint(1, 3))
            if x == 1:
                i01.mouth.speak("1 point")
            if x == 2:
                i01.mouth.speak("paper beats rock")
            if x == 3:
                i01.mouth.speak("my point")
            inmoov += 1
            sleep(1)
        if (data == "i have paper"):
            # Tie.
            x = (random.randint(1, 3))
            if x == 1:
                i01.mouth.speak("no points")
            if x == 2:
                i01.mouth.speak("ok lets try again")
                sleep(2)
            if x == 3:
                i01.mouth.speak("again")
            sleep(1)
        if (data == "i have scissors"):
            # Scissors beat paper: human scores.
            x = (random.randint(1, 3))
            if x == 1:
                i01.mouth.speak("oh no you get 1 point")
            if x == 2:
                i01.mouth.speak("this is not good for me")
            if x == 3:
                i01.mouth.speak("your point")
            human += 1
            sleep(1)
    if x == 3:
        # Robot plays scissors.
        ready()
        sleep(2)
        scissors()
        sleep(2)
        data = msg_i01_ear_recognized.data[0]
        if (data == "i have rock"):
            # Rock beats scissors: human scores.
            x = (random.randint(1, 3))
            if x == 1:
                i01.mouth.speak("oh no")
            if x == 2:
                i01.mouth.speak("rock beats scissors")
            if x == 3:
                i01.mouth.speak("i feel generous today")
            human += 1
            sleep(1)
        if (data == "i have paper"):
            # Scissors beat paper: robot scores.
            x = (random.randint(1, 3))
            if x == 1:
                i01.mouth.speak("i've got you")
            if x == 2:
                i01.mouth.speak("my point")
            if x == 3:
                i01.mouth.speak("good")
            inmoov += 1
            sleep(1)
        if (data == "i have scissors"):
            # Tie.
            x = (random.randint(1, 3))
            if x == 1:
                i01.mouth.speak("no no")
            if x == 2:
                i01.mouth.speak("zero zero")
            if x == 3:
                i01.mouth.speak("no points")
            sleep(1)
    # First to 3 points ends the game; otherwise play another round.
    if inmoov == 3:
        stoprockpaperscissors()
        sleep(1)
    elif human == 3: # changed from if to elif
        stoprockpaperscissors()
        sleep(1)
    elif inmoov <= 2: # changed from if to elif
        rockpaperscissors2()
    elif human <= 2: # changed from if to elif
        rockpaperscissors2()
def stoprockpaperscissors():
    """Announce the final score, reset the counters and offer a rematch.

    Reads and resets the module-level score counters ``inmoov`` and
    ``human``, then reacts to the next recognized yes/no answer.
    """
    # Bug fix: the 'global' declarations used to appear *after* the first
    # reads of inmoov/human, which is a SyntaxError in Python 3 (and a
    # SyntaxWarning in Python 2/Jython). Declare them up front.
    global inmoov
    global human
    rest()
    sleep(5)
    if inmoov < human:
        # Bug fix: added the missing spaces around the numbers so the TTS
        # does not run words together ("with3points").
        i01.mouth.speak("congratulations you won with " + str(human - inmoov) + " points")
        sleep(3)
        i01.mouth.speak(str(human) + " points to you and " + str(inmoov) + " points to me")
    elif inmoov > human: # changed from if to elif
        i01.mouth.speak("yes yes i won with " + str(inmoov - human) + " points")
        sleep(3)
        i01.mouth.speak("i've got " + str(inmoov) + " points and you got " + str(human) + " points")
    elif inmoov == human: # changed from if to elif
        i01.mouth.speak("none of us won we both got " + str(inmoov) + " points")
    inmoov = 0
    human = 0
    i01.mouth.speak("that was fun")
    sleep(2)
    i01.mouth.speak("do you want to play again")
    sleep(10)
    data = msg_i01_ear_recognized.data[0]
    if (data == "yes let's play again"):
        rockpaperscissors2()
    elif (data == "yes"): # changed from if to elif
        rockpaperscissors2()
    elif (data == "no thanks"): # changed from if to elif
        i01.mouth.speak("maybe some other time")
        sleep(4)
        # NOTE(review): power_down is not defined anywhere in this script —
        # confirm it is provided by another loaded script or remove the call.
        power_down()
    elif (data == "no thank you"): # changed from if to elif
        i01.mouth.speak("maybe some other time")
        sleep(4)
        power_down()
def ready():
    """Announce the count-in and move head/arms/hands to the start pose."""
    i01.mouth.speak("ready")
    i01.mouth.speak("go")
    i01.moveHead(90,90)
    i01.moveArm("left",65,90,75,10)
    i01.moveArm("right",20,80,25,20)
    i01.moveHand("left",130,180,180,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
def rock():
    """Pump the left arm three times (the count gesture), end in the rock
    pose (left fist closed), then ask what the human played."""
    fullspeed()
    i01.moveHead(90,90)
    i01.moveArm("left",70,90,80,10)
    i01.moveArm("right",20,80,20,20)
    i01.moveHand("left",130,180,180,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.5)
    # First pump.
    i01.setHeadSpeed(.8,.8)
    i01.moveHead(60,107)
    i01.moveArm("left",49,90,75,10)
    i01.moveArm("right",20,80,20,20)
    i01.moveHand("left",130,180,180,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.5)
    i01.moveArm("left",80,90,85,10)
    i01.moveArm("right",20,80,20,20)
    i01.moveHand("left",130,180,180,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.5)
    # Second pump.
    i01.setHeadSpeed(.8,.8)
    i01.moveHead(60,107)
    i01.moveArm("left",49,90,75,10)
    i01.moveArm("right",20,80,20,20)
    i01.moveHand("left",130,180,180,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.5)
    i01.moveArm("left",90,90,90,10)
    i01.moveArm("right",20,85,10,20)
    i01.moveHand("left",130,180,180,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.5)
    # Final drop: left hand stays closed = rock.
    i01.setHeadSpeed(.8,.8)
    i01.moveHead(60,107)
    i01.moveArm("left",45,90,75,10)
    i01.moveArm("right",20,80,20,20)
    i01.moveHand("left",130,180,180,180,180,80)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.3)
    x = (random.randint(1, 2))
    if x == 1:
        i01.mouth.speakBlocking("i have rock what do you have")
    if x == 2:
        i01.mouth.speakBlocking("what do you have")
def paper():
    """Pump the left fist three times, end on the flat-hand "paper" gesture,
    then speak.

    Relies on the global ``i01`` robot service and the ``fullspeed`` /
    ``sleep`` helpers defined elsewhere in this script.
    """
    fullspeed()
    # Neutral starting pose.
    i01.moveHead(90,90)
    i01.moveArm("left",70,90,80,10)
    i01.moveArm("right",20,80,20,20)
    i01.moveHand("left",130,180,180,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.5)
    # First pump: raise the fist while looking down at it.
    i01.setHeadSpeed(.8,.8)
    i01.moveHead(60,107)
    i01.moveArm("left",49,90,75,10)
    i01.moveArm("right",20,80,20,20)
    i01.moveHand("left",130,180,180,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.5)
    i01.moveArm("left",80,90,85,10)
    i01.moveArm("right",20,80,20,20)
    i01.moveHand("left",130,180,180,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.5)
    # Second pump.
    i01.setHeadSpeed(.8,.8)
    i01.moveHead(60,107)
    i01.moveArm("left",49,90,75,10)
    i01.moveArm("right",20,80,20,20)
    i01.moveHand("left",130,180,180,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.5)
    i01.moveArm("left",90,90,90,10)
    i01.moveArm("right",20,85,10,20)
    i01.moveHand("left",130,180,180,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.5)
    # Final throw: open flat hand = "paper".
    i01.setHeadSpeed(.8,.8)
    i01.moveHead(90,90)
    i01.moveArm("left",49,90,75,10)
    i01.moveArm("right",20,80,20,20)
    i01.moveHand("left",0,0,0,0,0,165)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.3)
    # Randomly pick one of two phrasings; randint(1, 2) only yields 1 or 2,
    # so if/else replaces the original pair of independent `if` tests.
    x = random.randint(1, 2)
    if x == 1:
        i01.mouth.speakBlocking("i have paper what do you have")
    else:
        i01.mouth.speakBlocking("what do you have")
def scissors():
    """Pump the left fist three times, end on the two-finger "scissors"
    gesture, then speak.

    Relies on the global ``i01`` robot service and the ``fullspeed`` /
    ``sleep`` helpers defined elsewhere in this script.
    """
    fullspeed()
    # Neutral starting pose.
    i01.moveHead(90,90)
    i01.moveArm("left",70,90,80,10)
    i01.moveArm("right",20,80,20,20)
    i01.moveHand("left",130,180,180,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.5)
    # First pump: raise the fist while looking down at it.
    i01.setHeadSpeed(.8,.8)
    i01.moveHead(60,107)
    i01.moveArm("left",49,90,75,10)
    i01.moveArm("right",20,80,20,20)
    i01.moveHand("left",130,180,180,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.5)
    i01.moveArm("left",80,90,85,10)
    i01.moveArm("right",20,80,20,20)
    i01.moveHand("left",130,180,180,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.5)
    # Second pump.
    i01.setHeadSpeed(.8,.8)
    i01.moveHead(60,107)
    i01.moveArm("left",49,90,75,10)
    i01.moveArm("right",20,80,20,20)
    i01.moveHand("left",130,180,180,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.5)
    i01.moveArm("left",90,90,90,10)
    i01.moveArm("right",20,85,10,20)
    i01.moveHand("left",130,180,180,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.5)
    # Final throw: index and middle fingers extended = "scissors".
    i01.moveHead(90,90)
    i01.moveArm("left",49,90,75,10)
    i01.moveArm("right",20,80,20,20)
    i01.moveHand("left",50,0,0,180,180,90)
    i01.moveHand("right",50,90,90,90,100,90)
    sleep(.3)
    # Randomly pick one of two phrasings; randint(1, 2) only yields 1 or 2,
    # so if/else replaces the original pair of independent `if` tests.
    x = random.randint(1, 2)
    if x == 1:
        i01.mouth.speakBlocking("i have scissors what do you have")
    else:
        i01.mouth.speakBlocking("what do you have")
| |
import json
import os
import uuid
from unittest import SkipTest
from django.conf import settings
from django.contrib.sites.models import Site
from django.test import LiveServerTestCase
from dateutil.parser import parse
from nose.tools import nottest
from casexml.apps.case.mock import CaseBlock
from casexml.apps.case.util import post_case_blocks
from corehq.apps.accounting.models import SoftwarePlanEdition
from corehq.apps.accounting.tests.base_tests import BaseAccountingTest
from corehq.apps.accounting.tests.utils import DomainSubscriptionMixin
from corehq.apps.accounting.utils import clear_plan_version_cache
from corehq.apps.app_manager.models import import_app
from corehq.apps.domain.models import Domain
from corehq.apps.groups.models import Group
from corehq.apps.sms.api import process_username
from corehq.apps.sms.models import (
OUTGOING,
SMS,
Keyword,
KeywordAction,
PhoneNumber,
SQLMobileBackend,
SQLMobileBackendMapping,
)
from corehq.apps.smsforms.models import SQLXFormsSession
from corehq.apps.users.models import CommCareUser, WebUser
from corehq.form_processor.models import CommCareCase, XFormInstance
from corehq.messaging.smsbackends.test.models import SQLTestSMSBackend
from corehq.util.test_utils import unit_testing_only
def time_parser(value):
    """Parse *value* as a datetime string and return only its time component."""
    parsed_datetime = parse(value)
    return parsed_datetime.time()
@nottest
def setup_default_sms_test_backend():
    """Create a global test SMS backend and a catch-all ('*' prefix) mapping.

    Returns a ``(backend, backend_mapping)`` tuple so callers can delete both
    objects during teardown.
    """
    sms_backend = SQLTestSMSBackend.objects.create(
        name='MOBILE_BACKEND_TEST',
        is_global=True,
        hq_api_id=SQLTestSMSBackend.get_api_id(),
    )
    mapping = SQLMobileBackendMapping.objects.create(
        is_global=True,
        backend_type=SQLMobileBackend.SMS,
        prefix='*',
        backend=sms_backend,
    )
    return (sms_backend, mapping)
class BaseSMSTest(BaseAccountingTest, DomainSubscriptionMixin):
    """Base test case for SMS tests that need an accounting subscription."""

    def setUp(self):
        # Reset per-test references; populated by subclasses / the mixin.
        super(BaseSMSTest, self).setUp()
        self.account = None
        self.subscription = None

    @classmethod
    def create_account_and_subscription(cls, domain_name):
        # Attach an ADVANCED-plan subscription (from DomainSubscriptionMixin)
        # to the given domain.
        cls.setup_subscription(domain_name, SoftwarePlanEdition.ADVANCED)

    def tearDown(self):
        # Subscriptions must be torn down and the plan-version cache cleared
        # before the accounting base class cleans up.
        self.teardown_subscriptions()
        clear_plan_version_cache()
        super(BaseSMSTest, self).tearDown()
class TouchformsTestCase(LiveServerTestCase, DomainSubscriptionMixin):
    """
    For now, these test cases need to be run manually. Before running, the
    following dependencies must be met:

    1. formplayer/config/application.properties:
        - Update these entries:
            commcarehq.host=http://localhost:8082
            commcarehq.formplayerAuthKey=abc
            touchforms.username=touchforms_user
            touchforms.password=123
        - Update couch.* and datasource.hq.* to point to the respective
          test databases
        - Make sure your local directory referenced by sqlite.dataDir is
          completely empty
    2. Start formplayer
    3. Django localsettings.py:
        FORMPLAYER_INTERNAL_AUTH_KEY = "abc"
    """
    # Per-test registries of created objects; populated in setUp and the
    # create_* helpers, deleted in tearDown.
    users = None
    apps = None
    keywords = None
    groups = None

    # Always start the test live server on port 8082
    port = 8082

    def create_domain(self, domain):
        """Create a Domain with a default SMS response and an ADVANCED-plan
        subscription; return the saved Domain object."""
        domain_obj = Domain(name=domain)
        domain_obj.use_default_sms_response = True
        domain_obj.default_sms_response = "Default SMS Response"
        domain_obj.save()
        self.setup_subscription(domain_obj.name, SoftwarePlanEdition.ADVANCED)
        return domain_obj

    def create_mobile_worker(self, username, password, phone_number, save_vn=True):
        """Create a CommCareUser; when save_vn is True, also register
        phone_number as a verified two-way entry. Tracked for teardown."""
        processed_username = process_username(username, self.domain)
        user = CommCareUser.create(self.domain, processed_username, password, None, None,
            phone_number=phone_number)
        if save_vn:
            entry = user.get_or_create_phone_entry(phone_number)
            entry.set_two_way()
            entry.set_verified()
            entry.save()
        self.users.append(user)
        return user

    def update_case_owner(self, case, owner):
        """Reassign an existing 'participant' case to *owner* via a case block."""
        case_block = CaseBlock.deprecated_init(
            create=False,
            case_id=case.case_id,
            case_type='participant',
            owner_id=owner.get_id,
            user_id=owner.get_id,
        ).as_xml()
        post_case_blocks([case_block], {'domain': self.domain})

    def add_parent_access(self, user, case):
        """Give *user* access to *case* by creating a child 'magic_map' case
        owned by the user and indexed to the parent case."""
        case_block = CaseBlock.deprecated_init(
            create=True,
            case_id=uuid.uuid4().hex,
            case_type='magic_map',
            owner_id=user.get_id,
            index={'parent': ('participant', case.case_id)}
        ).as_xml()
        post_case_blocks([case_block], {'domain': self.domain})

    def create_web_user(self, username, password):
        """Create a WebUser in the test domain; tracked for teardown."""
        user = WebUser.create(self.domain, username, password, None, None)
        self.users.append(user)
        return user

    def create_group(self, name, users):
        """Create a case-sharing Group containing *users*; tracked for teardown."""
        group = Group(
            domain=self.domain,
            name=name,
            users=[user.get_id for user in users],
            case_sharing=True,
        )
        group.save()
        self.groups.append(group)
        return group

    def load_app(self, filename, dirname=None):
        """Import an app from a JSON source file (relative to *dirname*,
        defaulting to this module's directory); tracked for teardown."""
        dirname = dirname or os.path.dirname(os.path.abspath(__file__))
        full_filename = "%s/%s" % (dirname, filename)
        with open(full_filename, "r") as f:
            app_source = f.read()
        app_source = json.loads(app_source)
        app = import_app(app_source, self.domain)
        self.apps.append(app)
        return app

    def create_sms_keyword(self, keyword, reply_sms,
            override_open_sessions=True, initiator_filter=None,
            recipient=KeywordAction.RECIPIENT_SENDER, recipient_id=None):
        """Create a Keyword that replies with a static SMS message.

        Fix: the created Keyword is now appended to self.keywords so that
        tearDown deletes it, and it is returned for convenience (the
        original returned None, so this is backward-compatible).
        """
        k = Keyword(
            domain=self.domain,
            keyword=keyword,
            description=keyword,
            override_open_sessions=override_open_sessions,
            initiator_doc_type_filter=initiator_filter or [],
        )
        k.save()
        k.keywordaction_set.create(
            recipient=recipient,
            recipient_id=recipient_id,
            action=KeywordAction.ACTION_SMS,
            message_content=reply_sms,
        )
        # Track for teardown cleanup.
        self.keywords.append(k)
        return k

    def create_survey_keyword(self, keyword, app_id, form_unique_id, delimiter=None,
            override_open_sessions=True, initiator_filter=None):
        """Create a Keyword that starts an SMS survey for the given form.

        Fix: tracked in self.keywords for teardown and returned (previously
        returned None and leaked across tests).
        """
        k = Keyword(
            domain=self.domain,
            keyword=keyword,
            description=keyword,
            delimiter=delimiter,
            override_open_sessions=override_open_sessions,
            initiator_doc_type_filter=initiator_filter or [],
        )
        k.save()
        k.keywordaction_set.create(
            recipient=KeywordAction.RECIPIENT_SENDER,
            action=KeywordAction.ACTION_SMS_SURVEY,
            app_id=app_id,
            form_unique_id=form_unique_id,
        )
        # Track for teardown cleanup.
        self.keywords.append(k)
        return k

    def create_structured_sms_keyword(self, keyword, app_id, form_unique_id, reply_sms,
            delimiter=None, named_args=None, named_args_separator=None,
            override_open_sessions=True, initiator_filter=None):
        """Create a Keyword with both a static SMS reply and a structured-SMS
        action submitting to the given form.

        Fix: tracked in self.keywords for teardown and returned (previously
        returned None and leaked across tests).
        """
        k = Keyword(
            domain=self.domain,
            keyword=keyword,
            description=keyword,
            delimiter=delimiter,
            override_open_sessions=override_open_sessions,
            initiator_doc_type_filter=initiator_filter or [],
        )
        k.save()
        k.keywordaction_set.create(
            recipient=KeywordAction.RECIPIENT_SENDER,
            action=KeywordAction.ACTION_SMS,
            message_content=reply_sms,
        )
        k.keywordaction_set.create(
            recipient=KeywordAction.RECIPIENT_SENDER,
            action=KeywordAction.ACTION_STRUCTURED_SMS,
            app_id=app_id,
            form_unique_id=form_unique_id,
            use_named_args=(named_args is not None),
            named_args=(named_args or {}),
            named_args_separator=named_args_separator,
        )
        # Track for teardown cleanup.
        self.keywords.append(k)
        return k

    def create_site(self):
        """Point the Django Site at the live server URL so absolute links
        generated during the test resolve correctly."""
        site = Site(id=settings.SITE_ID, domain=self.live_server_url,
            name=self.live_server_url)
        site.save()
        return site

    def get_case(self, external_id):
        """Look up a case by external id; raise DoesNotExist when absent."""
        case = CommCareCase.objects.get_case_by_external_id(
            self.domain, external_id, raise_multiple=True)
        if case is None:
            raise CommCareCase.DoesNotExist
        return case

    def assertCasePropertyEquals(self, case, prop, value):
        """Assert a single case property equals *value*."""
        self.assertEqual(case.get_case_property(prop), value)

    def get_last_form_submission(self):
        """Return the most recent XFormInstance in the domain, or None."""
        result = XFormInstance.objects.get_forms_by_type(self.domain, 'XFormInstance', 1, recent_first=True)
        return result[0] if len(result) > 0 else None

    def assertNoNewSubmission(self, last_submission):
        """Assert no form has been submitted since *last_submission*."""
        new_submission = self.get_last_form_submission()
        self.assertEqual(last_submission.form_id, new_submission.form_id)

    def assertFormQuestionEquals(self, form, question, value, cast=None):
        """Assert a form answer equals *value*, optionally casting it first
        (e.g. with time_parser)."""
        self.assertIn(question, form.form_data)
        form_value = form.form_data[question]
        if cast:
            form_value = cast(form_value)
        self.assertEqual(form_value, value)

    def get_last_outbound_sms(self, contact):
        """Return the most recent outgoing SMS sent to *contact*."""
        return SMS.get_last_log_for_recipient(
            contact.doc_type,
            contact.get_id,
            direction=OUTGOING
        )

    def get_open_session(self, contact):
        """Return the open SMS survey session for *contact*, if any."""
        return SQLXFormsSession.get_open_sms_session(self.domain, contact._id)

    def assertLastOutboundSMSEquals(self, contact, message):
        """Assert the last outbound SMS to *contact* has text *message*;
        return the SMS for further inspection."""
        sms = self.get_last_outbound_sms(contact)
        self.assertIsNotNone(sms)
        self.assertEqual(sms.text, message)
        return sms

    def assertMetadataEqual(self, sms, xforms_session_couch_id=None, workflow=None):
        """Assert optional SMS metadata fields; only provided kwargs are checked."""
        if xforms_session_couch_id:
            self.assertEqual(sms.xforms_session_couch_id, xforms_session_couch_id)
        if workflow:
            self.assertEqual(sms.workflow, workflow)

    @classmethod
    def setUpClass(cls):
        # These tests require a running formplayer (see class docstring);
        # allow opting out via settings.
        if getattr(settings, "SKIP_TOUCHFORMS_TESTS", False):
            raise SkipTest("because settings.SKIP_TOUCHFORMS_TESTS")

        super(TouchformsTestCase, cls).setUpClass()

    def setUp(self):
        self.users = []
        self.apps = []
        self.keywords = []
        self.groups = []
        self.site = self.create_site()
        self.domain = "test-domain"
        self.domain_obj = self.create_domain(self.domain)
        # Matches touchforms.username / touchforms.password in the
        # formplayer config (see class docstring).
        self.create_web_user("touchforms_user", "123")

        self.backend, self.backend_mapping = setup_default_sms_test_backend()

        settings.DEBUG = True

    def tearDown(self):
        delete_domain_phone_numbers(self.domain)
        for user in self.users:
            user.delete(self.domain, deleted_by=None)
        for app in self.apps:
            app.delete()
        # Now actually populated by the create_*_keyword helpers.
        for keyword in self.keywords:
            keyword.delete()
        for group in self.groups:
            group.delete()
        self.domain_obj.delete()
        self.site.delete()
        self.backend_mapping.delete()
        self.backend.delete()
        self.teardown_subscriptions()
        clear_plan_version_cache()
@unit_testing_only
def delete_domain_phone_numbers(domain):
    """Delete every PhoneNumber registered under *domain*."""
    for phone_number in PhoneNumber.by_domain(domain):
        # Clear cache and delete
        phone_number.delete()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.