code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for recurrent layers functionality other than GRU, LSTM, SimpleRNN.
See also: lstm_test.py, gru_test.py, simplernn_test.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.layers import recurrent as rnn_v1
from tensorflow.python.keras.layers import recurrent_v2 as rnn_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell
from tensorflow.python.ops import special_math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.training.tracking import object_identity
from tensorflow.python.training.tracking import util as trackable_util
from tensorflow.python.util import nest
# Used for nested input/output/state RNN test.
# `NestedInput` bundles the two input tensors fed to `NestedCell`;
# `NestedState` bundles its two state tensors.
NestedInput = collections.namedtuple('NestedInput', ['t1', 't2'])
NestedState = collections.namedtuple('NestedState', ['s1', 's2'])
@keras_parameterized.run_all_keras_modes
class RNNTest(keras_parameterized.TestCase):
  def test_minimal_rnn_cell_non_layer(self):
    """The RNN wrapper accepts a minimal cell object that is not a Layer.

    The cell only needs `state_size`, and a `call(inputs, states)` that
    returns `(output, new_states)`.
    """

    class MinimalRNNCell(object):
      """Bare-bones cell: output = dot(inputs, kernel) + previous output."""

      def __init__(self, units, input_dim):
        self.units = units
        self.state_size = units
        self.kernel = keras.backend.variable(
            np.random.random((input_dim, units)))

      def call(self, inputs, states):
        prev_output = states[0]
        output = keras.backend.dot(inputs, self.kernel) + prev_output
        return output, [output]

    # Basic test case.
    cell = MinimalRNNCell(32, 5)
    x = keras.Input((None, 5))
    layer = keras.layers.RNN(cell)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))

    # Test stacking.
    cells = [MinimalRNNCell(8, 5),
             MinimalRNNCell(32, 8),
             MinimalRNNCell(32, 32)]
    layer = keras.layers.RNN(cells)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
  def test_minimal_rnn_cell_non_layer_multiple_states(self):
    """A non-Layer cell may carry multiple states via a tuple state_size."""

    class MinimalRNNCell(object):
      """Cell with two states of equal size; returns scaled copies of the
      output as the new states."""

      def __init__(self, units, input_dim):
        self.units = units
        # Two states per step, both of width `units`.
        self.state_size = (units, units)
        self.kernel = keras.backend.variable(
            np.random.random((input_dim, units)))

      def call(self, inputs, states):
        prev_output_1 = states[0]
        prev_output_2 = states[1]
        output = keras.backend.dot(inputs, self.kernel)
        output += prev_output_1
        output -= prev_output_2
        return output, [output * 2, output * 3]

    # Basic test case.
    cell = MinimalRNNCell(32, 5)
    x = keras.Input((None, 5))
    layer = keras.layers.RNN(cell)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))

    # Test stacking: the stacked cell's state_size is the per-cell tuple of
    # state tuples, and output_size comes from the last cell.
    cells = [MinimalRNNCell(8, 5),
             MinimalRNNCell(16, 8),
             MinimalRNNCell(32, 16)]
    layer = keras.layers.RNN(cells)
    self.assertEqual(layer.cell.state_size, ((8, 8), (16, 16), (32, 32)))
    self.assertEqual(layer.cell.output_size, 32)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
  def test_minimal_rnn_cell_layer(self):
    """A Layer-subclass cell trains, serializes and round-trips weights."""

    class MinimalRNNCell(keras.layers.Layer):
      """Simple recurrent cell with input and recurrent kernels."""

      def __init__(self, units, **kwargs):
        self.units = units
        self.state_size = units
        super(MinimalRNNCell, self).__init__(**kwargs)

      def build(self, input_shape):
        self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                      initializer='uniform',
                                      name='kernel')
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            initializer='uniform',
            name='recurrent_kernel')
        self.built = True

      def call(self, inputs, states):
        prev_output = states[0]
        h = keras.backend.dot(inputs, self.kernel)
        output = h + keras.backend.dot(prev_output, self.recurrent_kernel)
        return output, [output]

      def get_config(self):
        # `units` is the only constructor argument to serialize.
        config = {'units': self.units}
        base_config = super(MinimalRNNCell, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))

    # Test basic case.
    x = keras.Input((None, 5))
    cell = MinimalRNNCell(32)
    layer = keras.layers.RNN(cell)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))

    # Test basic case serialization: a layer rebuilt from config with the
    # same weights must produce the same predictions.
    x_np = np.random.random((6, 5, 5))
    y_np = model.predict(x_np)
    weights = model.get_weights()
    config = layer.get_config()
    with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
      layer = keras.layers.RNN.from_config(config)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.set_weights(weights)
    y_np_2 = model.predict(x_np)
    self.assertAllClose(y_np, y_np_2, atol=1e-4)

    # Test stacking.
    cells = [MinimalRNNCell(8),
             MinimalRNNCell(12),
             MinimalRNNCell(32)]
    layer = keras.layers.RNN(cells)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))

    # Test stacked RNN serialization.
    x_np = np.random.random((6, 5, 5))
    y_np = model.predict(x_np)
    weights = model.get_weights()
    config = layer.get_config()
    with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
      layer = keras.layers.RNN.from_config(config)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.set_weights(weights)
    y_np_2 = model.predict(x_np)
    self.assertAllClose(y_np, y_np_2, atol=1e-4)
  def test_minimal_rnn_cell_abstract_rnn_cell(self):
    """A cell built on AbstractRNNCell (property-based state/output size)
    works both standalone and stacked."""

    class MinimalRNNCell(keras.layers.AbstractRNNCell):
      """Simple recurrent cell using the AbstractRNNCell interface."""

      def __init__(self, units, **kwargs):
        self.units = units
        super(MinimalRNNCell, self).__init__(**kwargs)

      @property
      def state_size(self):
        return self.units

      def build(self, input_shape):
        self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
                                      initializer='uniform',
                                      name='kernel')
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            initializer='uniform',
            name='recurrent_kernel')
        self.built = True

      def call(self, inputs, states):
        prev_output = states[0]
        h = keras.backend.dot(inputs, self.kernel)
        output = h + keras.backend.dot(prev_output, self.recurrent_kernel)
        # Note: returns a bare tensor (not a list) as the new state.
        return output, output

      @property
      def output_size(self):
        return self.units

    cell = MinimalRNNCell(32)
    x = keras.Input((None, 5))
    layer = keras.layers.RNN(cell)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer="rmsprop",
        loss="mse",
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))

    # Test stacking.
    cells = [MinimalRNNCell(8),
             MinimalRNNCell(16),
             MinimalRNNCell(32)]
    layer = keras.layers.RNN(cells)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile(optimizer='rmsprop',
                  loss='mse',
                  run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
  def test_rnn_with_time_major(self):
    """time_major=True layers consume/emit (time, batch, feature) tensors
    and match the batch-major layer numerically."""
    batch = 10
    time_step = 5
    embedding_dim = 4
    units = 3

    # Test basic case: transpose batch-major input to time-major before the
    # RNN, and back afterwards.
    x = keras.Input((time_step, embedding_dim))
    time_major_x = keras.layers.Lambda(
        lambda t: array_ops.transpose(t, [1, 0, 2]))(x)
    layer = keras.layers.SimpleRNN(
        units, time_major=True, return_sequences=True)
    self.assertEqual(
        layer.compute_output_shape((time_step, None,
                                    embedding_dim)).as_list(),
        [time_step, None, units])
    y = layer(time_major_x)
    self.assertEqual(layer.output_shape, (time_step, None, units))
    y = keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2]))(y)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        np.zeros((batch, time_step, embedding_dim)),
        np.zeros((batch, time_step, units)))

    # Test stacking.
    x = keras.Input((time_step, embedding_dim))
    time_major_x = keras.layers.Lambda(
        lambda t: array_ops.transpose(t, [1, 0, 2]))(x)
    cell_units = [10, 8, 6]
    cells = [keras.layers.SimpleRNNCell(cell_units[i]) for i in range(3)]
    layer = keras.layers.RNN(cells, time_major=True, return_sequences=True)
    y = layer(time_major_x)
    self.assertEqual(layer.output_shape, (time_step, None, cell_units[-1]))
    y = keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2]))(y)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        np.zeros((batch, time_step, embedding_dim)),
        np.zeros((batch, time_step, cell_units[-1])))

    # Test masking.
    x = keras.Input((time_step, embedding_dim))
    time_major = keras.layers.Lambda(
        lambda t: array_ops.transpose(t, [1, 0, 2]))(x)
    mask = keras.layers.Masking()(time_major)
    rnn = keras.layers.SimpleRNN(
        units, time_major=True, return_sequences=True)(mask)
    y = keras.layers.Lambda(lambda t: array_ops.transpose(t, [1, 0, 2]))(rnn)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        np.zeros((batch, time_step, embedding_dim)),
        np.zeros((batch, time_step, units)))

    # Test layer output: a time-major layer with the same weights must
    # reproduce the batch-major layer's predictions.
    x = keras.Input((time_step, embedding_dim))
    rnn_1 = keras.layers.SimpleRNN(units, return_sequences=True)
    y = rnn_1(x)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        np.zeros((batch, time_step, embedding_dim)),
        np.zeros((batch, time_step, units)))
    x_np = np.random.random((batch, time_step, embedding_dim))
    y_np_1 = model.predict(x_np)
    time_major = keras.layers.Lambda(
        lambda t: array_ops.transpose(t, [1, 0, 2]))(x)
    rnn_2 = keras.layers.SimpleRNN(
        units, time_major=True, return_sequences=True)
    y_2 = rnn_2(time_major)
    y_2 = keras.layers.Lambda(
        lambda t: array_ops.transpose(t, [1, 0, 2]))(y_2)
    model_2 = keras.models.Model(x, y_2)
    rnn_2.set_weights(rnn_1.get_weights())
    y_np_2 = model_2.predict(x_np)
    self.assertAllClose(y_np_1, y_np_2, atol=1e-4)
  def test_rnn_cell_with_constants_layer(self):
    """RNN with `constants` kwarg: training, serialization, flat-list call,
    and stacking with built-in cells all work."""
    # Test basic case.
    x = keras.Input((None, 5))
    c = keras.Input((3,))
    cell = RNNCellWithConstants(32, constant_size=3)
    layer = keras.layers.RNN(cell)
    y = layer(x, constants=c)
    model = keras.models.Model([x, c], y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        [np.zeros((6, 5, 5)), np.zeros((6, 3))],
        np.zeros((6, 32))
    )

    # Test basic case serialization.
    x_np = np.random.random((6, 5, 5))
    c_np = np.random.random((6, 3))
    y_np = model.predict([x_np, c_np])
    weights = model.get_weights()
    config = layer.get_config()
    custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
    with keras.utils.CustomObjectScope(custom_objects):
      layer = keras.layers.RNN.from_config(config.copy())
    y = layer(x, constants=c)
    model = keras.models.Model([x, c], y)
    model.set_weights(weights)
    y_np_2 = model.predict([x_np, c_np])
    self.assertAllClose(y_np, y_np_2, atol=1e-4)

    # test flat list inputs: constants may be passed as trailing entries of
    # a single flat input list instead of the `constants` kwarg.
    with keras.utils.CustomObjectScope(custom_objects):
      layer = keras.layers.RNN.from_config(config.copy())
    y = layer([x, c])
    model = keras.models.Model([x, c], y)
    model.set_weights(weights)
    y_np_3 = model.predict([x_np, c_np])
    self.assertAllClose(y_np, y_np_3, atol=1e-4)

    # Test stacking.
    cells = [keras.layers.recurrent.GRUCell(8),
             RNNCellWithConstants(12, constant_size=3),
             RNNCellWithConstants(32, constant_size=3)]
    layer = keras.layers.recurrent.RNN(cells)
    y = layer(x, constants=c)
    model = keras.models.Model([x, c], y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        [np.zeros((6, 5, 5)), np.zeros((6, 3))],
        np.zeros((6, 32))
    )

    # Test GRUCell reset_after property.
    x = keras.Input((None, 5))
    c = keras.Input((3,))
    cells = [keras.layers.recurrent.GRUCell(32, reset_after=True)]
    layer = keras.layers.recurrent.RNN(cells)
    y = layer(x, constants=c)
    model = keras.models.Model([x, c], y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        [np.zeros((6, 5, 5)), np.zeros((6, 3))],
        np.zeros((6, 32))
    )

    # Test stacked RNN serialization
    x_np = np.random.random((6, 5, 5))
    c_np = np.random.random((6, 3))
    y_np = model.predict([x_np, c_np])
    weights = model.get_weights()
    config = layer.get_config()
    with keras.utils.CustomObjectScope(custom_objects):
      layer = keras.layers.recurrent.RNN.from_config(config.copy())
    y = layer(x, constants=c)
    model = keras.models.Model([x, c], y)
    model.set_weights(weights)
    y_np_2 = model.predict([x_np, c_np])
    self.assertAllClose(y_np, y_np_2, atol=1e-4)
  def test_rnn_cell_with_non_keras_constants(self):
    """Constants may be plain TF tensors rather than Keras Inputs."""
    # Test basic case.
    x = keras.Input((None, 5))
    c = array_ops.zeros([6, 3], dtype=dtypes.float32)
    cell = RNNCellWithConstants(32, constant_size=3)
    layer = keras.layers.RNN(cell)
    y = layer(x, constants=c)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))

    # Test stacking.
    cells = [keras.layers.recurrent.GRUCell(8),
             RNNCellWithConstants(12, constant_size=3),
             RNNCellWithConstants(32, constant_size=3)]
    layer = keras.layers.recurrent.RNN(cells)
    y = layer(x, constants=c)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
  def test_rnn_cell_with_constants_layer_passing_initial_state(self):
    """Constants and an explicit initial_state can be combined; the initial
    state must actually influence the output."""
    # Test basic case.
    x = keras.Input((None, 5))
    c = keras.Input((3,))
    s = keras.Input((32,))
    cell = RNNCellWithConstants(32, constant_size=3)
    layer = keras.layers.RNN(cell)
    y = layer(x, initial_state=s, constants=c)
    model = keras.models.Model([x, s, c], y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        [np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 3))],
        np.zeros((6, 32))
    )

    # Test basic case serialization.
    x_np = np.random.random((6, 5, 5))
    s_np = np.random.random((6, 32))
    c_np = np.random.random((6, 3))
    y_np = model.predict([x_np, s_np, c_np])
    weights = model.get_weights()
    config = layer.get_config()
    custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
    with keras.utils.CustomObjectScope(custom_objects):
      layer = keras.layers.RNN.from_config(config.copy())
    y = layer(x, initial_state=s, constants=c)
    model = keras.models.Model([x, s, c], y)
    model.set_weights(weights)
    y_np_2 = model.predict([x_np, s_np, c_np])
    self.assertAllClose(y_np, y_np_2, atol=1e-4)

    # verify that state is used: a shifted initial state must change the
    # prediction, so the closeness assertion is expected to fail.
    y_np_2_different_s = model.predict([x_np, s_np + 10., c_np])
    with self.assertRaises(AssertionError):
      self.assertAllClose(y_np, y_np_2_different_s, atol=1e-4)

    # test flat list inputs (input, state, constants in one list).
    with keras.utils.CustomObjectScope(custom_objects):
      layer = keras.layers.RNN.from_config(config.copy())
    y = layer([x, s, c])
    model = keras.models.Model([x, s, c], y)
    model.set_weights(weights)
    y_np_3 = model.predict([x_np, s_np, c_np])
    self.assertAllClose(y_np, y_np_3, atol=1e-4)
  def test_rnn_cell_with_non_keras_constants_and_initial_state(self):
    """Both constants and initial_state may be plain TF tensors."""
    # Test basic case.
    x = keras.Input((None, 5))
    c = array_ops.zeros([6, 3], dtype=dtypes.float32)
    s = array_ops.zeros([6, 32], dtype=dtypes.float32)
    cell = RNNCellWithConstants(32, constant_size=3)
    layer = keras.layers.RNN(cell)
    y = layer(x, initial_state=s, constants=c)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))

    # Test stacking: one initial-state tensor per cell, matching each
    # cell's state width.
    cells = [keras.layers.recurrent.GRUCell(8),
             RNNCellWithConstants(12, constant_size=3),
             RNNCellWithConstants(32, constant_size=3)]
    layer = keras.layers.recurrent.RNN(cells)
    s = [array_ops.zeros([6, 8], dtype=dtypes.float32),
         array_ops.zeros([6, 12], dtype=dtypes.float32),
         array_ops.zeros([6, 32], dtype=dtypes.float32)]
    y = layer(x, initial_state=s, constants=c)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
  def test_stacked_rnn_attributes(self):
    """Weights, losses and updates of the cells are surfaced on the RNN
    layer, partitioned by conditional vs. unconditional."""
    if context.executing_eagerly():
      self.skipTest('reduce_sum is not available in eager mode.')

    cells = [keras.layers.LSTMCell(1),
             keras.layers.LSTMCell(1)]
    layer = keras.layers.RNN(cells)
    layer.build((None, None, 1))

    # Test weights: each LSTMCell has 3 weights; freezing the first cell
    # moves its 3 into non_trainable.
    self.assertEqual(len(layer.trainable_weights), 6)
    cells[0].trainable = False
    self.assertEqual(len(layer.trainable_weights), 3)
    self.assertEqual(len(layer.non_trainable_weights), 3)

    # Test `get_losses_for` and `losses`: loss_1 is conditional on x,
    # loss_2 is unconditional.
    x = keras.Input((None, 1))
    loss_1 = math_ops.reduce_sum(x)
    loss_2 = math_ops.reduce_sum(cells[0].kernel)
    cells[0].add_loss(loss_1, inputs=x)
    cells[0].add_loss(loss_2)
    self.assertEqual(len(layer.losses), 2)
    self.assertEqual(layer.get_losses_for(None), [loss_2])
    self.assertEqual(layer.get_losses_for(x), [loss_1])

    # Test `get_updates_for` and `updates`
    cells = [keras.layers.LSTMCell(1),
             keras.layers.LSTMCell(1)]
    layer = keras.layers.RNN(cells)
    x = keras.Input((None, 1))
    _ = layer(x)

    update_1 = state_ops.assign_add(cells[0].kernel,
                                    x[0, 0, 0] * cells[0].kernel)
    update_2 = state_ops.assign_add(cells[0].kernel,
                                    array_ops.ones_like(cells[0].kernel))
    # TODO(b/128682878): Remove when RNNCells are __call__'d.
    with base_layer_utils.call_context().enter(layer, x, True, None):
      cells[0].add_update(update_1, inputs=x)
      cells[0].add_update(update_2)
    self.assertEqual(len(layer.updates), 2)
    self.assertEqual(len(layer.get_updates_for(None)), 1)
    self.assertEqual(len(layer.get_updates_for(x)), 1)
def test_rnn_dynamic_trainability(self):
layer_class = keras.layers.SimpleRNN
embedding_dim = 4
units = 3
layer = layer_class(units)
layer.build((None, None, embedding_dim))
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
layer.trainable = False
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 0)
self.assertEqual(len(layer.non_trainable_weights), 3)
layer.trainable = True
self.assertEqual(len(layer.weights), 3)
self.assertEqual(len(layer.trainable_weights), 3)
self.assertEqual(len(layer.non_trainable_weights), 0)
  def test_state_reuse_with_dropout(self):
    """Output states of a dropout-enabled RNN can seed a second RNN."""
    layer_class = keras.layers.SimpleRNN
    embedding_dim = 4
    units = 3
    timesteps = 2
    num_samples = 2

    input1 = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
    layer = layer_class(units,
                        return_state=True,
                        return_sequences=True,
                        dropout=0.2)
    # With return_state=True the layer returns [sequences, state, ...];
    # slice off the sequences to keep only the states.
    state = layer(input1)[1:]

    # Feed the produced states as initial_state of a fresh layer.
    input2 = keras.Input(batch_shape=(num_samples, timesteps, embedding_dim))
    output = layer_class(units)(input2, initial_state=state)
    model = keras.Model([input1, input2], output)

    inputs = [np.random.random((num_samples, timesteps, embedding_dim)),
              np.random.random((num_samples, timesteps, embedding_dim))]
    model.predict(inputs)
  def test_builtin_rnn_cell_serialization(self):
    """SimpleRNNCell/GRUCell/LSTMCell round-trip through get_config /
    from_config, standalone and stacked."""
    for cell_class in [keras.layers.SimpleRNNCell,
                       keras.layers.GRUCell,
                       keras.layers.LSTMCell]:
      # Test basic case.
      x = keras.Input((None, 5))
      cell = cell_class(32)
      layer = keras.layers.RNN(cell)
      y = layer(x)
      model = keras.models.Model(x, y)
      model.compile(
          optimizer='rmsprop',
          loss='mse',
          run_eagerly=testing_utils.should_run_eagerly())

      # Test basic case serialization: same config + same weights must
      # reproduce the same predictions.
      x_np = np.random.random((6, 5, 5))
      y_np = model.predict(x_np)
      weights = model.get_weights()
      config = layer.get_config()
      layer = keras.layers.RNN.from_config(config)
      y = layer(x)
      model = keras.models.Model(x, y)
      model.set_weights(weights)
      y_np_2 = model.predict(x_np)
      self.assertAllClose(y_np, y_np_2, atol=1e-4)

      # Test stacking.
      cells = [cell_class(8),
               cell_class(12),
               cell_class(32)]
      layer = keras.layers.RNN(cells)
      y = layer(x)
      model = keras.models.Model(x, y)
      model.compile(
          optimizer='rmsprop',
          loss='mse',
          run_eagerly=testing_utils.should_run_eagerly())

      # Test stacked RNN serialization.
      x_np = np.random.random((6, 5, 5))
      y_np = model.predict(x_np)
      weights = model.get_weights()
      config = layer.get_config()
      layer = keras.layers.RNN.from_config(config)
      y = layer(x)
      model = keras.models.Model(x, y)
      model.set_weights(weights)
      y_np_2 = model.predict(x_np)
      self.assertAllClose(y_np, y_np_2, atol=1e-4)
  @parameterized.named_parameters(
      *test_util.generate_combinations_with_testcase_name(
          layer=[rnn_v1.SimpleRNN, rnn_v1.GRU, rnn_v1.LSTM,
                 rnn_v2.GRU, rnn_v2.LSTM],
          unroll=[True, False]))
  def test_rnn_dropout(self, layer, unroll):
    """Built-in RNN layers train with dropout + recurrent_dropout in both
    unrolled and looped modes."""
    rnn_layer = layer(3, dropout=0.1, recurrent_dropout=0.1, unroll=unroll)
    if not unroll:
      x = keras.Input((None, 5))
    else:
      # Unrolling requires a statically known number of timesteps.
      x = keras.Input((5, 5))
    y = rnn_layer(x)
    model = keras.models.Model(x, y)
    model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    x_np = np.random.random((6, 5, 5))
    y_np = np.random.random((6, 3))
    model.train_on_batch(x_np, y_np)
  @parameterized.named_parameters(
      *test_util.generate_combinations_with_testcase_name(
          cell=[keras.layers.SimpleRNNCell, keras.layers.GRUCell,
                keras.layers.LSTMCell],
          unroll=[True, False]))
  def test_stacked_rnn_dropout(self, cell, unroll):
    """Stacked cells with dropout + recurrent_dropout train in both
    unrolled and looped modes."""
    cells = [cell(3, dropout=0.1, recurrent_dropout=0.1),
             cell(3, dropout=0.1, recurrent_dropout=0.1)]
    layer = keras.layers.RNN(cells, unroll=unroll)
    if not unroll:
      x = keras.Input((None, 5))
    else:
      # Unrolling requires a statically known number of timesteps.
      x = keras.Input((5, 5))
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile('sgd', 'mse', run_eagerly=testing_utils.should_run_eagerly())
    x_np = np.random.random((6, 5, 5))
    y_np = np.random.random((6, 3))
    model.train_on_batch(x_np, y_np)
  def test_dropout_mask_reuse(self):
    """The same dropout mask is reused across timesteps within one call,
    but a fresh mask is drawn for each call."""
    # The layer is created with recurrent_initializer = zero, so that the
    # recurrent state won't affect the output. By doing this, we can verify
    # the output and see if the same mask is applied for each timestep.
    rnn = keras.layers.SimpleRNN(3,
                                 dropout=0.5,
                                 kernel_initializer='ones',
                                 recurrent_initializer='zeros',
                                 return_sequences=True,
                                 unroll=True)

    inputs = constant_op.constant(1.0, shape=(6, 2, 5))
    out = rnn(inputs, training=True)
    if not context.executing_eagerly():
      self.evaluate(variables_lib.global_variables_initializer())
    batch_1 = self.evaluate(out)
    # Equal outputs at t0 and t1 imply the same mask at both timesteps.
    batch_1_t0, batch_1_t1 = batch_1[:, 0, :], batch_1[:, 1, :]
    self.assertAllClose(batch_1_t0, batch_1_t1)

    # This simulates the layer called with multiple batches in eager mode.
    if context.executing_eagerly():
      out2 = rnn(inputs, training=True)
    else:
      out2 = out
    batch_2 = self.evaluate(out2)
    batch_2_t0, batch_2_t1 = batch_2[:, 0, :], batch_2[:, 1, :]
    self.assertAllClose(batch_2_t0, batch_2_t1)

    # Also validate that a different dropout mask is used between batches.
    self.assertNotAllClose(batch_1_t0, batch_2_t0)
    self.assertNotAllClose(batch_1_t1, batch_2_t1)
def test_stacked_rnn_compute_output_shape(self):
cells = [keras.layers.LSTMCell(3),
keras.layers.LSTMCell(6)]
embedding_dim = 4
timesteps = 2
layer = keras.layers.RNN(cells, return_state=True, return_sequences=True)
output_shape = layer.compute_output_shape((None, timesteps, embedding_dim))
expected_output_shape = [(None, timesteps, 6),
(None, 3),
(None, 3),
(None, 6),
(None, 6)]
self.assertEqual(
[tuple(o.as_list()) for o in output_shape],
expected_output_shape)
# Test reverse_state_order = True for stacked cell.
stacked_cell = keras.layers.StackedRNNCells(
cells, reverse_state_order=True)
layer = keras.layers.RNN(
stacked_cell, return_state=True, return_sequences=True)
output_shape = layer.compute_output_shape((None, timesteps, embedding_dim))
expected_output_shape = [(None, timesteps, 6),
(None, 6),
(None, 6),
(None, 3),
(None, 3)]
self.assertEqual(
[tuple(o.as_list()) for o in output_shape],
expected_output_shape)
  def test_trackable_dependencies(self):
    """All model variables appear in the checkpointable object graph."""
    rnn = keras.layers.SimpleRNN
    x = np.random.random((2, 2, 2))
    y = np.random.random((2, 2))
    model = keras.models.Sequential()
    model.add(rnn(2))
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.fit(x, y, epochs=1, batch_size=1)

    # check whether the model variables are present in the
    # trackable list of objects
    checkpointed_objects = object_identity.ObjectIdentitySet(
        trackable_util.list_objects(model))
    for v in model.variables:
      self.assertIn(v, checkpointed_objects)
  def test_high_dimension_RNN(self):
    """Cells with rank-2 (non-vector) states/outputs work standalone and
    stacked."""
    # Basic test case.
    unit_a = 10
    unit_b = 20
    input_a = 5
    input_b = 10
    batch = 32
    time_step = 4

    cell = Minimal2DRNNCell(unit_a, unit_b)
    x = keras.Input((None, input_a, input_b))
    layer = keras.layers.RNN(cell)
    y = layer(x)

    self.assertEqual(cell.state_size.as_list(), [unit_a, unit_b])

    if not context.executing_eagerly():
      # The initial state should be a single tensor of shape
      # (batch, unit_a, unit_b).
      init_state = layer.get_initial_state(x)
      self.assertEqual(len(init_state), 1)
      self.assertEqual(init_state[0].shape.as_list(), [None, unit_a, unit_b])

    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        np.zeros((batch, time_step, input_a, input_b)),
        np.zeros((batch, unit_a, unit_b)))
    self.assertEqual(model.output_shape, (None, unit_a, unit_b))

    # Test stacking.
    cells = [
        Minimal2DRNNCell(unit_a, unit_b),
        Minimal2DRNNCell(unit_a * 2, unit_b * 2),
        Minimal2DRNNCell(unit_a * 4, unit_b * 4)
    ]
    layer = keras.layers.RNN(cells)
    y = layer(x)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        np.zeros((batch, time_step, input_a, input_b)),
        np.zeros((batch, unit_a * 4, unit_b * 4)))
    self.assertEqual(model.output_shape, (None, unit_a * 4, unit_b * 4))
  def test_high_dimension_RNN_with_init_state(self):
    """A rank-2-state cell accepts a matching initial_state and rejects a
    mismatched one."""
    unit_a = 10
    unit_b = 20
    input_a = 5
    input_b = 10
    batch = 32
    time_step = 4

    # Basic test case.
    cell = Minimal2DRNNCell(unit_a, unit_b)
    x = keras.Input((None, input_a, input_b))
    s = keras.Input((unit_a, unit_b))
    layer = keras.layers.RNN(cell)
    y = layer(x, initial_state=s)

    model = keras.models.Model([x, s], y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch([
        np.zeros((batch, time_step, input_a, input_b)),
        np.zeros((batch, unit_a, unit_b))
    ], np.zeros((batch, unit_a, unit_b)))
    self.assertEqual(model.output_shape, (None, unit_a, unit_b))

    # Bad init state shape: must raise at build time.
    bad_shape_a = unit_a * 2
    bad_shape_b = unit_b * 2
    cell = Minimal2DRNNCell(unit_a, unit_b)
    x = keras.Input((None, input_a, input_b))
    s = keras.Input((bad_shape_a, bad_shape_b))
    layer = keras.layers.RNN(cell)
    with self.assertRaisesWithPredicateMatch(ValueError,
                                             'however `cell.state_size` is'):
      layer(x, initial_state=s)
  def test_inconsistent_output_state_size(self):
    """A cell whose output size differs from its state size is supported;
    the model output takes the cell's output width."""
    batch = 32
    time_step = 4
    state_size = 5
    input_size = 6
    cell = PlusOneRNNCell(state_size)
    x = keras.Input((None, input_size))
    layer = keras.layers.RNN(cell)
    y = layer(x)

    self.assertEqual(cell.state_size, state_size)
    if not context.executing_eagerly():
      init_state = layer.get_initial_state(x)
      self.assertEqual(len(init_state), 1)
      self.assertEqual(init_state[0].shape.as_list(), [None, state_size])

    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        np.zeros((batch, time_step, input_size)),
        np.zeros((batch, input_size)))
    # Output width follows the cell output (input_size), not state_size.
    self.assertEqual(model.output_shape, (None, input_size))
  def test_get_initial_state(self):
    """Cell.get_initial_state works from either `inputs` or an explicit
    (batch_size, dtype) pair, in both graph and eager modes."""
    cell = keras.layers.SimpleRNNCell(5)
    with self.assertRaisesRegexp(ValueError,
                                 'batch_size and dtype cannot be None'):
      # Neither inputs nor (batch_size, dtype) given: must raise.
      cell.get_initial_state(None, None, None)

    if not context.executing_eagerly():
      inputs = keras.Input((None, 10))
      initial_state = cell.get_initial_state(inputs, None, None)
      self.assertEqual(initial_state.shape.as_list(), [None, 5])
      self.assertEqual(initial_state.dtype, inputs.dtype)

      batch = array_ops.shape(inputs)[0]
      dtype = inputs.dtype
      initial_state = cell.get_initial_state(None, batch, dtype)
      self.assertEqual(initial_state.shape.as_list(), [None, 5])
      self.assertEqual(initial_state.dtype, inputs.dtype)
    else:
      batch = 8
      inputs = np.random.random((batch, 10))
      initial_state = cell.get_initial_state(inputs, None, None)
      self.assertEqual(initial_state.shape.as_list(), [8, 5])
      self.assertEqual(initial_state.dtype, inputs.dtype)

      dtype = inputs.dtype
      initial_state = cell.get_initial_state(None, batch, dtype)
      self.assertEqual(initial_state.shape.as_list(), [batch, 5])
      self.assertEqual(initial_state.dtype, inputs.dtype)
  def test_nested_input_output(self):
    """RNN handles nested (multi-tensor) inputs and outputs, both as plain
    tuples and as namedtuples (NestedInput)."""
    batch = 10
    t = 5
    i1, i2, i3 = 3, 4, 5
    o1, o2, o3 = 2, 3, 4

    cell = NestedCell(o1, o2, o3)
    rnn = keras.layers.RNN(cell)
    input_1 = keras.Input((t, i1))
    input_2 = keras.Input((t, i2, i3))

    outputs = rnn((input_1, input_2))

    self.assertEqual(len(outputs), 2)
    self.assertEqual(outputs[0].shape.as_list(), [None, o1])
    self.assertEqual(outputs[1].shape.as_list(), [None, o2, o3])

    model = keras.models.Model((input_1, input_2), outputs)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        [np.zeros((batch, t, i1)), np.zeros((batch, t, i2, i3))],
        [np.zeros((batch, o1)), np.zeros((batch, o2, o3))])
    self.assertEqual(model.output_shape, [(None, o1), (None, o2, o3)])

    # Same structure, but passing the namedtuple form to the cell.
    cell = NestedCell(o1, o2, o3, use_tuple=True)
    rnn = keras.layers.RNN(cell)
    input_1 = keras.Input((t, i1))
    input_2 = keras.Input((t, i2, i3))

    outputs = rnn(NestedInput(t1=input_1, t2=input_2))

    self.assertEqual(len(outputs), 2)
    self.assertEqual(outputs[0].shape.as_list(), [None, o1])
    self.assertEqual(outputs[1].shape.as_list(), [None, o2, o3])

    model = keras.models.Model([input_1, input_2], outputs)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(
        [np.zeros((batch, t, i1)),
         np.zeros((batch, t, i2, i3))],
        [np.zeros((batch, o1)), np.zeros((batch, o2, o3))])
    self.assertEqual(model.output_shape, [(None, o1), (None, o2, o3)])
def test_nested_input_output_with_state(self):
  """Nested I/O combined with return_sequences/return_state: the returned
  states must mirror the cell's nested state structure."""
  batch = 10
  t = 5  # number of timesteps
  i1, i2, i3 = 3, 4, 5  # input dims
  o1, o2, o3 = 2, 3, 4  # output (and state) dims
  # Case 1: cell whose nested structure is a plain tuple.
  cell = NestedCell(o1, o2, o3)
  rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)
  input_1 = keras.Input((t, i1))
  input_2 = keras.Input((t, i2, i3))
  output1, output2, s1, s2 = rnn((input_1, input_2))
  self.assertEqual(output1.shape.as_list(), [None, t, o1])
  self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
  self.assertEqual(s1.shape.as_list(), [None, o1])
  self.assertEqual(s2.shape.as_list(), [None, o2, o3])
  model = keras.models.Model([input_1, input_2], [output1, output2])
  model.compile(
      optimizer='rmsprop',
      loss='mse',
      run_eagerly=testing_utils.should_run_eagerly())
  model.train_on_batch(
      [np.zeros((batch, t, i1)),
       np.zeros((batch, t, i2, i3))],
      [np.zeros((batch, t, o1)),
       np.zeros((batch, t, o2, o3))])
  self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])
  # Case 2: same checks with the namedtuple (NestedInput) variant of the cell.
  cell = NestedCell(o1, o2, o3, use_tuple=True)
  rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)
  input_1 = keras.Input((t, i1))
  input_2 = keras.Input((t, i2, i3))
  output1, output2, s1, s2 = rnn(NestedInput(t1=input_1, t2=input_2))
  self.assertEqual(output1.shape.as_list(), [None, t, o1])
  self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
  self.assertEqual(s1.shape.as_list(), [None, o1])
  self.assertEqual(s2.shape.as_list(), [None, o2, o3])
  model = keras.models.Model([input_1, input_2], [output1, output2])
  model.compile(
      optimizer='rmsprop',
      loss='mse',
      run_eagerly=testing_utils.should_run_eagerly())
  model.train_on_batch(
      [np.zeros((batch, t, i1)),
       np.zeros((batch, t, i2, i3))],
      [np.zeros((batch, t, o1)),
       np.zeros((batch, t, o2, o3))])
  self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])
def test_nest_input_output_with_init_state(self):
  """Nested I/O where the caller also supplies nested initial states."""
  batch = 10
  t = 5  # number of timesteps
  i1, i2, i3 = 3, 4, 5  # input dims
  o1, o2, o3 = 2, 3, 4  # output (and state) dims
  # Case 1: initial state passed as a plain tuple.
  cell = NestedCell(o1, o2, o3)
  rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)
  input_1 = keras.Input((t, i1))
  input_2 = keras.Input((t, i2, i3))
  init_s1 = keras.Input((o1,))
  init_s2 = keras.Input((o2, o3))
  output1, output2, s1, s2 = rnn((input_1, input_2),
                                 initial_state=(init_s1, init_s2))
  self.assertEqual(output1.shape.as_list(), [None, t, o1])
  self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
  self.assertEqual(s1.shape.as_list(), [None, o1])
  self.assertEqual(s2.shape.as_list(), [None, o2, o3])
  model = keras.models.Model([input_1, input_2, init_s1, init_s2],
                             [output1, output2])
  model.compile(
      optimizer='rmsprop',
      loss='mse',
      run_eagerly=testing_utils.should_run_eagerly())
  model.train_on_batch(
      [np.zeros((batch, t, i1)),
       np.zeros((batch, t, i2, i3)),
       np.zeros((batch, o1)),
       np.zeros((batch, o2, o3))],
      [np.zeros((batch, t, o1)),
       np.zeros((batch, t, o2, o3))])
  self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])
  # Case 2: initial state passed as the NestedState namedtuple.
  cell = NestedCell(o1, o2, o3, use_tuple=True)
  rnn = keras.layers.RNN(cell, return_sequences=True, return_state=True)
  input_1 = keras.Input((t, i1))
  input_2 = keras.Input((t, i2, i3))
  init_s1 = keras.Input((o1,))
  init_s2 = keras.Input((o2, o3))
  init_state = NestedState(s1=init_s1, s2=init_s2)
  output1, output2, s1, s2 = rnn(NestedInput(t1=input_1, t2=input_2),
                                 initial_state=init_state)
  self.assertEqual(output1.shape.as_list(), [None, t, o1])
  self.assertEqual(output2.shape.as_list(), [None, t, o2, o3])
  self.assertEqual(s1.shape.as_list(), [None, o1])
  self.assertEqual(s2.shape.as_list(), [None, o2, o3])
  model = keras.models.Model([input_1, input_2, init_s1, init_s2],
                             [output1, output2])
  model.compile(
      optimizer='rmsprop',
      loss='mse',
      run_eagerly=testing_utils.should_run_eagerly())
  model.train_on_batch(
      [np.zeros((batch, t, i1)),
       np.zeros((batch, t, i2, i3)),
       np.zeros((batch, o1)),
       np.zeros((batch, o2, o3))],
      [np.zeros((batch, t, o1)),
       np.zeros((batch, t, o2, o3))])
  self.assertEqual(model.output_shape, [(None, t, o1), (None, t, o2, o3)])
def test_peephole_lstm_cell(self):
  """PeepholeLSTMCell should differ from plain LSTMCell and agree with
  rnn_cell.LSTMCell(use_peepholes=True) for both implementation modes."""

  def _run_cell(cell_fn, **kwargs):
    # Build a 5-unit cell, run a single step on a fixed one-hot batch of 4
    # and return the resulting output values.
    inputs = array_ops.one_hot([1, 2, 3, 4], 4)
    cell = cell_fn(5, **kwargs)
    cell.build(inputs.shape)
    initial_state = cell.get_initial_state(
        inputs=inputs, batch_size=4, dtype=dtypes.float32)
    inputs, _ = cell(inputs, initial_state)
    output = inputs
    if not context.executing_eagerly():
      # NOTE(review): `variables_lib` is not imported in the visible header;
      # presumably imported elsewhere in this file — confirm.
      self.evaluate(variables_lib.global_variables_initializer())
      output = self.evaluate(output)
    return output

  # Fixed seed so all four cells start from identical random state.
  random_seed.set_random_seed(12345)
  # `recurrent_activation` kwarg is set to sigmoid as that is hardcoded into
  # rnn_cell.LSTMCell.
  no_peephole_output = _run_cell(
      keras.layers.LSTMCell,
      kernel_initializer='ones',
      recurrent_activation='sigmoid',
      implementation=1)
  first_implementation_output = _run_cell(
      keras.layers.PeepholeLSTMCell,
      kernel_initializer='ones',
      recurrent_activation='sigmoid',
      implementation=1)
  second_implementation_output = _run_cell(
      keras.layers.PeepholeLSTMCell,
      kernel_initializer='ones',
      recurrent_activation='sigmoid',
      implementation=2)
  tf_lstm_cell_output = _run_cell(
      rnn_cell.LSTMCell,
      use_peepholes=True,
      initializer=init_ops.ones_initializer)
  # Peepholes must change the result relative to a plain LSTM cell...
  self.assertNotAllClose(first_implementation_output, no_peephole_output)
  # ...while both Keras implementations and the TF reference cell agree.
  self.assertAllClose(first_implementation_output,
                      second_implementation_output)
  self.assertAllClose(first_implementation_output, tf_lstm_cell_output)
def test_masking_rnn_with_output_and_states(self):
  """Masked timesteps must carry forward both the last output and states."""

  class Cell(keras.layers.Layer):
    # Minimal cell: echoes its input and increments every state by one.

    def __init__(self):
      self.state_size = None
      self.output_size = None
      super(Cell, self).__init__()

    def build(self, input_shape):
      # Sizes are only known once the input shape is seen.
      self.state_size = input_shape[-1]
      self.output_size = input_shape[-1]

    def call(self, inputs, states):
      return inputs, [s + 1 for s in states]

  x = keras.Input((3, 1), name='x')
  x_masked = keras.layers.Masking()(x)
  s_0 = keras.Input((1,), name='s_0')
  y, s = keras.layers.RNN(
      Cell(), return_state=True)(x_masked, initial_state=s_0)
  model = keras.models.Model([x, s_0], [y, s])
  model.compile(
      optimizer='rmsprop',
      loss='mse',
      run_eagerly=testing_utils.should_run_eagerly())
  # last time step masked
  x_np = np.array([[[1.], [2.], [0.]]])
  s_0_np = np.array([[10.]])
  y_np, s_np = model.predict([x_np, s_0_np])
  # 1 is added to initial state two times (the third step is masked out).
  self.assertAllClose(s_np, s_0_np + 2)
  # Expect last output to be the same as last output before masking
  self.assertAllClose(y_np, x_np[:, 1, :])
def test_zero_output_for_masking(self):
  """With zero_output_for_mask=True, masked timesteps must output zeros,
  both in the unrolled and the symbolic-loop implementation."""
  for unroll in [True, False]:
    cell = keras.layers.SimpleRNNCell(5)
    x = keras.Input((5, 5))
    mask = keras.layers.Masking()
    layer = keras.layers.RNN(
        cell, return_sequences=True, zero_output_for_mask=True, unroll=unroll)
    masked_input = mask(x)
    y = layer(masked_input)
    model = keras.models.Model(x, y)
    model.compile(
        optimizer='rmsprop',
        loss='mse',
        run_eagerly=testing_utils.should_run_eagerly())
    np_x = np.ones((6, 5, 5))
    result_1 = model.predict(np_x)
    # set the time 4 and 5 for last record to be zero (masked).
    np_x[5, 3:] = 0
    result_2 = model.predict(np_x)
    # expect the result_2 has same output, except the time 4,5 for last
    # record.
    result_1[5, 3:] = 0
    self.assertAllClose(result_1, result_2)
def test_unroll_single_step(self):
  """Even if the time dimension is only one, we should be able to unroll."""
  single_step_cell = keras.layers.SimpleRNNCell(5)
  inputs = keras.Input((1, 5))
  unrolled_rnn = keras.layers.RNN(
      single_step_cell, return_sequences=True, unroll=True)
  outputs = unrolled_rnn(inputs)

  model = keras.models.Model(inputs, outputs)
  model.compile(
      optimizer='rmsprop',
      loss='mse',
      run_eagerly=testing_utils.should_run_eagerly())

  # Six all-ones samples, each a single timestep of width 5; the sequence
  # output must keep the (batch, time, units) shape.
  batch_input = np.ones((6, 1, 5))
  prediction = model.predict(batch_input)
  self.assertEqual((6, 1, 5), prediction.shape)
def test_unroll_zero_step(self):
  """If the time dimension is None, we should fail to unroll."""
  cell = keras.layers.SimpleRNNCell(5)
  unknown_length_input = keras.Input((None, 5))
  unrolled_rnn = keras.layers.RNN(cell, return_sequences=True, unroll=True)

  # Unrolling requires a statically known number of timesteps.
  with self.assertRaisesRegexp(ValueError, 'Cannot unroll a RNN.*'):
    unrolled_rnn(unknown_length_input)
def test_full_input_spec(self):
  """A stateful LSTM given explicit initial-state inputs must still support
  reset_states() on the resulting model."""
  # See https://github.com/tensorflow/tensorflow/issues/25985
  inputs = keras.layers.Input(batch_shape=(1, 1, 1))
  state_h = keras.layers.Input(batch_shape=(1, 1))
  state_c = keras.layers.Input(batch_shape=(1, 1))
  states = [state_h, state_c]
  decoder_out = keras.layers.LSTM(1, stateful=True)(
      inputs,
      initial_state=states
  )
  model = keras.Model([inputs, state_h, state_c], decoder_out)
  # Must not raise even though initial states were fed as model inputs.
  model.reset_states()
def test_reset_states(self):
  """reset_states() on an unbuilt stateful RNN must raise a clear error,
  since the state variables cannot exist without a known batch size."""
  # See https://github.com/tensorflow/tensorflow/issues/25852
  with self.assertRaisesRegexp(ValueError, 'it needs to know its batch size'):
    simple_rnn = keras.layers.SimpleRNN(1, stateful=True)
    simple_rnn.reset_states()
  # Same contract for a custom cell with a multi-dimensional state.
  with self.assertRaisesRegexp(ValueError, 'it needs to know its batch size'):
    cell = Minimal2DRNNCell(1, 2)
    custom_rnn = keras.layers.RNN(cell, stateful=True)
    custom_rnn.reset_states()
@parameterized.parameters(
    [keras.layers.SimpleRNNCell, keras.layers.GRUCell, keras.layers.LSTMCell])
def test_stateful_rnn_with_stacking(self, cell):
  """Stacked cells in a stateful RNN: train, predict and reset (including
  resetting to explicit state values) must all work."""
  # See https://github.com/tensorflow/tensorflow/issues/28614.
  batch = 12
  timesteps = 10
  input_dim = 8
  output_dim = 64  # units of the last (64-unit) cell in the stack
  cells = [cell(32), cell(64)]
  x = keras.Input(batch_shape=(batch, None, input_dim))
  layer = keras.layers.RNN(cells, stateful=True)
  y = layer(x)
  model = keras.Model(x, y)
  model.compile(optimizer='rmsprop', loss='mse',
                run_eagerly=testing_utils.should_run_eagerly())
  model.train_on_batch(
      np.zeros((batch, timesteps, input_dim)),
      np.zeros((batch, output_dim)))
  model.predict(np.ones((batch, timesteps, input_dim)))
  # Implicit reset (zeros), then predict again.
  model.reset_states()
  model.predict(np.ones((batch, timesteps, input_dim)))
  # Explicit reset to caller-provided state values, matching the nested
  # state_size structure of the stacked cells.
  # NOTE(review): `nest` is not imported in the visible header; presumably
  # imported elsewhere in this file — confirm.
  new_states = nest.map_structure(lambda s: np.ones((batch, s)),
                                  layer.cell.state_size)
  layer.reset_states(new_states)
  model.predict(np.ones((batch, timesteps, input_dim)))
def test_input_dim_length(self):
  """Legacy `input_dim`/`input_length` kwargs map onto _batch_input_shape."""
  layer_with_both = keras.layers.SimpleRNN(5, input_length=10, input_dim=8)
  self.assertEqual(layer_with_both._batch_input_shape, (None, 10, 8))

  layer_with_dim_only = keras.layers.SimpleRNN(5, input_dim=8)
  self.assertEqual(layer_with_dim_only._batch_input_shape, (None, None, 8))

  layer_with_length_only = keras.layers.SimpleRNN(5, input_length=10)
  self.assertEqual(layer_with_length_only._batch_input_shape, (None, 10, None))
@parameterized.parameters(
    [keras.layers.SimpleRNNCell, keras.layers.GRUCell, keras.layers.LSTMCell])
def test_state_spec_with_stack_cell(self, cell):
  """The states returned by a stacked-cell encoder must be usable as the
  initial_state of a second stacked-cell RNN (matching state specs)."""
  # See https://github.com/tensorflow/tensorflow/issues/27817 for more detail.
  batch = 12
  timesteps = 10
  input_dim = 8
  output_dim = 8

  def create_cell():
    # Three identically-sized cells stacked inside one RNN layer.
    return [cell(output_dim),
            cell(output_dim),
            cell(output_dim)]

  inputs = keras.Input((timesteps, input_dim))
  encoder_output = keras.layers.RNN(create_cell(), return_state=True)(inputs)
  # First element is the output; the rest are the per-cell states.
  states = encoder_output[1:]
  decoder_output = keras.layers.RNN(
      create_cell())(inputs, initial_state=states)
  model = keras.models.Model(inputs, decoder_output)
  model.compile(optimizer='rmsprop', loss='mse',
                run_eagerly=testing_utils.should_run_eagerly())
  model.train_on_batch(
      np.zeros((batch, timesteps, input_dim)),
      np.zeros((batch, output_dim)))
  model.predict(np.ones((batch, timesteps, input_dim)))
class RNNCellWithConstants(keras.layers.Layer):
  """Test RNN cell that consumes an extra `constants` tensor on every step.

  output = inputs . W_in + prev_output . W_rec + constant . W_const
  """

  def __init__(self, units, constant_size, **kwargs):
    self.units = units
    self.state_size = units
    self.constant_size = constant_size
    super(RNNCellWithConstants, self).__init__(**kwargs)

  def build(self, input_shape):
    # Three kernels: one per term in the output sum above.
    self.input_kernel = self.add_weight(
        shape=(input_shape[-1], self.units),
        initializer='uniform',
        name='kernel')
    self.recurrent_kernel = self.add_weight(
        shape=(self.units, self.units),
        initializer='uniform',
        name='recurrent_kernel')
    self.constant_kernel = self.add_weight(
        shape=(self.constant_size, self.units),
        initializer='uniform',
        name='constant_kernel')
    self.built = True

  def call(self, inputs, states, constants):
    [prev_output] = states
    [constant] = constants
    h_input = keras.backend.dot(inputs, self.input_kernel)
    h_state = keras.backend.dot(prev_output, self.recurrent_kernel)
    h_const = keras.backend.dot(constant, self.constant_kernel)
    output = h_input + h_state + h_const
    # New output doubles as the single recurrent state.
    return output, [output]

  def get_config(self):
    # Serialize the constructor arguments on top of the base layer config.
    config = {'units': self.units, 'constant_size': self.constant_size}
    base_config = super(RNNCellWithConstants, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
class Minimal2DRNNCell(keras.layers.Layer):
  """The minimal 2D RNN cell is a simple combination of two 1-D RNN cells.

  Both internal state and output have 2 dimensions and are orthogonal
  to each other.
  """

  def __init__(self, unit_a, unit_b, **kwargs):
    self.unit_a = unit_a
    self.unit_b = unit_b
    # State and output are matrices of shape (unit_a, unit_b) per sample.
    self.state_size = tensor_shape.as_shape([unit_a, unit_b])
    self.output_size = tensor_shape.as_shape([unit_a, unit_b])
    super(Minimal2DRNNCell, self).__init__(**kwargs)

  def build(self, input_shape):
    input_a = input_shape[-2]
    input_b = input_shape[-1]
    # 4-D kernels contract a (input_a, input_b) sample into (unit_a, unit_b).
    self.kernel = self.add_weight(
        shape=(input_a, input_b, self.unit_a, self.unit_b),
        initializer='uniform',
        name='kernel')
    self.recurring_kernel = self.add_weight(
        shape=(self.unit_a, self.unit_b, self.unit_a, self.unit_b),
        initializer='uniform',
        name='recurring_kernel')
    self.bias = self.add_weight(
        shape=(self.unit_a, self.unit_b), initializer='uniform', name='bias')
    self.built = True

  def call(self, inputs, states):
    prev_output = states[0]
    # NOTE(review): `special_math_ops` is not imported in the visible header;
    # presumably imported elsewhere in this file — confirm.
    h = special_math_ops.einsum('bij,ijkl->bkl', inputs, self.kernel)
    h += array_ops.expand_dims(self.bias, axis=0)
    output = h + special_math_ops.einsum('bij,ijkl->bkl', prev_output,
                                         self.recurring_kernel)
    # New output doubles as the single recurrent state.
    return output, [output]
class PlusOneRNNCell(keras.layers.Layer):
  """Add one to the input and state.

  This cell is used for testing state_size and output_size.
  """

  def __init__(self, num_unit, **kwargs):
    self.state_size = num_unit
    super(PlusOneRNNCell, self).__init__(**kwargs)

  def build(self, input_shape):
    # output_size is only determined once the input shape is known.
    self.output_size = input_shape[-1]

  def call(self, inputs, states):
    return inputs + 1, [states[0] + 1]
class NestedCell(keras.layers.Layer):
  """Test cell whose inputs, outputs and states are nested structures.

  The nesting is either a plain tuple or, with use_tuple=True, the
  NestedInput/NestedState namedtuples.
  """

  def __init__(self, unit_1, unit_2, unit_3, use_tuple=False, **kwargs):
    self.unit_1 = unit_1
    self.unit_2 = unit_2
    self.unit_3 = unit_3
    self.use_tuple = use_tuple
    super(NestedCell, self).__init__(**kwargs)
    # A nested state.
    if use_tuple:
      self.state_size = NestedState(
          s1=unit_1, s2=tensor_shape.TensorShape([unit_2, unit_3]))
    else:
      self.state_size = (unit_1, tensor_shape.TensorShape([unit_2, unit_3]))
    self.output_size = (unit_1, tensor_shape.TensorShape([unit_2, unit_3]))

  def build(self, inputs_shape):
    # expect input_shape to contain 2 items, [(batch, i1), (batch, i2, i3)]
    if self.use_tuple:
      input_1 = inputs_shape.t1[1]
      input_2, input_3 = inputs_shape.t2[1:]
    else:
      input_1 = inputs_shape[0][1]
      input_2, input_3 = inputs_shape[1][1:]
    self.kernel_1 = self.add_weight(
        shape=(input_1, self.unit_1), initializer='uniform', name='kernel_1')
    self.kernel_2_3 = self.add_weight(
        shape=(input_2, input_3, self.unit_2, self.unit_3),
        initializer='uniform',
        name='kernel_2_3')

  def call(self, inputs, states):
    # inputs should be in [(batch, input_1), (batch, input_2, input_3)]
    # state should be in shape [(batch, unit_1), (batch, unit_2, unit_3)]
    # NOTE(review): `nest` and `special_math_ops` are not imported in the
    # visible header; presumably imported elsewhere in this file — confirm.
    flatten_inputs = nest.flatten(inputs)
    s1, s2 = states
    output_1 = math_ops.matmul(flatten_inputs[0], self.kernel_1)
    output_2_3 = special_math_ops.einsum('bij,ijkl->bkl', flatten_inputs[1],
                                         self.kernel_2_3)
    # States accumulate the per-step outputs.
    state_1 = s1 + output_1
    state_2_3 = s2 + output_2_3
    output = [output_1, output_2_3]
    new_states = NestedState(s1=state_1, s2=state_2_3)
    return output, new_states
if __name__ == '__main__':
  # NOTE(review): `test` is not imported in the visible header; presumably the
  # TensorFlow test runner imported elsewhere in this file — confirm.
  test.main()
| alsrgv/tensorflow | tensorflow/python/keras/layers/recurrent_test.py | Python | apache-2.0 | 54,154 |
# Copyright 2017 Ravi Sojitra. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import pytest
from numpy.testing import assert_allclose
from plda.plda.optimizer import (
calc_scatter_matrices,
optimize_maximum_likelihood
)
from plda.tests.utils import (
assert_error_falls_as_K_increases,
assert_error_falls_as_n_increases,
calc_mean_squared_error,
generate_data
)
def test_S_w_and_n_recover_Phi_w():
    """The within-class scatter S_w, scaled by n / (n - 1), should recover
    Phi_w with error that falls as the per-class sample size grows."""
    def calc_error(truth_dict):
        # Estimate Phi_w from the within-class scatter matrix and compare it
        # against the ground truth via log mean squared error.
        _, S_w = calc_scatter_matrices(truth_dict['data'],
                                       truth_dict['labels'])
        n = truth_dict['n_k']
        estimate = n / (n - 1) * S_w
        return calc_mean_squared_error(truth_dict['Phi_w'], estimate,
                                       as_log=True)

    sample_sizes = [10, 100, 1000]  # List of sample sizes.

    np.random.seed(1234)
    assert_error_falls_as_n_increases(calc_error,
                                      K=2, D=2, n_k_list=sample_sizes)
    assert_error_falls_as_n_increases(calc_error,
                                      K=100, D=100, n_k_list=sample_sizes)
def test_S_b_and_S_w_and_n_recover_Phi_b():
    """S_b - S_w / (n - 1) should recover Phi_b, with error falling both as
    the per-class sample size and the number of classes grow."""
    def calc_error(truth_dict):
        # Estimate Phi_b from the between- and within-class scatter matrices
        # and compare against the ground truth via log mean squared error.
        S_b, S_w = calc_scatter_matrices(truth_dict['data'],
                                         truth_dict['labels'])
        n = truth_dict['n_k']
        estimate = S_b - S_w / (n - 1)
        return calc_mean_squared_error(truth_dict['Phi_b'], estimate,
                                       as_log=True)

    sample_sizes = [10, 100, 1000]  # List of sample sizes.

    np.random.seed(1234)
    assert_error_falls_as_n_increases(calc_error,
                                      K=2, D=2, n_k_list=sample_sizes)
    assert_error_falls_as_n_increases(calc_error,
                                      K=100, D=100, n_k_list=sample_sizes)

    class_counts = [10, 100, 1000]  # List of numbers of categories.

    np.random.seed(1234)
    assert_error_falls_as_K_increases(calc_error,
                                      n_k=2, D=2, k_list=class_counts)
    assert_error_falls_as_K_increases(calc_error,
                                      n_k=100, D=100, k_list=class_counts)
@pytest.fixture(scope='module')
def truth_dict():
    """Module-scoped fixture: a synthetic dataset with known ground-truth
    parameters, generated once with a fixed seed for reproducibility."""
    np.random.seed(1234)
    return generate_data(n_k=500, K=2000, dimensionality=5)
@pytest.fixture(scope='module')
def fitted_parameters(truth_dict):
    """Module-scoped fixture: maximum-likelihood parameters fitted on the
    synthetic dataset (fitting is expensive, so it runs only once)."""
    X = truth_dict['data']
    Y = truth_dict['labels']
    return optimize_maximum_likelihood(X, Y)
def test_optimize_maximum_likelihood_m(truth_dict, fitted_parameters):
    """The fitted prior mean m (first fitted parameter) should approximate the
    true prior mean, and the error should fall as the number of classes grows."""
    expected = truth_dict['prior_mean']
    actual = fitted_parameters[0]
    assert_allclose(expected, actual, atol=.6)

    def calc_error(truth_dict):
        # Refit on the provided data and score the recovered prior mean
        # via log mean squared error.
        X = truth_dict['data']
        Y = truth_dict['labels']
        expected = truth_dict['prior_mean']
        predicted = optimize_maximum_likelihood(X, Y)[0]
        error = calc_mean_squared_error(expected, predicted, as_log=True)
        return error

    ks = [10, 100, 1000]  # List of numbers of categories.

    np.random.seed(1234)
    assert_error_falls_as_K_increases(calc_error, n_k=50, D=2, k_list=ks)
# The remaining outputs of optimize_maximum_likelihood (A, Psi, the relevant
# U dimensions and inv_A) are covered end-to-end elsewhere; these stubs record
# that intent so the test suite's coverage map stays explicit.
def test_optimize_maximum_likelihood_A():
    """ Implemented in tests/test_model/test_model_inference.py. """
    pass


def test_optimize_maximum_likelihood_Psi():
    """ Implemented in tests/test_model/test_model_inference.py. """
    pass


def test_optimize_maximum_likelihood_relevant_U_dims():
    """ Implemented in tests/test_model/test_model_inference.py. """
    pass


def test_optimize_maximum_likelihood_inv_A():
    """ Implemented in tests/test_model/test_model_inference.py. """
    pass
| RaviSoji/probabilistic_LDA | tests/test_optimizer/test_optimizer_inference.py | Python | apache-2.0 | 4,213 |
from django.shortcuts import render
from django.conf import settings
from common.views import AbsSegmentSelection
#from common.views import AbsTargetSelection
from common.views import AbsTargetSelectionTable
# from common.alignment_SITE_NAME import Alignment
from protwis.context_processors import site_title
Alignment = getattr(__import__('common.alignment_' + settings.SITE_NAME, fromlist=['Alignment']), 'Alignment')
from collections import OrderedDict
class TargetSelection(AbsTargetSelectionTable):
    """Step 1 of the similarity-matrix workflow: receptor (target) selection.

    Configuration-only subclass; the base class renders the selection page
    from these attributes.
    """
    step = 1
    number_of_steps = 2
    title = "SELECT RECEPTORS"
    description = "Select receptors in the table (below) or browse the classification tree (right). You can select entire" \
        + " families or individual receptors.\n\nOnce you have selected all your receptors, click the green button."
    docs = "sequences.html#similarity-matrix"
    # Only the targets box is shown at this step.
    selection_boxes = OrderedDict([
        ("reference", False),
        ("targets", True),
        ("segments", False),
    ])
    # "Next" advances to the segment-selection step below.
    buttons = {
        "continue": {
            "label": "Next",
            "onclick": "submitSelection('/similaritymatrix/segmentselection');",
            "color": "success",
        },
    }
# class TargetSelection(AbsTargetSelection):
# step = 1
# number_of_steps = 2
# docs = 'sequences.html#similarity-matrix'
# selection_boxes = OrderedDict([
# ('reference', False),
# ('targets', True),
# ('segments', False),
# ])
# buttons = {
# 'continue': {
# 'label': 'Continue to next step',
# 'url': '/similaritymatrix/segmentselection',
# 'color': 'success',
# },
# }
class SegmentSelection(AbsSegmentSelection):
    """Step 2 of the similarity-matrix workflow: sequence-segment selection.

    Configuration-only subclass; the base class renders the selection page
    from these attributes.
    """
    step = 2
    number_of_steps = 2
    docs = 'sequences.html#similarity-matrix'
    # Only the segments box is shown at this step.
    selection_boxes = OrderedDict([
        ('reference', False),
        ('targets', False),
        ('segments', True),
    ])
    # Final step: the button renders the matrix view below.
    buttons = {
        'continue': {
            'label': 'Show matrix',
            'url': '/similaritymatrix/render',
            'color': 'success',
        },
    }
def render_matrix(request):
    """Render the HTML similarity matrix for the user's stored selection."""
    # The receptor/segment choices made in the selection steps live in the
    # session.
    simple_selection = request.session.get('selection', False)

    alignment = Alignment()
    alignment.load_proteins_from_selection(simple_selection)
    alignment.load_segments_from_selection(simple_selection)
    alignment.build_alignment()

    # Consensus/statistics are not needed for a similarity matrix; only the
    # pairwise identity/similarity values are computed.
    alignment.calculate_similarity_matrix()

    context = {'p': alignment.proteins, 'm': alignment.similarity_matrix}
    return render(request, 'similaritymatrix/matrix.html', context)
def render_csv_matrix(request):
    """Render the similarity matrix as a downloadable CSV attachment."""
    # The receptor/segment choices made in the selection steps live in the
    # session.
    simple_selection = request.session.get('selection', False)

    alignment = Alignment()
    alignment.show_padding = False  # no padding columns in CSV output
    alignment.load_proteins_from_selection(simple_selection)
    alignment.load_segments_from_selection(simple_selection)
    alignment.build_alignment()

    # Consensus/statistics are not needed for a similarity matrix; only the
    # pairwise identity/similarity values are computed.
    alignment.calculate_similarity_matrix()

    context = {'p': alignment.proteins, 'm': alignment.similarity_matrix}
    response = render(request, 'similaritymatrix/matrix_csv.html', context)

    # Serve as an attachment named after the site title.
    filename = site_title(request)["site_title"] + "_similaritymatrix.csv"
    response['Content-Disposition'] = "attachment; filename=" + filename
    return response
| protwis/protwis | similaritymatrix/views.py | Python | apache-2.0 | 3,838 |
#!/usr/bin/env python
"""
Created by: Lee Bergstrand (2018)
Description: A simple unittest for testing the genome_property_tree module.
"""
import json
import unittest
from copy import deepcopy
from pygenprop.database_file_parser import parse_genome_property
from pygenprop.database_file_parser import parse_genome_properties_flat_file
from pygenprop.tree import GenomePropertiesTree
class TestGenomePropertyTree(unittest.TestCase):
    """A unit testing class for testing the tree.py module. To be called by nosetests."""

    @classmethod
    def setUpClass(cls):
        """
        Test Properties Rooted DAG Structure:

        GenProp0002 --> --> GenProp0089
        GenProp0066
        GenProp0003 --> --> GenProp0092

        Note 1: The structure of the property tree used above is not the common case.
        Commonly there should be only a single root node.
        """
        # Flat-file rows for five small genome properties; the EV rows
        # wire up the parent/child links shown in the diagram above.
        property_rows_one = [
            ('AC', 'GenProp0002'),
            ('DE', 'Coenzyme F420 utilization'),
            ('TP', 'GUILD'),
            ('--', ''),
            ('SN', '1'),
            ('ID', 'Selfish genetic elements'),
            ('RQ', '0'),
            ('EV', 'GenProp0066;')
        ]
        property_rows_two = [
            ('AC', 'GenProp0003'),
            ('DE', 'Coenzyme F420 utilization'),
            ('TP', 'GUILD'),
            ('--', ''),
            ('SN', '1'),
            ('ID', 'Selfish genetic elements'),
            ('RQ', '0'),
            ('EV', 'GenProp0066;')
        ]
        # GenProp0066 has two steps, each pointing at a child property.
        property_rows_three = [
            ('AC', 'GenProp0066'),
            ('DE', 'Coenzyme F420 utilization'),
            ('TP', 'GUILD'),
            ('--', ''),
            ('SN', '1'),
            ('ID', 'Selfish genetic elements'),
            ('RQ', '0'),
            ('EV', 'GenProp0089;'),
            ('--', ''),
            ('SN', '2'),
            ('ID', 'Selfish genetic elements'),
            ('RQ', '0'),
            ('EV', 'GenProp0092;'),
        ]
        # The two leaves use InterPro/consortium evidence instead of child
        # property references.
        property_rows_four = [
            ('AC', 'GenProp0089'),
            ('DE', 'Coenzyme F420 utilization'),
            ('TP', 'GUILD'),
            ('--', ''),
            ('SN', '1'),
            ('ID', 'LLM-family F420-associated subfamilies'),
            ('RQ', '0'),
            ('EV', 'IPR019910; TIGR03564; sufficient;')
        ]
        property_rows_five = [
            ('AC', 'GenProp0092'),
            ('DE', 'Coenzyme F420 utilization'),
            ('TP', 'GUILD'),
            ('--', ''),
            ('SN', '1'),
            ('ID', 'LLM-family F420-associated subfamilies'),
            ('RQ', '0'),
            ('EV', 'IPR019911; TIGR03565; sufficient;')
        ]
        property_one = parse_genome_property(property_rows_one)
        property_two = parse_genome_property(property_rows_two)
        property_three = parse_genome_property(property_rows_three)
        property_four = parse_genome_property(property_rows_four)
        property_five = parse_genome_property(property_rows_five)
        cls.raw_properties = [property_one, property_two, property_three, property_four, property_five]

    @property
    def properties(self):
        """
        Returns a copy of the properties created during the setUpClass.

        :return: A deep copy of the test genome properties.
        """
        # Deep copy so each test gets pristine, unlinked property objects.
        return deepcopy(self.raw_properties)

    def test_build_genome_property_connections(self):
        """Test that we can add child and parent genome properties."""
        test_properties = self.properties
        property_one = test_properties[0]
        property_two = test_properties[1]
        property_three = test_properties[2]
        property_four = test_properties[3]
        property_five = test_properties[4]
        property_tree = GenomePropertiesTree(*test_properties)
        # The tree should realize exactly the DAG drawn in setUpClass.
        self.assertEqual(property_tree[property_one.id].children[0], property_three)
        self.assertEqual(property_tree[property_two.id].children[0], property_three)
        self.assertCountEqual(property_tree[property_three.id].parents, [property_one, property_two])
        self.assertCountEqual(property_tree[property_three.id].children, [property_four, property_five])
        self.assertEqual(property_tree[property_four.id].parents[0], property_three)
        self.assertEqual(property_tree[property_five.id].parents[0], property_three)

    def test_find_leaf_nodes(self):
        """Test we can find the right leaf nodes."""
        property_tree = GenomePropertiesTree(*self.properties)
        leaf_ids = [leaf.id for leaf in property_tree.leafs]
        self.assertCountEqual(leaf_ids, ['GenProp0089', 'GenProp0092'])

    def test_find_root_node(self):
        """Test that we can find the correct genome property root."""
        property_tree = GenomePropertiesTree(*self.properties)
        root = property_tree.root
        """
        Note 2: The root could be GenProp0002 or GenProp0003 depending on which one is stored first in the property
        tree. Since we are using a dict not an OrderedDict inside of GenomePropertyTree we cannot guarantee that
        GenProp0002 will always be returned as root. Thus we check if the root node is either property.
        """
        self.assertIn(root.id, ['GenProp0002', 'GenProp0003'])

    def test_create_json_graph_links(self):
        """Test that we can create parent child link json."""
        property_tree = GenomePropertiesTree(*self.properties)
        json_links = property_tree.create_graph_links_json(as_list=True)
        predicted_links = [{'parent': 'GenProp0002', 'child': 'GenProp0066'},
                           {'parent': 'GenProp0003', 'child': 'GenProp0066'},
                           {'parent': 'GenProp0066', 'child': 'GenProp0089'},
                           {'parent': 'GenProp0066', 'child': 'GenProp0092'}]
        self.assertCountEqual(json_links, predicted_links)

    def test_create_json_graph_nodes(self):
        """Test that we can create nodes json."""
        property_tree = GenomePropertiesTree(*self.properties)
        json_nodes = property_tree.create_graph_nodes_json(as_list=True)
        # Collapse each field into a set to check contents order-independently.
        ids = {node['id'] for node in json_nodes}
        names = {node['name'] for node in json_nodes}
        types = {node['type'] for node in json_nodes}
        descriptions = {node['description'] for node in json_nodes}
        notes = {node['notes'] for node in json_nodes}
        self.assertCountEqual(ids, {'GenProp0002', 'GenProp0003', 'GenProp0066', 'GenProp0089', 'GenProp0092'})
        self.assertEqual(names, {'Coenzyme F420 utilization'})
        self.assertEqual(types, {'GUILD'})
        self.assertEqual(descriptions, {None})
        self.assertEqual(notes, {None})

    def test_create_nested_json(self):
        """Test that we can create nested json."""
        property_tree = GenomePropertiesTree(*self.properties)
        json_data = property_tree.create_nested_json(as_dict=True)
        root_id = json_data['id']
        """Root could be either GenProp0002 or GenProp0003. See Note 1 in test_find_root_node()."""
        self.assertIn(root_id, ['GenProp0002', 'GenProp0003'])
        # Exactly one child at level one: GenProp0066.
        tree_level_one_children = json_data['children']
        self.assertEqual(len(tree_level_one_children), 1)
        level_one_child = tree_level_one_children[0]
        self.assertEqual(level_one_child['id'], 'GenProp0066')
        # Two distinct leaf children at level two, in either order.
        tree_level_two_children = level_one_child['children']
        self.assertEqual(len(tree_level_two_children), 2)
        level_two_child_one = tree_level_two_children[0]
        level_two_child_two = tree_level_two_children[1]
        self.assertIn(level_two_child_one['id'], ['GenProp0089', 'GenProp0092'])
        self.assertIn(level_two_child_two['id'], ['GenProp0089', 'GenProp0092'])
        self.assertNotEqual(level_two_child_one['id'], level_two_child_two['id'])
        self.assertEqual(level_two_child_one['children'], [])
        self.assertEqual(level_two_child_two['children'], [])

    def test_json_string_creation(self):
        """Test that a JSON tree can be created from the genome properties tree."""
        property_tree = GenomePropertiesTree(*self.properties)
        test_json = property_tree.to_json()
        expected_json_one = '''{"id": "GenProp0002", "name": "Coenzyme F420 utilization", "type": "GUILD", "description": null, "notes": null,
        "children": [{"id": "GenProp0066", "name": "Coenzyme F420 utilization", "type": "GUILD", "description": null,
        "notes": null, "children": [
        {"id": "GenProp0089", "name": "Coenzyme F420 utilization", "type": "GUILD", "description": null,
        "notes": null, "children": []},
        {"id": "GenProp0092", "name": "Coenzyme F420 utilization", "type": "GUILD", "description": null,
        "notes": null, "children": []}]}]}'''
        # Compare parsed structures, not raw strings, so whitespace and key
        # order do not matter.
        test_json_parsed = json.loads(test_json)
        expected_json_parsed_one = json.loads(expected_json_one)
        """Root could be either GenProp0002 or GenProp0003. See Note 1 in test_find_root_node()."""
        expected_json_parsed_two = deepcopy(expected_json_parsed_one)
        expected_json_parsed_two['id'] = 'GenProp0003'
        self.assertIn(test_json_parsed, [expected_json_parsed_one, expected_json_parsed_two])

    def test_json_string_creation_nodes_and_links(self):
        """Test that a nodes and links JSON can be created from the genome properties tree."""
        property_tree = GenomePropertiesTree(*self.properties)
        test_json = property_tree.to_json(nodes_and_links=True)
        expected_json = '''{
        "nodes": [{"id": "GenProp0002", "name": "Coenzyme F420 utilization", "type": "GUILD", "description": null,
        "notes": null},
        {"id": "GenProp0003", "name": "Coenzyme F420 utilization", "type": "GUILD", "description": null,
        "notes": null},
        {"id": "GenProp0066", "name": "Coenzyme F420 utilization", "type": "GUILD", "description": null,
        "notes": null},
        {"id": "GenProp0089", "name": "Coenzyme F420 utilization", "type": "GUILD", "description": null,
        "notes": null},
        {"id": "GenProp0092", "name": "Coenzyme F420 utilization", "type": "GUILD", "description": null,
        "notes": null}],
        "links": [{"parent": "GenProp0002", "child": "GenProp0066"}, {"parent": "GenProp0003", "child": "GenProp0066"},
        {"parent": "GenProp0066", "child": "GenProp0089"},
        {"parent": "GenProp0066", "child": "GenProp0092"}]}'''
        # Compare parsed structures, not raw strings, so whitespace and key
        # order do not matter.
        test_json_parsed = json.loads(test_json)
        expected_json_parsed = json.loads(expected_json)
        test_json_nodes = test_json_parsed['nodes']
        expected_json_nodes = expected_json_parsed['nodes']
        test_json_links = test_json_parsed['links']
        expected_json_links = expected_json_parsed['links']
        self.assertCountEqual(test_json_nodes, expected_json_nodes)
        self.assertCountEqual(test_json_links, expected_json_links)

    def test_parse_genome_property_file(self):
        """Test if a physical genome properties file can be parsed."""
        genome_property_flat_file_path = 'pygenprop/testing/test_constants/test_genome_properties.txt'
        with open(genome_property_flat_file_path) as genome_property_file:
            properties = parse_genome_properties_flat_file(genome_property_file)
        self.assertEqual(len(properties), 4)

    def test_get_property_identifiers(self):
        """Test that we can get a set of all property identifiers in the tree."""
        property_tree = GenomePropertiesTree(*self.properties)
        self.assertEqual(property_tree.genome_property_identifiers, {'GenProp0002', 'GenProp0089', 'GenProp0066',
                                                                     'GenProp0003', 'GenProp0092'})

    def test_get_interpro_identifiers(self):
        """Test that we can get a set of all InterPro identifiers used as evidence by the genome properties tree."""
        property_tree = GenomePropertiesTree(*self.properties)
        self.assertEqual(property_tree.interpro_identifiers, {'IPR019910', 'IPR019911'})

    def test_get_consortium_identifiers(self):
        """Test that we can get a set of all consortium identifiers used as evidence by the genome properties tree."""
        property_tree = GenomePropertiesTree(*self.properties)
        self.assertEqual(property_tree.consortium_identifiers, {'TIGR03564', 'TIGR03565'})
| LeeBergstrand/pygenprop | pygenprop/testing/test_tree.py | Python | apache-2.0 | 12,594 |
from .testlib import TestCase
import argparse
import logging
import os.path
import re
import unittest
from resync.client_utils import init_logging, count_true_args, parse_links, parse_link, parse_capabilities, parse_capability_lists, add_shared_misc_options, process_shared_misc_options
from resync.client import ClientFatalError
from resync.url_or_file_open import CONFIG
class TestClientUtils(TestCase):
    """Tests for the helper functions in resync.client_utils."""

    def test01_init_logging(self):
        """init_logging() should configure the extra loggers and log to file."""
        # to_file=False, logfile=None, default_logfile='/tmp/resync.log',
        # human=True, verbose=False, eval_mode=False,
        # default_logger='client', extra_loggers=None):
        tmplog = os.path.join(self.tmpdir, 'tmp.log')
        init_logging(to_file=True, default_logfile=tmplog,
                     extra_loggers=['x1', 'x2'])
        # check x1 and x2 set, not x3 (can tell by level)
        # BUG FIX: the original used assertTrue(level, logging.DEBUG), where
        # the second argument is a failure *message*, so only truthiness of
        # the level was ever checked. Make that non-NOTSET check explicit.
        self.assertNotEqual(logging.getLogger('x1').level, logging.NOTSET)
        self.assertNotEqual(logging.getLogger('x2').level, logging.NOTSET)
        self.assertEqual(logging.getLogger('x3').level, logging.NOTSET)
        # write something, check goes to file
        log = logging.getLogger('resync')
        log.warning('PIGS MIGHT FLY')
        logtxt = open(tmplog, 'r').read()
        self.assertTrue(re.search(r'WARNING \| PIGS MIGHT FLY', logtxt))

    def test02_count_true_args(self):
        """count_true_args() counts truthy positional arguments."""
        self.assertEqual(count_true_args(), 0)
        self.assertEqual(count_true_args(True), 1)
        self.assertEqual(count_true_args(False), 0)
        self.assertEqual(count_true_args(0, 1, 2, 3), 3)

    def test03_parse_links(self):
        """parse_links() turns ['rel,href', ...] into link dicts."""
        self.assertEqual(parse_links([]), [])
        self.assertEqual(parse_links(['u,h']), [{'href': 'h', 'rel': 'u'}])
        self.assertEqual(parse_links(['u,h', 'v,i']), [
            {'href': 'h', 'rel': 'u'}, {'href': 'i', 'rel': 'v'}])
        # Malformed input must raise, not silently be accepted.
        self.assertRaises(ClientFatalError, parse_links, 'xx')
        self.assertRaises(ClientFatalError, parse_links, ['u'])
        self.assertRaises(ClientFatalError, parse_links, ['u,h', 'u'])

    def test04_parse_link(self):
        """parse_link() parses a single link specification string."""
        # Input string of the form: rel,href,att1=val1,att2=val2
        self.assertEqual(parse_link('u,h'), {'href': 'h', 'rel': 'u'})
        self.assertEqual(parse_link('u,h,a=b'), {
            'a': 'b', 'href': 'h', 'rel': 'u'})
        self.assertEqual(parse_link('u,h,a=b,c=d'), {
            'a': 'b', 'c': 'd', 'href': 'h', 'rel': 'u'})
        self.assertEqual(parse_link('u,h,a=b,a=d'), {
            'a': 'd', 'href': 'h', 'rel': 'u'})  # desired??
        self.assertRaises(ClientFatalError, parse_link, '')
        self.assertRaises(ClientFatalError, parse_link, 'u')
        self.assertRaises(ClientFatalError, parse_link, 'u,')
        self.assertRaises(ClientFatalError, parse_link, 'u,h,,')
        self.assertRaises(ClientFatalError, parse_link, 'u,h,a')
        self.assertRaises(ClientFatalError, parse_link, 'u,h,a=')
        self.assertRaises(ClientFatalError, parse_link, 'u,h,a=b,=c')

    def test05_parse_capabilities(self):
        """parse_capabilities() parses 'cap_name=uri,...' strings."""
        # Input string of the form: cap_name=uri,cap_name=uri
        # good
        c = parse_capabilities('a=')
        self.assertEqual(len(c), 1)
        self.assertEqual(c['a'], '')
        c = parse_capabilities('a=b,c=')
        self.assertEqual(len(c), 2)
        self.assertEqual(c['a'], 'b')
        # bad
        self.assertRaises(ClientFatalError, parse_capabilities, 'a')
        self.assertRaises(ClientFatalError, parse_capabilities, 'a=b,')

    def test06_parse_capability_lists(self):
        """parse_capability_lists() splits a comma-separated URI list."""
        # Input string of the form: uri,uri
        self.assertEqual(parse_capability_lists('a,b'), ['a', 'b'])

    def test07_add_shared_misc_options(self):
        """Test add_shared_misc_options method."""
        parser = argparse.ArgumentParser()
        add_shared_misc_options(parser, default_logfile='/tmp/abc.log')
        args = parser.parse_args(['--hash', 'md5', '--hash', 'sha-1',
                                  '--checksum',
                                  '--from', '2020-01-01T01:01:01Z',
                                  '--exclude', 'ex1', '--exclude', 'ex2',
                                  '--multifile',
                                  '--logger', '--logfile', 'log.out',
                                  '--spec-version', '1.0',
                                  '-v'])
        self.assertEqual(args.hash, ['md5', 'sha-1'])
        self.assertTrue(args.checksum)
        self.assertEqual(args.from_datetime, '2020-01-01T01:01:01Z')
        self.assertEqual(args.exclude, ['ex1', 'ex2'])
        self.assertTrue(args.multifile)
        self.assertTrue(args.logger)
        self.assertEqual(args.logfile, 'log.out')
        self.assertEqual(args.spec_version, '1.0')
        self.assertTrue(args.verbose)
        # Remote options
        parser = argparse.ArgumentParser()
        add_shared_misc_options(parser, default_logfile='/tmp/abc.log', include_remote=True)
        args = parser.parse_args(['--noauth',
                                  '--access-token', 'VerySecretToken',
                                  '--delay', '1.23',
                                  '--user-agent', 'rc/2.1.1'])
        self.assertTrue(args.noauth)
        self.assertEqual(args.access_token, 'VerySecretToken')
        self.assertEqual(args.delay, 1.23)
        self.assertEqual(args.user_agent, 'rc/2.1.1')
        # Remote options not selected -> unknown option, argparse exits
        parser = argparse.ArgumentParser()
        add_shared_misc_options(parser, default_logfile='/tmp/abc.log', include_remote=False)
        self.assertRaises(SystemExit, parser.parse_args, ['--access-token', 'VerySecretToken'])

    def test08_process_shared_misc_options(self):
        """Test process_shared_misc_options method."""
        # NOTE: the original declared `global CONFIG`, but CONFIG is never
        # rebound (only mutated in place), so the declaration was dead code
        # and has been removed.
        config_copy = CONFIG.copy()
        args = argparse.Namespace(hash=['sha-1'], checksum='md5')
        process_shared_misc_options(args)
        self.assertEqual(args.hash, ['sha-1', 'md5'])
        # Remote options
        args = argparse.Namespace(access_token='ExtraSecretToken',
                                  delay=2.5,
                                  user_agent='me',
                                  checksum=None)
        process_shared_misc_options(args, include_remote=True)
        self.assertEqual(CONFIG['bearer_token'], 'ExtraSecretToken')
        self.assertEqual(CONFIG['delay'], 2.5)
        self.assertEqual(CONFIG['user_agent'], 'me')
        # Negative delay is bad...
        args = argparse.Namespace(access_token=None, delay=-1.0, user_agent=None, checksum=None)
        self.assertRaises(argparse.ArgumentTypeError, process_shared_misc_options, args, include_remote=True)
        # Config is a global so reset back to old version
        for (k, v) in config_copy.items():
            CONFIG[k] = v
| resync/resync | tests/test_client_utils.py | Python | apache-2.0 | 6,834 |
"""
# setup module
"""
import os
from setuptools import setup, find_packages
HERE = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(HERE, 'README.md')) as f:
README = f.read()
# with open(os.path.join(HERE, 'CHANGES.txt')) as f:
# CHANGES = f.read()
CHANGES = "Changes"
PREQ = [
'pyramid',
'python-keystoneclient',
'python-swiftclient',
'pyyaml',
'responses',
'sniffer',
'waitress',
]
PREQ_DEV = [
'coverage',
'flake8',
'mock',
'nose',
'pylint',
'pyramid',
'tissue',
'webtest',
'tox',
]
setup(
name='codebase',
version='0.0.1',
description='Coding demo for Python',
long_description=README + '\n\n' + CHANGES,
classifiers=["Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application"],
author='Jason Zhu',
author_email='yuxin.zhu@hp.com',
url='',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=PREQ,
tests_require=PREQ_DEV,
test_suite="codebase",
entry_points="""\
[paste.app_factory]
main = codebase:main
""",
)
| dockerian/pyapi | demo/setup.py | Python | apache-2.0 | 1,256 |
# Copyright (c) 2015 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from oslo_db import exception as db_exc
from oslo_log import log as logging
from sqlalchemy import and_
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as n_exc
from neutron.common import ipv6_utils
from neutron.db import ipam_backend_mixin
from neutron.db import models_v2
from neutron.ipam import requests as ipam_req
from neutron.ipam import subnet_alloc
LOG = logging.getLogger(__name__)
class IpamNonPluggableBackend(ipam_backend_mixin.IpamBackendMixin):
    """Reference (non-pluggable) IPAM backend.

    Allocates IP addresses and subnets directly against the models_v2
    tables (IPAllocationPool, IPAvailabilityRange, IPAllocation) instead
    of delegating to a pluggable IPAM driver.
    """

    @staticmethod
    def _generate_ip(context, subnets):
        """Allocate one free IP from *subnets*, rebuilding ranges on miss.

        If the availability ranges are exhausted, rebuild them from the
        pools minus current allocations and retry exactly once.
        """
        try:
            return IpamNonPluggableBackend._try_generate_ip(context, subnets)
        except n_exc.IpAddressGenerationFailure:
            IpamNonPluggableBackend._rebuild_availability_ranges(context,
                                                                 subnets)

        return IpamNonPluggableBackend._try_generate_ip(context, subnets)

    @staticmethod
    def _try_generate_ip(context, subnets):
        """Generate an IP address.

        The IP address will be generated from one of the subnets defined on
        the network.
        """
        # Lock the availability ranges for the duration of the transaction
        # so concurrent allocations cannot hand out the same address.
        range_qry = context.session.query(
            models_v2.IPAvailabilityRange).join(
                models_v2.IPAllocationPool).with_lockmode('update')
        for subnet in subnets:
            ip_range = range_qry.filter_by(subnet_id=subnet['id']).first()
            if not ip_range:
                LOG.debug("All IPs from subnet %(subnet_id)s (%(cidr)s) "
                          "allocated",
                          {'subnet_id': subnet['id'],
                           'cidr': subnet['cidr']})
                continue
            # Always hand out the first address of the first free range.
            ip_address = ip_range['first_ip']
            if ip_range['first_ip'] == ip_range['last_ip']:
                # No more free indices on subnet => delete
                LOG.debug("No more free IP's in slice. Deleting "
                          "allocation pool.")
                context.session.delete(ip_range)
            else:
                # increment the first free
                new_first_ip = str(netaddr.IPAddress(ip_address) + 1)
                ip_range['first_ip'] = new_first_ip
            LOG.debug("Allocated IP - %(ip_address)s from %(first_ip)s "
                      "to %(last_ip)s",
                      {'ip_address': ip_address,
                       'first_ip': ip_address,
                       'last_ip': ip_range['last_ip']})
            return {'ip_address': ip_address,
                    'subnet_id': subnet['id']}
        # No subnet had a free range left.
        raise n_exc.IpAddressGenerationFailure(net_id=subnets[0]['network_id'])

    @staticmethod
    def _rebuild_availability_ranges(context, subnets):
        """Rebuild availability ranges.

        This method is called only when there's no more IP available or by
        _update_subnet_allocation_pools. Calling
        _update_subnet_allocation_pools before calling this function deletes
        the IPAllocationPools associated with the subnet that is updating,
        which will result in deleting the IPAvailabilityRange too.
        """
        ip_qry = context.session.query(
            models_v2.IPAllocation).with_lockmode('update')
        # PostgreSQL does not support select...for update with an outer join.
        # No join is needed here.
        pool_qry = context.session.query(
            models_v2.IPAllocationPool).options(
                orm.noload('available_ranges')).with_lockmode('update')
        # Sorted iteration gives a deterministic lock acquisition order.
        for subnet in sorted(subnets):
            LOG.debug("Rebuilding availability ranges for subnet %s",
                      subnet)

            # Create a set of all currently allocated addresses
            ip_qry_results = ip_qry.filter_by(subnet_id=subnet['id'])
            allocations = netaddr.IPSet([netaddr.IPAddress(i['ip_address'])
                                         for i in ip_qry_results])

            for pool in pool_qry.filter_by(subnet_id=subnet['id']):
                # Create a set of all addresses in the pool
                poolset = netaddr.IPSet(netaddr.IPRange(pool['first_ip'],
                                                        pool['last_ip']))

                # Use set difference to find free addresses in the pool
                available = poolset - allocations

                # Generator compacts an ip set into contiguous ranges
                def ipset_to_ranges(ipset):
                    # Walk the CIDRs of the set, merging adjacent ones and
                    # yielding a new IPRange whenever a gap is found.
                    first, last = None, None
                    for cidr in ipset.iter_cidrs():
                        if last and last + 1 != cidr.first:
                            yield netaddr.IPRange(first, last)
                            first = None
                        first, last = first if first else cidr.first, cidr.last
                    if first:
                        yield netaddr.IPRange(first, last)

                # Write the ranges to the db
                for ip_range in ipset_to_ranges(available):
                    available_range = models_v2.IPAvailabilityRange(
                        allocation_pool_id=pool['id'],
                        first_ip=str(netaddr.IPAddress(ip_range.first)),
                        last_ip=str(netaddr.IPAddress(ip_range.last)))
                    context.session.add(available_range)

    @staticmethod
    def _allocate_specific_ip(context, subnet_id, ip_address):
        """Allocate a specific IP address on the subnet."""
        ip = int(netaddr.IPAddress(ip_address))
        range_qry = context.session.query(
            models_v2.IPAvailabilityRange).join(
                models_v2.IPAllocationPool).with_lockmode('update')
        results = range_qry.filter_by(subnet_id=subnet_id)
        for ip_range in results:
            first = int(netaddr.IPAddress(ip_range['first_ip']))
            last = int(netaddr.IPAddress(ip_range['last_ip']))
            if first <= ip <= last:
                if first == last:
                    # Requested IP is the only one in the range: remove it.
                    context.session.delete(ip_range)
                    return
                elif first == ip:
                    # Shrink the range from the left.
                    new_first_ip = str(netaddr.IPAddress(ip_address) + 1)
                    ip_range['first_ip'] = new_first_ip
                    return
                elif last == ip:
                    # Shrink the range from the right.
                    new_last_ip = str(netaddr.IPAddress(ip_address) - 1)
                    ip_range['last_ip'] = new_last_ip
                    return
                else:
                    # Adjust the original range to end before ip_address
                    old_last_ip = ip_range['last_ip']
                    new_last_ip = str(netaddr.IPAddress(ip_address) - 1)
                    ip_range['last_ip'] = new_last_ip

                    # Create a new second range for after ip_address
                    new_first_ip = str(netaddr.IPAddress(ip_address) + 1)
                    new_ip_range = models_v2.IPAvailabilityRange(
                        allocation_pool_id=ip_range['allocation_pool_id'],
                        first_ip=new_first_ip,
                        last_ip=old_last_ip)
                    context.session.add(new_ip_range)
                    return

    @staticmethod
    def _check_unique_ip(context, network_id, subnet_id, ip_address):
        """Validate that the IP address on the subnet is not in use.

        Returns True when the address is free, False otherwise.
        """
        ip_qry = context.session.query(models_v2.IPAllocation)
        try:
            ip_qry.filter_by(network_id=network_id,
                             subnet_id=subnet_id,
                             ip_address=ip_address).one()
        except exc.NoResultFound:
            return True
        return False

    def save_allocation_pools(self, context, subnet, allocation_pools):
        """Persist allocation pools (and matching availability ranges)."""
        for pool in allocation_pools:
            first_ip = str(netaddr.IPAddress(pool.first, pool.version))
            last_ip = str(netaddr.IPAddress(pool.last, pool.version))
            ip_pool = models_v2.IPAllocationPool(subnet=subnet,
                                                 first_ip=first_ip,
                                                 last_ip=last_ip)
            context.session.add(ip_pool)
            # A fresh pool starts fully available, so mirror it 1:1.
            ip_range = models_v2.IPAvailabilityRange(
                ipallocationpool=ip_pool,
                first_ip=first_ip,
                last_ip=last_ip)
            context.session.add(ip_range)

    def allocate_ips_for_port_and_store(self, context, port, port_id):
        """Allocate IPs for *port* and record the allocations in the DB."""
        network_id = port['port']['network_id']
        ips = self._allocate_ips_for_port(context, port)
        if ips:
            for ip in ips:
                ip_address = ip['ip_address']
                subnet_id = ip['subnet_id']
                self._store_ip_allocation(context, ip_address, network_id,
                                          subnet_id, port_id)

    def update_port_with_ips(self, context, db_port, new_port, new_mac):
        """Apply fixed-IP (and other) updates from *new_port* to *db_port*.

        Returns a Changes namedtuple of added/original/removed IPs.
        """
        changes = self.Changes(add=[], original=[], remove=[])
        # Check if the IPs need to be updated
        network_id = db_port['network_id']
        if 'fixed_ips' in new_port:
            original = self._make_port_dict(db_port, process_extensions=False)
            changes = self._update_ips_for_port(
                context, network_id,
                original["fixed_ips"], new_port['fixed_ips'],
                original['mac_address'], db_port['device_owner'])
        # Update ips if necessary
        for ip in changes.add:
            IpamNonPluggableBackend._store_ip_allocation(
                context, ip['ip_address'], network_id,
                ip['subnet_id'], db_port.id)
        self._update_db_port(context, db_port, new_port, network_id, new_mac)
        return changes

    def _test_fixed_ips_for_port(self, context, network_id, fixed_ips,
                                 device_owner):
        """Test fixed IPs for port.

        Check that configured subnets are valid prior to allocating any
        IPs. Include the subnet_id in the result if only an IP address is
        configured.

        :raises: InvalidInput, IpAddressInUse, InvalidIpForNetwork,
                 InvalidIpForSubnet
        """
        fixed_ip_set = []
        for fixed in fixed_ips:
            subnet = self._get_subnet_for_fixed_ip(context, fixed, network_id)

            is_auto_addr_subnet = ipv6_utils.is_auto_address_subnet(subnet)
            if 'ip_address' in fixed:
                # Ensure that the IP's are unique
                if not IpamNonPluggableBackend._check_unique_ip(
                        context, network_id,
                        subnet['id'], fixed['ip_address']):
                    raise n_exc.IpAddressInUse(net_id=network_id,
                                               ip_address=fixed['ip_address'])

                # Auto-address (SLAAC/stateless) subnets derive addresses
                # from the MAC; explicit addresses are only allowed for
                # router interfaces.
                if (is_auto_addr_subnet and
                    device_owner not in
                        constants.ROUTER_INTERFACE_OWNERS):
                    msg = (_("IPv6 address %(address)s can not be directly "
                             "assigned to a port on subnet %(id)s since the "
                             "subnet is configured for automatic addresses") %
                           {'address': fixed['ip_address'],
                            'id': subnet['id']})
                    raise n_exc.InvalidInput(error_message=msg)
                fixed_ip_set.append({'subnet_id': subnet['id'],
                                     'ip_address': fixed['ip_address']})
            else:
                # A scan for auto-address subnets on the network is done
                # separately so that all such subnets (not just those
                # listed explicitly here by subnet ID) are associated
                # with the port.
                if (device_owner in constants.ROUTER_INTERFACE_OWNERS_SNAT or
                        not is_auto_addr_subnet):
                    fixed_ip_set.append({'subnet_id': subnet['id']})

        self._validate_max_ips_per_port(fixed_ip_set)
        return fixed_ip_set

    def _allocate_fixed_ips(self, context, fixed_ips, mac_address):
        """Allocate IP addresses according to the configured fixed_ips."""
        ips = []

        # we need to start with entries that asked for a specific IP in case
        # those IPs happen to be next in the line for allocation for ones that
        # didn't ask for a specific IP
        fixed_ips.sort(key=lambda x: 'ip_address' not in x)
        for fixed in fixed_ips:
            subnet = self._get_subnet(context, fixed['subnet_id'])
            is_auto_addr = ipv6_utils.is_auto_address_subnet(subnet)
            if 'ip_address' in fixed:
                if not is_auto_addr:
                    # Remove the IP address from the allocation pool
                    IpamNonPluggableBackend._allocate_specific_ip(
                        context, fixed['subnet_id'], fixed['ip_address'])
                ips.append({'ip_address': fixed['ip_address'],
                            'subnet_id': fixed['subnet_id']})
            # Only subnet ID is specified => need to generate IP
            # from subnet
            else:
                if is_auto_addr:
                    ip_address = self._calculate_ipv6_eui64_addr(context,
                                                                 subnet,
                                                                 mac_address)
                    ips.append({'ip_address': ip_address.format(),
                                'subnet_id': subnet['id']})
                else:
                    subnets = [subnet]
                    # IP address allocation
                    result = self._generate_ip(context, subnets)
                    ips.append({'ip_address': result['ip_address'],
                                'subnet_id': result['subnet_id']})
        return ips

    def _update_ips_for_port(self, context, network_id, original_ips,
                             new_ips, mac_address, device_owner):
        """Add or remove IPs from the port."""
        added = []
        changes = self._get_changed_ips_for_port(context, original_ips,
                                                 new_ips, device_owner)
        # Check if the IP's to add are OK
        to_add = self._test_fixed_ips_for_port(context, network_id,
                                               changes.add, device_owner)
        for ip in changes.remove:
            LOG.debug("Port update. Hold %s", ip)
            IpamNonPluggableBackend._delete_ip_allocation(context,
                                                          network_id,
                                                          ip['subnet_id'],
                                                          ip['ip_address'])

        if to_add:
            LOG.debug("Port update. Adding %s", to_add)
            added = self._allocate_fixed_ips(context, to_add, mac_address)
        return self.Changes(add=added,
                            original=changes.original,
                            remove=changes.remove)

    def _allocate_ips_for_port(self, context, port):
        """Allocate IP addresses for the port.

        If port['fixed_ips'] is set to 'ATTR_NOT_SPECIFIED', allocate IP
        addresses for the port. If port['fixed_ips'] contains an IP address or
        a subnet_id then allocate an IP address accordingly.
        """
        p = port['port']
        ips = []
        v6_stateless = []
        net_id_filter = {'network_id': [p['network_id']]}
        subnets = self._get_subnets(context, filters=net_id_filter)
        is_router_port = (
            p['device_owner'] in constants.ROUTER_INTERFACE_OWNERS_SNAT)

        fixed_configured = p['fixed_ips'] is not attributes.ATTR_NOT_SPECIFIED
        if fixed_configured:
            configured_ips = self._test_fixed_ips_for_port(context,
                                                           p["network_id"],
                                                           p['fixed_ips'],
                                                           p['device_owner'])
            ips = self._allocate_fixed_ips(context,
                                           configured_ips,
                                           p['mac_address'])

            # For ports that are not router ports, implicitly include all
            # auto-address subnets for address association.
            if not is_router_port:
                v6_stateless += [subnet for subnet in subnets
                                 if ipv6_utils.is_auto_address_subnet(subnet)]
        else:
            # Split into v4, v6 stateless and v6 stateful subnets
            v4 = []
            v6_stateful = []
            for subnet in subnets:
                if subnet['ip_version'] == 4:
                    v4.append(subnet)
                elif ipv6_utils.is_auto_address_subnet(subnet):
                    if not is_router_port:
                        v6_stateless.append(subnet)
                else:
                    v6_stateful.append(subnet)

            # One address is generated per IP version family.
            version_subnets = [v4, v6_stateful]
            for subnets in version_subnets:
                if subnets:
                    result = IpamNonPluggableBackend._generate_ip(context,
                                                                  subnets)
                    ips.append({'ip_address': result['ip_address'],
                                'subnet_id': result['subnet_id']})

        for subnet in v6_stateless:
            # IP addresses for IPv6 SLAAC and DHCPv6-stateless subnets
            # are implicitly included.
            ip_address = self._calculate_ipv6_eui64_addr(context, subnet,
                                                         p['mac_address'])
            ips.append({'ip_address': ip_address.format(),
                        'subnet_id': subnet['id']})

        return ips

    def add_auto_addrs_on_network_ports(self, context, subnet, ipam_subnet):
        """For an auto-address subnet, add addrs for ports on the net."""
        with context.session.begin(subtransactions=True):
            network_id = subnet['network_id']
            port_qry = context.session.query(models_v2.Port)
            # SNAT/router interface ports never get auto-addresses.
            ports = port_qry.filter(
                and_(models_v2.Port.network_id == network_id,
                     ~models_v2.Port.device_owner.in_(
                         constants.ROUTER_INTERFACE_OWNERS_SNAT)))
            for port in ports:
                ip_address = self._calculate_ipv6_eui64_addr(
                    context, subnet, port['mac_address'])
                allocated = models_v2.IPAllocation(network_id=network_id,
                                                   port_id=port['id'],
                                                   ip_address=ip_address,
                                                   subnet_id=subnet['id'])
                try:
                    # Do the insertion of each IP allocation entry within
                    # the context of a nested transaction, so that the entry
                    # is rolled back independently of other entries whenever
                    # the corresponding port has been deleted.
                    with context.session.begin_nested():
                        context.session.add(allocated)
                except db_exc.DBReferenceError:
                    LOG.debug("Port %s was deleted while updating it with an "
                              "IPv6 auto-address. Ignoring.", port['id'])

    def _calculate_ipv6_eui64_addr(self, context, subnet, mac_addr):
        """Derive the EUI-64 address for *mac_addr* on *subnet*.

        Raises IpAddressInUse if the derived address is already allocated.
        """
        prefix = subnet['cidr']
        network_id = subnet['network_id']
        ip_address = ipv6_utils.get_ipv6_addr_by_EUI64(
            prefix, mac_addr).format()
        if not self._check_unique_ip(context, network_id,
                                     subnet['id'], ip_address):
            raise n_exc.IpAddressInUse(net_id=network_id,
                                       ip_address=ip_address)
        return ip_address

    def allocate_subnet(self, context, network, subnet, subnetpool_id):
        """Create a subnet, optionally carving it out of a subnet pool."""
        subnetpool = None
        if subnetpool_id:
            subnetpool = self._get_subnetpool(context, subnetpool_id)
            self._validate_ip_version_with_subnetpool(subnet, subnetpool)

        # gateway_ip and allocation pools should be validated or generated
        # only for specific request
        if subnet['cidr'] is not attributes.ATTR_NOT_SPECIFIED:
            subnet['gateway_ip'] = self._gateway_ip_str(subnet,
                                                        subnet['cidr'])
            # allocation_pools are converted to list of IPRanges
            subnet['allocation_pools'] = self._prepare_allocation_pools(
                subnet['allocation_pools'],
                subnet['cidr'],
                subnet['gateway_ip'])

        subnet_request = ipam_req.SubnetRequestFactory.get_request(context,
                                                                   subnet,
                                                                   subnetpool)

        if subnetpool_id:
            driver = subnet_alloc.SubnetAllocator(subnetpool, context)
            ipam_subnet = driver.allocate_subnet(subnet_request)
            subnet_request = ipam_subnet.get_details()

        subnet = self._save_subnet(context,
                                   network,
                                   self._make_subnet_args(
                                       subnet_request,
                                       subnet,
                                       subnetpool_id),
                                   subnet['dns_nameservers'],
                                   subnet['host_routes'],
                                   subnet_request)
        # ipam_subnet is not expected to be allocated for non pluggable ipam,
        # so just return None for it (second element in returned tuple)
        return subnet, None
| paninetworks/neutron | neutron/db/ipam_non_pluggable_backend.py | Python | apache-2.0 | 22,516 |
#!/usr/bin/env python
"""
Extract pronunciations from the ELP items.
Outputs a CSV with the orthographic and phonological form on each
line. The phonological form is stripped of syllabification and stress
markers.
"""
# Copyright 2013 Constantine Lignos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from lingtools.corpus.elp import ELP, NULL
# " is primary stress, % is secondary, . is syllable boundary
DELETION_CHARS = '"%.'
# These represent a reasonable attempt to map the phonemes to
# one-character versions. The distinction between @` and 3` is
# removed; it is not present in most standard phone sets. Flap (4) is
# left alone as it cannot be mapped back to its underlying form.
PHON_REPLACEMENTS = (
# R-colored schwa
("@`", "R"),
("3`", "R"),
# In the ELP it is always `, but some hand output uses '
("3'", "R"),
("@'", "R"),
# Syllabic l
("l=", "L"),
# Move engma to G to leave N for syllabic n.
("N", "G"),
# Syllabic n. Note that N is engma in the original.
("n=", "N"),
# Syllabic m
("m=", "M"),
# dZ to J (like JH in Arpabet)
("dZ", "J"),
# tS to C (like CH in Arpabet)
("tS", "C"),
# aI to Y (like AY in Arpabet)
("aI", "Y"),
# aU to W (like AW in Arpabet)
("aU", "W"),
# OI to 8 (cannot use O like OY in Arpabet, as O is in use)
("OI", "8"),
)
def replace_phons(pron):
"""Replace phonemes using the PHON_REPLACEMENTS table."""
for replacement in PHON_REPLACEMENTS:
pron = pron.replace(*replacement)
return pron
def extract(input_path, output_path, mono_only, cmudict_format, target_sylls):
    """Extract words from the input path and write them to the output.

    NOTE: this module is Python 2 (print statements, 2-argument
    str.translate). input_path is an ELP CSV; output_path receives either
    "word,pron" lines or, with cmudict_format, "WORD p h o n" lines.
    mono_only keeps only monomorphemic entries; target_sylls (or None)
    filters by syllable count.
    """
    with open(output_path, 'wb') as output_file:
        elp = ELP(input_path)
        # Sort by lowercase version of entry
        words = sorted(elp.keys(), key=lambda s: s.lower())
        count = 0
        for word in words:
            entry = elp[word]
            # Extract orthography and pron
            pron = entry.pron
            nsyll = entry.nsyll
            # Match syllable numbers if specified
            if target_sylls is not None and nsyll != target_sylls:
                continue
            # Skip non-monomorphs if specified
            if mono_only and not entry.monomorph:
                continue
            # Skip NULL prons, get the length if there is a pron.
            if pron == NULL:
                continue
            else:
                n_phon = entry.nphon
            # Perform phoneme replacement on the pron
            pron = replace_phons(pron)
            # Remove stress/syllable markers
            # (Python 2 two-argument str.translate: no mapping table,
            # just delete DELETION_CHARS.)
            pron = pron.translate(None, DELETION_CHARS)
            # Check that length matches
            if len(pron) != n_phon:
                print "Bad pronunciation for {!r}:".format(word)
                print "Pron. {!r} of length {}, expected {}.".format(
                    pron, len(pron), n_phon)
                continue
            out_line = ("{},{}".format(word, pron) if not cmudict_format else
                        "{} {}".format(word.upper(), " ".join(pron)))
            print >> output_file, out_line
            count += 1
    print "{} pronunciations written to {}".format(count, output_path)
def main():
    """Parse arguments and call the extractor."""
    arg_parser = argparse.ArgumentParser(description=__doc__)
    arg_parser.add_argument('input', help='input CSV file')
    arg_parser.add_argument('output', help='output CSV file')
    arg_parser.add_argument('-m', '--mono', action='store_true',
                            help='output only monomorphemic items')
    arg_parser.add_argument('-s', '--sylls', nargs='?', type=int, metavar='n',
                            help='output only items with n syllables')
    arg_parser.add_argument('-c', '--cmudict', action='store_true',
                            help='output in CMUDict format')
    parsed = arg_parser.parse_args()
    extract(parsed.input, parsed.output, parsed.mono, parsed.cmudict,
            parsed.sylls)


if __name__ == "__main__":
    main()
| lingtools/lingtools | extract_elp_prons.py | Python | apache-2.0 | 4,566 |
# Copyright (c) 2017 Fujitsu Limited
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import constants as const
from neutron_lib import context
from oslo_utils import uuidutils
from neutron.common import utils
from neutron.objects.logapi import logging_resource as log_object
from neutron.services.logapi.common import constants as log_const
from neutron.services.logapi.common import db_api
from neutron.services.logapi.common import validators
from neutron.services.logapi.rpc import server as server_rpc
from neutron.tests.unit.extensions import test_securitygroup as test_sg
def _create_log(tenant_id, resource_id=None,
                target_id=None, event='ALL', enabled=True,):
    """Build a security-group Log object with the given test attributes."""
    attrs = {
        'id': uuidutils.generate_uuid(),
        'name': 'test',
        'resource_type': 'security_group',
        'project_id': tenant_id,
        'event': event,
        'enabled': enabled}
    # Only attach the optional identifiers when they are truthy, matching
    # how callers distinguish resource-wide from port-targeted logs.
    for key, value in (('resource_id', resource_id),
                       ('target_id', target_id)):
        if value:
            attrs[key] = value
    return log_object.Log(**attrs)
class LoggingDBApiTestCase(test_sg.SecurityGroupDBTestCase):
def setUp(self):
super(LoggingDBApiTestCase, self).setUp()
self.context = context.get_admin_context()
self.sg_id, self.port_id, self.tenant_id = self._create_sg_and_port()
self.context.tenant_id = self.tenant_id
def _create_sg_and_port(self):
with self.network() as network, \
self.subnet(network), \
self.security_group() as sg:
sg_id = sg['security_group']['id']
tenant_id = sg['security_group']['tenant_id']
res = self._create_port(
self.fmt, network['network']['id'],
security_groups=[sg_id])
ports_rest = self.deserialize(self.fmt, res)
port_id = ports_rest['port']['id']
return sg_id, port_id, tenant_id
def test_get_logs_bound_port(self):
log = _create_log(target_id=self.port_id, tenant_id=self.tenant_id)
with mock.patch.object(log_object.Log, 'get_objects',
return_value=[log]):
self.assertEqual(
[log], db_api.get_logs_bound_port(self.context, self.port_id))
# Test get log objects with required resource type
calls = [mock.call(self.context, project_id=self.tenant_id,
resource_type=log_const.SECURITY_GROUP,
enabled=True)]
log_object.Log.get_objects.assert_has_calls(calls)
def test_get_logs_not_bound_port(self):
fake_sg_id = uuidutils.generate_uuid()
log = _create_log(resource_id=fake_sg_id, tenant_id=self.tenant_id)
with mock.patch.object(log_object.Log, 'get_objects',
return_value=[log]):
self.assertEqual(
[], db_api.get_logs_bound_port(self.context, self.port_id))
# Test get log objects with required resource type
calls = [mock.call(self.context, project_id=self.tenant_id,
resource_type=log_const.SECURITY_GROUP,
enabled=True)]
log_object.Log.get_objects.assert_has_calls(calls)
def test_get_logs_bound_sg(self):
log = _create_log(resource_id=self.sg_id, tenant_id=self.tenant_id)
with mock.patch.object(log_object.Log, 'get_objects',
return_value=[log]):
self.assertEqual(
[log], db_api.get_logs_bound_sg(self.context, self.sg_id))
# Test get log objects with required resource type
calls = [mock.call(self.context, project_id=self.tenant_id,
resource_type=log_const.SECURITY_GROUP,
enabled=True)]
log_object.Log.get_objects.assert_has_calls(calls)
def test_get_logs_not_bound_sg(self):
with self.network() as network, \
self.subnet(network), \
self.security_group() as sg:
sg2_id = sg['security_group']['id']
res = self._create_port(
self.fmt, network['network']['id'],
security_groups=[sg2_id])
port2_id = self.deserialize(self.fmt, res)['port']['id']
log = _create_log(target_id=port2_id, tenant_id=self.tenant_id)
with mock.patch.object(log_object.Log, 'get_objects',
return_value=[log]):
self.assertEqual(
[], db_api.get_logs_bound_sg(self.context, self.sg_id))
# Test get log objects with required resource type
calls = [mock.call(self.context, project_id=self.tenant_id,
resource_type=log_const.SECURITY_GROUP,
enabled=True)]
log_object.Log.get_objects.assert_has_calls(calls)
def test__get_ports_being_logged(self):
log1 = _create_log(target_id=self.port_id,
tenant_id=self.tenant_id)
log2 = _create_log(resource_id=self.sg_id,
tenant_id=self.tenant_id)
log3 = _create_log(target_id=self.port_id,
resource_id=self.tenant_id,
tenant_id=self.tenant_id)
log4 = _create_log(tenant_id=self.tenant_id)
with mock.patch.object(
validators, 'validate_log_type_for_port', return_value=True):
ports_log1 = db_api._get_ports_being_logged(self.context, log1)
ports_log2 = db_api._get_ports_being_logged(self.context, log2)
ports_log3 = db_api._get_ports_being_logged(self.context, log3)
ports_log4 = db_api._get_ports_being_logged(self.context, log4)
self.assertEqual([self.port_id], ports_log1)
self.assertEqual([self.port_id], ports_log2)
self.assertEqual([self.port_id], ports_log3)
self.assertEqual([self.port_id], ports_log4)
def test__get_ports_being_logged_not_supported_log_type(self):
    """No ports are returned when the port rejects the log type."""
    log_obj = _create_log(tenant_id=self.tenant_id)
    validator_patch = mock.patch.object(
        validators, 'validate_log_type_for_port', return_value=False)
    with validator_patch:
        ports = db_api._get_ports_being_logged(self.context, log_obj)
    self.assertEqual([], ports)
class LoggingRpcCallbackTestCase(test_sg.SecurityGroupDBTestCase):
    """Tests for the server-side logging RPC skeleton.

    Verifies that the RPC callbacks return the expected per-port
    security-group-rule payloads for log resources and ports.
    """

    def setUp(self):
        super(LoggingRpcCallbackTestCase, self).setUp()
        self.context = context.get_admin_context()
        # System under test: the RPC endpoint agents call into.
        self.rpc_callback = server_rpc.LoggingApiSkeleton()

    def test_get_sg_log_info_for_create_or_update_log(self):
        """Log-resource query returns the port with all effective SG rules."""
        with self.network() as network, \
                self.subnet(network), \
                self.security_group() as sg:
            sg_id = sg['security_group']['id']
            tenant_id = sg['security_group']['tenant_id']
            rule1 = self._build_security_group_rule(
                sg_id,
                'ingress', const.PROTO_NAME_TCP, '22', '22',
            )
            rule2 = self._build_security_group_rule(
                sg_id,
                'egress', const.PROTO_NAME_TCP,
                remote_ip_prefix='10.0.0.1',
            )
            rules = {
                'security_group_rules': [rule1['security_group_rule'],
                                         rule2['security_group_rule']]}
            self._create_security_group_rule(self.fmt, rules)
            res = self._create_port(
                self.fmt, network['network']['id'],
                security_groups=[sg_id])
            ports_rest = self.deserialize(self.fmt, res)
            port_id = ports_rest['port']['id']
            log = _create_log(resource_id=sg_id, tenant_id=tenant_id)
            with mock.patch.object(
                server_rpc,
                'get_rpc_method',
                return_value=server_rpc.get_sg_log_info_for_log_resources
            ):
                with mock.patch.object(validators,
                                       'validate_log_type_for_port',
                                       return_value=True):
                    ports_log = (
                        self.rpc_callback.get_sg_log_info_for_log_resources(
                            self.context,
                            resource_type=log_const.SECURITY_GROUP,
                            log_resources=[log])
                    )
                    # Expected payload: the two default egress rules plus
                    # the two rules created above, attached to the port.
                    expected = [{
                        'event': log.event,
                        'id': log.id,
                        'ports_log': [{
                            'port_id': port_id,
                            'security_group_rules': [
                                {'direction': 'egress',
                                 'ethertype': u'IPv4',
                                 'security_group_id': sg_id},
                                {'direction': 'egress',
                                 'ethertype': u'IPv6',
                                 'security_group_id': sg_id},
                                {'direction': 'ingress',
                                 'ethertype': u'IPv4',
                                 'port_range_max': 22,
                                 'port_range_min': 22,
                                 'protocol': u'tcp',
                                 'security_group_id': sg_id},
                                {'direction': 'egress',
                                 'ethertype': u'IPv4',
                                 'protocol': u'tcp',
                                 'dest_ip_prefix':
                                     utils.AuthenticIPNetwork('10.0.0.1/32'),
                                 'security_group_id': sg_id}]
                        }],
                        'project_id': tenant_id
                    }]

                    self.assertEqual(expected, ports_log)
            self._delete('ports', port_id)

    def test_get_sg_log_info_for_port_added_event(self):
        """Port-added query returns that port's effective SG rules."""
        with self.network() as network, \
                self.subnet(network), \
                self.security_group() as sg:
            sg_id = sg['security_group']['id']
            tenant_id = sg['security_group']['tenant_id']
            rule1 = self._build_security_group_rule(
                sg_id,
                'ingress', const.PROTO_NAME_TCP, '11', '13',
                remote_ip_prefix='10.0.0.1',
            )
            rule2 = self._build_security_group_rule(
                sg_id,
                'egress', const.PROTO_NAME_ICMP,
            )
            rules = {
                'security_group_rules': [rule1['security_group_rule'],
                                         rule2['security_group_rule']]}
            self._create_security_group_rule(self.fmt, rules)
            res = self._create_port(
                self.fmt, network['network']['id'],
                security_groups=[sg_id],
                tenant_id=tenant_id
            )
            ports_rest = self.deserialize(self.fmt, res)
            port_id = ports_rest['port']['id']
            log = _create_log(tenant_id=tenant_id)
            with mock.patch.object(
                    log_object.Log, 'get_objects', return_value=[log]):
                with mock.patch.object(
                    server_rpc,
                    'get_rpc_method',
                    return_value=server_rpc.get_sg_log_info_for_port
                ):
                    with mock.patch.object(
                            validators,
                            'validate_log_type_for_port',
                            return_value=True):
                        ports_log = (
                            self.rpc_callback.get_sg_log_info_for_port(
                                self.context,
                                resource_type=log_const.SECURITY_GROUP,
                                port_id=port_id)
                        )
                        expected = [{
                            'event': log.event,
                            'id': log.id,
                            'ports_log': [{
                                'port_id': port_id,
                                'security_group_rules': [
                                    {'direction': 'egress',
                                     'ethertype': u'IPv4',
                                     'security_group_id': sg_id},
                                    {'direction': 'egress',
                                     'ethertype': u'IPv6',
                                     'security_group_id': sg_id},
                                    {'direction': 'ingress',
                                     'ethertype': u'IPv4',
                                     'port_range_max': 13,
                                     'port_range_min': 11,
                                     'protocol': u'tcp',
                                     'source_ip_prefix':
                                         utils.AuthenticIPNetwork(
                                             '10.0.0.1/32'),
                                     'security_group_id': sg_id},
                                    {'direction': 'egress',
                                     'ethertype': u'IPv4',
                                     'protocol': u'icmp',
                                     'security_group_id': sg_id}]
                            }],
                            'project_id': tenant_id
                        }]

                        self.assertEqual(expected, ports_log)
                self._delete('ports', port_id)
| noironetworks/neutron | neutron/tests/unit/services/logapi/common/test_db_api.py | Python | apache-2.0 | 14,273 |
import sys
from flexmock import flexmock
import inject
from mcloud.events import EventBus
from mcloud.txdocker import IDockerClient, DockerTwistedClient
from mcloud.util import txtimeout
import pytest
from mcloud.remote import Server, Client, ApiError, Task, ApiRpcServer
from twisted.internet import reactor, defer
from twisted.python import log
import txredisapi as redis
class MockServer(Server):
    """Server that records the last message received, for assertions."""

    # Last payload received from any client; None until on_message fires.
    message = None

    def on_message(self, client, message, isBinary=False):
        self.message = message
class MockClient(Client):
    """Client that records the last message received, for assertions."""

    # Last payload received from the server; None until on_message fires.
    message = None

    def on_message(self, message, isBinary=False):
        self.message = message
def sleep(secs):
    """Return a Deferred that fires with None after *secs* seconds."""
    deferred = defer.Deferred()
    # Schedule the callback on the reactor instead of blocking the thread.
    reactor.callLater(secs, deferred.callback, None)
    return deferred
#@pytest.inlineCallbacks
#def test_exchange():
# inject.clear()
#
# #log.startLogging(sys.stdout)
#
# server = MockServer(port=9999)
# server.bind()
#
# assert len(server.clients) == 0
#
# client = MockClient(port=9999)
# yield client.connect()
#
# assert len(server.clients) == 1
#
# log.msg('Sending data')
# yield client.send('boo')
#
# yield sleep(0.1)
#
# assert server.message == 'boo'
#
# yield server.clients[0].sendMessage('baz')
#
# yield sleep(0.1)
#
# assert client.message == 'baz'
#
# client.shutdown()
# server.shutdown()
#
# yield sleep(0.1)
@pytest.inlineCallbacks
def test_request_response():
    """A connected client's 'ping' call is answered with 'pong'."""
    # Reset the injector so bindings from other tests cannot leak in.
    inject.clear()
    inject.configure(lambda binder: binder.bind('settings', None))

    api_server = Server(port=9998, no_ssl=True)
    api_server.bind()

    api_client = Client(port=9998, no_ssl=True)
    yield api_client.connect()

    reply = yield api_client.call_sync('ping')
    assert reply == 'pong'

    api_client.shutdown()
    api_server.shutdown()
@pytest.inlineCallbacks
def test_request_response_no_such_command():
    """Calling an unregistered command raises ApiError on the client."""
    #-----------------------------------
    # preparations
    #-----------------------------------

    # cleanup a bit
    inject.clear()

    def my_config(binder):
        binder.bind('settings', None)
    inject.configure(my_config)

    # NOTE: debug logging was enabled unconditionally here
    # (log.startLogging(sys.stdout)), spamming every test run's output;
    # keep it commented out, consistent with the sibling tests.
    # log.startLogging(sys.stdout)

    server = Server(port=9996, no_ssl=True)
    server.bind()

    client = Client(port=9996, no_ssl=True)
    yield client.connect()

    with pytest.raises(ApiError):
        yield client.call_sync('hoho')

    client.shutdown()
    server.shutdown()
@pytest.inlineCallbacks
def test_tasks():
    """End-to-end task flow: start, progress events, then final result."""
    #-----------------------------------
    # preparations
    #-----------------------------------

    # cleanup a bit
    inject.clear()

    rc = yield redis.Connection(dbid=2)
    eb = EventBus(rc)
    yield eb.connect()

    def my_config(binder):
        binder.bind(redis.Connection, rc)
        binder.bind(EventBus, eb)
        binder.bind('settings', None)
    inject.configure(my_config)

    yield rc.flushdb()

    api = inject.instance(ApiRpcServer)

    #-----------------------------------
    # Test itself
    #-----------------------------------

    # this will emulate some long-running process
    task_defered = defer.Deferred()

    # this is a mock that will execute our long-running process
    task = flexmock()
    task.should_receive('foo').with_args(int, 123, 'test').once().and_return(task_defered)

    # register our task
    api.tasks['baz'] = task.foo

    # start server -> real server on tcp port
    server = Server(port=9997, no_ssl=True)
    server.bind()

    # real client connection here
    client = Client(port=9997, no_ssl=True)
    yield client.connect()

    # client calls a task
    task = Task('baz')

    yield client.call(task, 123, 'test')
    yield sleep(0.1)

    assert task.id > 0
    assert task.name == 'baz'
    assert task.is_running is True

    assert len(server.rpc_server.tasks_running) == 1
    assert server.rpc_server.tasks_running[task.id]['name'] == 'baz'
    assert len(server.rpc_server.task_list()) == 1

    # no data should be on client yet
    yield sleep(0.1)
    assert task.data == []
    assert task.response is None

    # now server sends some progress
    yield server.clients[0].send_event('task.progress.%s' % task.id, 'nami-nami')

    # and client should receive this data
    yield sleep(0.1)
    assert task.data == ['nami-nami']
    assert task.is_running is True
    assert task.response is None

    # now our long-running process stopped and returned some result
    yield task_defered.callback('this is respnse')

    # and client should receive this result
    yield sleep(0.1)
    assert task.data == ['nami-nami']
    assert task.is_running == False
    assert task.response == 'this is respnse'

    assert len(server.rpc_server.tasks_running) == 0
    assert len(server.rpc_server.task_list()) == 0

    #-----------------------------------
    # Cleanup
    #-----------------------------------
    client.shutdown()
    server.shutdown()
    yield sleep(0.1)
@pytest.inlineCallbacks
def test_task_terminate():
    """A running task can be terminated by the client."""
    #-----------------------------------
    # preparations
    #-----------------------------------

    # cleanup a bit
    inject.clear()

    rc = yield redis.Connection(dbid=2)
    eb = EventBus(rc)
    yield eb.connect()

    def my_config(binder):
        binder.bind(redis.Connection, rc)
        binder.bind(EventBus, eb)
        binder.bind('settings', None)
    inject.configure(my_config)

    yield rc.flushdb()

    api = inject.instance(ApiRpcServer)

    #-----------------------------------
    # Test itself
    #-----------------------------------

    # this will emulate some long-running process
    task_defered = defer.Deferred()

    # this is a mock that will execute our long-running process
    task = flexmock()
    task.should_receive('foo').with_args(int, 123, 'test').once().and_return(task_defered)

    # register our task
    api.tasks['baz'] = task.foo

    # start server -> real server on tcp port
    server = Server(port=9987, no_ssl=True)
    server.bind()

    # real client connection here
    client = Client(port=9987, no_ssl=True)
    yield client.connect()

    # client calls a task
    task = Task('baz')

    yield client.call(task, 123, 'test')
    yield sleep(0.1)

    assert task.id > 0
    assert task.name == 'baz'
    assert task.is_running is True

    # now client terminates the task
    yield sleep(0.1)
    client.terminate_task(task.id)
    yield sleep(0.1)

    assert task.is_running is False

    #-----------------------------------
    # Cleanup
    #-----------------------------------
    client.shutdown()
    server.shutdown()
    yield sleep(0.1)
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Provide strategies for allocating network segments. (vlan, vxlan, etc)
"""
from quark.db import api as db_api
from quark import exceptions as quark_exceptions
from oslo_log import log as logging
from oslo_utils import timeutils
import itertools
import random
LOG = logging.getLogger(__name__)
class BaseSegmentAllocation(object):
    """Base strategy for allocating network segment ids to networks.

    Subclasses define a ``segment_type`` and range validation; this base
    class implements range creation/population and (de)allocation of
    individual segment ids against the database.
    """

    # Subclasses set this to the segment type they manage (e.g. 'vxlan').
    segment_type = None

    def _validate_range(self, context, sa_range):
        """Validate a proposed allocation range; subclasses must override."""
        raise NotImplementedError()

    def _chunks(self, iterable, chunk_size):
        """Chunks data into chunk with size<=chunk_size."""
        iterator = iter(iterable)
        chunk = list(itertools.islice(iterator, 0, chunk_size))
        while chunk:
            yield chunk
            chunk = list(itertools.islice(iterator, 0, chunk_size))

    def _check_collisions(self, new_range, existing_ranges):
        """Check for overlapping ranges."""
        def _contains(num, r1):
            return (num >= r1[0] and
                    num <= r1[1])

        def _is_overlap(r1, r2):
            return (_contains(r1[0], r2) or
                    _contains(r1[1], r2) or
                    _contains(r2[0], r1) or
                    _contains(r2[1], r1))

        for existing_range in existing_ranges:
            if _is_overlap(new_range, existing_range):
                return True
        return False

    def _make_segment_allocation_dict(self, id, sa_range):
        """Build the row dict for one segment id; it starts deallocated."""
        return dict(
            id=id,
            segment_id=sa_range["segment_id"],
            segment_type=sa_range["segment_type"],
            segment_allocation_range_id=sa_range["id"],
            deallocated=True
        )

    def _populate_range(self, context, sa_range):
        """Bulk-insert allocation rows for every id in the range.

        Inserts in chunks of 5000 and flushes after each chunk to keep
        transaction/memory pressure bounded on large ranges.
        """
        first_id = sa_range["first_id"]
        last_id = sa_range["last_id"]
        id_range = xrange(first_id, last_id + 1)

        LOG.info("Starting segment allocation population for "
                 "range:%s size:%s."
                 % (sa_range["id"], len(id_range)))

        total_added = 0
        for chunk in self._chunks(id_range, 5000):
            sa_dicts = []
            for segment_id in chunk:
                sa_dict = self._make_segment_allocation_dict(
                    segment_id, sa_range)
                sa_dicts.append(sa_dict)
            db_api.segment_allocation_range_populate_bulk(context, sa_dicts)
            context.session.flush()
            total_added = total_added + len(sa_dicts)
            LOG.info("Populated %s/%s segment ids for range:%s"
                     % (total_added, len(id_range), sa_range["id"]))

        LOG.info("Finished segment allocation population for "
                 "range:%s size:%s."
                 % (sa_range["id"], len(id_range)))

    def _create_range(self, context, sa_range):
        """Validate and persist a new allocation range.

        Raises InvalidSegmentAllocationRange if the new range overlaps an
        existing range for the same segment/type.
        """
        with context.session.begin(subtransactions=True):
            # Validate any range-specific things, like min/max ids.
            self._validate_range(context, sa_range)

            # Check any existing ranges for this segment for collisions
            segment_id = sa_range["segment_id"]
            segment_type = sa_range["segment_type"]
            filters = {"segment_id": segment_id,
                       "segment_type": segment_type}
            existing_ranges = db_api.segment_allocation_range_find(
                context, lock_mode=True, scope=db_api.ALL, **filters)
            collides = self._check_collisions(
                (sa_range["first_id"], sa_range["last_id"]),
                [(r["first_id"], r["last_id"]) for r in existing_ranges])
            if collides:
                raise quark_exceptions.InvalidSegmentAllocationRange(
                    msg=("The specified allocation collides with existing "
                         "range"))

            return db_api.segment_allocation_range_create(
                context, **sa_range)

    def create_range(self, context, sa_range):
        """Public entry point: create a new allocation range."""
        return self._create_range(context, sa_range)

    def populate_range(self, context, sa_range):
        """Public entry point: populate allocation rows for a range."""
        return self._populate_range(context, sa_range)

    def _try_allocate(self, context, segment_id, network_id):
        """Find a deallocated network segment id and reallocate it.

        NOTE(morgabra) This locks the segment table, but only the rows
        in use by the segment, which is pretty handy if we ever have
        more than 1 segment or segment type.
        """
        LOG.info("Attempting to allocate segment for network %s "
                 "segment_id %s segment_type %s"
                 % (network_id, segment_id, self.segment_type))

        # Only consider ranges that are not administratively disabled.
        filter_dict = {
            "segment_id": segment_id,
            "segment_type": self.segment_type,
            "do_not_use": False
        }
        available_ranges = db_api.segment_allocation_range_find(
            context, scope=db_api.ALL, **filter_dict)
        available_range_ids = [r["id"] for r in available_ranges]

        try:
            with context.session.begin(subtransactions=True):
                # Search for any deallocated segment ids for the
                # given segment.
                filter_dict = {
                    "deallocated": True,
                    "segment_id": segment_id,
                    "segment_type": self.segment_type,
                    "segment_allocation_range_ids": available_range_ids
                }

                # NOTE(morgabra) We select 100 deallocated segment ids from
                # the table here, and then choose 1 randomly. This is to help
                # alleviate the case where an uncaught exception might leave
                # an allocation active on a remote service but we do not have
                # a record of it locally. If we *do* end up choosing a
                # conflicted id, the caller should simply allocate another one
                # and mark them all as reserved. If a single object has
                # multiple reservations on the same segment, they will not be
                # deallocated, and the operator must resolve the conficts
                # manually.
                allocations = db_api.segment_allocation_find(
                    context, lock_mode=True, **filter_dict).limit(100).all()

                if allocations:
                    allocation = random.choice(allocations)

                    # Allocate the chosen segment.
                    update_dict = {
                        "deallocated": False,
                        "deallocated_at": None,
                        "network_id": network_id
                    }
                    allocation = db_api.segment_allocation_update(
                        context, allocation, **update_dict)
                    LOG.info("Allocated segment %s for network %s "
                             "segment_id %s segment_type %s"
                             % (allocation["id"], network_id, segment_id,
                                self.segment_type))
                    return allocation
        except Exception:
            # Best-effort: fall through and report no allocation found.
            LOG.exception("Error in segment reallocation.")

        LOG.info("Cannot find reallocatable segment for network %s "
                 "segment_id %s segment_type %s"
                 % (network_id, segment_id, self.segment_type))

    def allocate(self, context, segment_id, network_id):
        """Allocate a segment id or raise SegmentAllocationFailure."""
        allocation = self._try_allocate(
            context, segment_id, network_id)

        if allocation:
            return allocation

        raise quark_exceptions.SegmentAllocationFailure(
            segment_id=segment_id, segment_type=self.segment_type)

    def _try_deallocate(self, context, segment_id, network_id):
        """Mark the network's allocation as deallocated, if unambiguous.

        Refuses to act (and logs an error) when more than one allocation
        matches, leaving the rows for operator cleanup.
        """
        LOG.info("Attempting to deallocate segment for network %s "
                 "segment_id %s segment_type %s"
                 % (network_id, segment_id, self.segment_type))
        with context.session.begin(subtransactions=True):
            filter_dict = {
                "deallocated": False,
                "segment_id": segment_id,
                "segment_type": self.segment_type,
                "network_id": network_id
            }
            allocations = db_api.segment_allocation_find(
                context, **filter_dict).all()

            if not allocations:
                LOG.info("Could not find allocated segment for network %s "
                         "segment_id %s segment_type %s for deallocate."
                         % (network_id, segment_id, self.segment_type))
                return

            if len(allocations) > 1:
                LOG.error("Found multiple allocated segments for network %s "
                          "segment_id %s segment_type %s for deallocate. "
                          "Refusing to deallocate, these allocations are now "
                          "orphaned."
                          % (network_id, segment_id, self.segment_type))
                return

            allocation = allocations[0]

            # Deallocate the found segment.
            update_dict = {
                "deallocated": True,
                "deallocated_at": timeutils.utcnow(),
                "network_id": None
            }
            allocation = db_api.segment_allocation_update(
                context, allocation, **update_dict)

            LOG.info("Deallocated %s allocated segment(s) for network %s "
                     "segment_id %s segment_type %s"
                     % (len(allocations), network_id, segment_id,
                        self.segment_type))

    def deallocate(self, context, segment_id, network_id):
        """Public entry point: release the network's segment allocation."""
        self._try_deallocate(context, segment_id, network_id)
class VXLANSegmentAllocation(BaseSegmentAllocation):
    """Segment allocation strategy for VXLAN network ids (VNIs)."""

    # 24-bit VNI space; 0 is reserved, so the valid range is [1, 2**24 - 1].
    VXLAN_MIN = 1
    VXLAN_MAX = (2 ** 24) - 1

    segment_type = 'vxlan'

    def _validate_range(self, context, sa_range):
        """Raise InvalidSegmentAllocationRange unless first/last ids form
        a non-empty, ordered range within the valid VNI space.
        """
        # Validate that the range is legal and makes sense.
        try:
            first_id = int(sa_range["first_id"])
            last_id = int(sa_range["last_id"])
        except Exception:
            # Missing keys or non-numeric values.
            raise quark_exceptions.InvalidSegmentAllocationRange(
                msg="The specified allocation range is invalid")

        # NOTE: explicit checks instead of `assert` -- asserts are stripped
        # when Python runs with -O, which would silently disable validation.
        if not (self.VXLAN_MIN <= first_id <= last_id <= self.VXLAN_MAX):
            raise quark_exceptions.InvalidSegmentAllocationRange(
                msg="The specified allocation range is invalid")
class SegmentAllocationRegistry(object):
    """Lookup table mapping segment type names to allocation strategies."""

    def __init__(self):
        vxlan_strategy = VXLANSegmentAllocation()
        self.strategies = {vxlan_strategy.segment_type: vxlan_strategy}

    def is_valid_strategy(self, strategy_name):
        """Return True iff a strategy is registered under *strategy_name*."""
        return strategy_name in self.strategies

    def get_strategy(self, strategy_name):
        """Return the registered strategy, or raise if unknown."""
        try:
            return self.strategies[strategy_name]
        except KeyError:
            raise Exception("Segment allocation strategy %s not found."
                            % (strategy_name))
# Module-level singleton registry used by callers to look up strategies.
REGISTRY = SegmentAllocationRegistry()
| alanquillin/quark | quark/segment_allocations.py | Python | apache-2.0 | 11,496 |
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs oldisim.
oldisim is a framework to support benchmarks that emulate Online Data-Intensive
(OLDI) workloads, such as web search and social networking. oldisim includes
sample workloads built on top of this framework.
With its default config, oldisim models an example search topology. A user query
is first processed by a front-end server, which then eventually fans out the
query to a large number of leaf nodes. The latency is measured at the root of
the tree, and often increases with the increase of fan-out. oldisim reports a
scaling efficiency for a given topology. The scaling efficiency is defined
as queries per second (QPS) at the current fan-out normalized to QPS at fan-out
1 with ISO root latency.
Sample command line:
./pkb.py --benchmarks=oldisim --project='YOUR_PROJECT' --oldisim_num_leaves=4
--oldisim_fanout=1,2,3,4 --oldisim_latency_target=40
--oldisim_latency_metric=avg
The above command will build a tree with one root node and four leaf nodes. The
average latency target is 40ms. The root node will vary the fanout from 1 to 4
and measure the scaling efficiency.
"""
import logging
import re
import time
from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import oldisim_dependencies
FLAGS = flags.FLAGS
flags.DEFINE_integer('oldisim_num_leaves', 4, 'number of leaf nodes',
lower_bound=1, upper_bound=64)
flags.DEFINE_list('oldisim_fanout', [],
'a list of fanouts to be tested. '
'a root can connect to a subset of leaf nodes (fanout). '
'the value of fanout has to be smaller than num_leaves.')
flags.DEFINE_enum('oldisim_latency_metric', 'avg',
['avg', '50p', '90p', '95p', '99p', '99.9p'],
'Allowable metrics for end-to-end latency')
flags.DEFINE_float('oldisim_latency_target', '30', 'latency target in ms')
NUM_DRIVERS = 1
NUM_ROOTS = 1
BENCHMARK_NAME = 'oldisim'
BENCHMARK_CONFIG = """
oldisim:
description: >
Run oldisim. Specify the number of leaf
nodes with --oldisim_num_leaves
vm_groups:
default:
vm_spec: *default_single_core
"""
def GetConfig(user_config):
  """Decide number of vms needed to run oldisim."""
  config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
  # One driver, one root, plus the configured number of leaf nodes.
  total_vms = NUM_DRIVERS + NUM_ROOTS + FLAGS.oldisim_num_leaves
  config['vm_groups']['default']['vm_count'] = total_vms
  return config
def InstallAndBuild(vm):
  """Install and build oldisim on the target vm.

  Args:
    vm: A vm instance that runs oldisim.
  """
  logging.info('prepare oldisim on %s', vm)
  # Side effect only: installs the oldisim package and its dependencies.
  vm.Install('oldisim_dependencies')
def Prepare(benchmark_spec):
  """Install oldisim on every vm and launch the leaf node servers.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  vms = benchmark_spec.vms
  if vms:
    vm_util.RunThreaded(InstallAndBuild, vms)

  leaf_binary = oldisim_dependencies.BinaryPath('LeafNode')
  # Everything after the driver and root vms acts as a leaf node.
  for leaf_vm in vms[NUM_DRIVERS + NUM_ROOTS:]:
    command = '%s --threads=%s' % (leaf_binary, leaf_vm.num_cpus)
    # Detach the server and discard its output.
    leaf_vm.RemoteCommand('%s &> /dev/null &' % command)
def SetupRoot(root_vm, leaf_vms):
  """Connect a root node to a list of leaf nodes.

  Args:
    root_vm: A root vm instance.
    leaf_vms: A list of leaf vm instances.
  """
  root_binary = oldisim_dependencies.BinaryPath('ParentNode')
  leaf_flags = ' '.join('--leaf=%s' % leaf.internal_ip for leaf in leaf_vms)
  command = '%s --threads=%s %s' % (root_binary, root_vm.num_cpus, leaf_flags)
  logging.info('Root cmdline: %s', command)
  # Detach the server and discard its output.
  root_vm.RemoteCommand('%s &> /dev/null &' % command)
def ParseOutput(oldisim_output):
  """Parses the output from oldisim.

  Args:
    oldisim_output: A string containing the text of oldisim output.

  Returns:
    A tuple of (peak_qps, peak_lat, target_qps, target_lat). Entries are
    None when the corresponding line never appeared in the output.
  """
  re_peak = re.compile(r'peak qps = (?P<qps>\S+), latency = (?P<lat>\S+)')
  re_target = re.compile(r'measured_qps = (?P<qps>\S+), latency = (?P<lat>\S+)')

  # Initialize so malformed output yields Nones instead of raising
  # UnboundLocalError when no 'peak qps' line is present.
  peak_qps = peak_lat = target_qps = target_lat = None
  for line in oldisim_output.splitlines():
    match = re_peak.search(line)
    if match:
      peak_qps = float(match.group('qps'))
      peak_lat = float(match.group('lat'))
      # The peak numbers double as the target until a later
      # 'measured_qps' line overrides them.
      target_qps = peak_qps
      target_lat = peak_lat
      continue
    match = re_target.search(line)
    if match:
      target_qps = float(match.group('qps'))
      target_lat = float(match.group('lat'))

  return peak_qps, peak_lat, target_qps, target_lat
def RunLoadTest(benchmark_spec, fanout):
"""Run Loadtest for a given topology.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
fanout: Request is first processed by a root node, which then
fans out to a subset of leaf nodes.
Returns:
A tuple of (peak_qps, peak_lat, target_qps, target_lat).
"""
assert fanout <= FLAGS.oldisim_num_leaves, (
'The number of leaf nodes a root node connected to is defined by the '
'flag fanout. Its current value %s is bigger than the total number of '
'leaves %s.' % (fanout, FLAGS.oldisim_num_leaves))
vms = benchmark_spec.vms
driver_vms = []
root_vms = []
leaf_vms = []
for vm_index, vm in enumerate(vms):
if vm_index < NUM_DRIVERS:
driver_vms.append(vm)
elif vm_index < (NUM_DRIVERS + NUM_ROOTS):
root_vms.append(vm)
else:
leaf_vms.append(vm)
leaf_vms = leaf_vms[:fanout]
for root_vm in root_vms:
SetupRoot(root_vm, leaf_vms)
driver_vm = driver_vms[0]
driver_binary = oldisim_dependencies.BinaryPath('DriverNode')
launch_script = oldisim_dependencies.Path('workloads/search/search_qps.sh')
driver_args = ' '.join(['--server=%s' % i.internal_ip
for i in root_vms])
# Make sure server is up.
time.sleep(5)
driver_cmd = '%s -s %s:%s -t 30 -- %s %s --threads=%s --depth=16' % (
launch_script, FLAGS.oldisim_latency_metric, FLAGS.oldisim_latency_target,
driver_binary, driver_args, driver_vm.num_cpus)
logging.info('Driver cmdline: %s', driver_cmd)
stdout, _ = driver_vm.RemoteCommand(driver_cmd, should_log=True)
return ParseOutput(stdout)
def Run(benchmark_spec):
  """Run oldisim on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.

  Returns:
    A list of sample.Sample objects.
  """
  vms = benchmark_spec.vms
  first_vm = vms[0]

  # Always measure fanout 1 (the baseline) and the full tree; add any
  # user-requested fanouts that fall strictly in between.
  fanouts = {1, FLAGS.oldisim_num_leaves}
  fanouts.update(f for f in map(int, FLAGS.oldisim_fanout)
                 if 1 < f < FLAGS.oldisim_num_leaves)

  metadata = {'num_cpus': first_vm.num_cpus}
  metadata.update(first_vm.GetMachineTypeDict())

  results = []
  base_qps = None
  for fanout in sorted(fanouts):
    # Index 2 of the tuple is the target qps for this fanout.
    target_qps = RunLoadTest(benchmark_spec, fanout)[2]
    if fanout == 1:
      base_qps = target_qps
    efficiency = round(min(target_qps / base_qps, 1), 2)
    results.append(sample.Sample(
        'Scaling efficiency of %s leaves' % fanout, efficiency, '', metadata))
  return results
def Cleanup(benchmark_spec):  # pylint: disable=unused-argument
  """Cleanup oldisim on the target vm.

  Args:
    benchmark_spec: The benchmark specification. Contains all data that is
        required to run the benchmark.
  """
  for index, vm in enumerate(benchmark_spec.vms):
    if index < NUM_DRIVERS:
      # Driver vms started no background servers; nothing to kill.
      continue
    if index < NUM_DRIVERS + NUM_ROOTS:
      vm.RemoteCommand('sudo pkill ParentNode')
    else:
      vm.RemoteCommand('sudo pkill LeafNode')
| meteorfox/PerfKitBenchmarker | perfkitbenchmarker/linux_benchmarks/oldisim_benchmark.py | Python | apache-2.0 | 8,628 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-24 05:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the User table."""

    # First migration for this app; no prior migration state exists.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=30)),
                # NOTE(review): password stored as a plain CharField --
                # presumably unhashed; confirm whether hashing is applied
                # elsewhere before this reaches production.
                ('password', models.CharField(max_length=30)),
            ],
        ),
    ]
| sdu14SoftwareEngineering/GameOfLife_WEB | game/migrations/0001_initial.py | Python | apache-2.0 | 615 |
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This library allows to export tracing data to an OTLP collector.
Usage
-----
The **OTLP Span Exporter** allows to export `OpenTelemetry`_ traces to the
`OTLP`_ collector.
You can configure the exporter with the following environment variables:
- :envvar:`OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`
- :envvar:`OTEL_EXPORTER_OTLP_TRACES_PROTOCOL`
- :envvar:`OTEL_EXPORTER_OTLP_TRACES_HEADERS`
- :envvar:`OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`
- :envvar:`OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`
- :envvar:`OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`
- :envvar:`OTEL_EXPORTER_OTLP_TIMEOUT`
- :envvar:`OTEL_EXPORTER_OTLP_PROTOCOL`
- :envvar:`OTEL_EXPORTER_OTLP_HEADERS`
- :envvar:`OTEL_EXPORTER_OTLP_ENDPOINT`
- :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION`
- :envvar:`OTEL_EXPORTER_OTLP_CERTIFICATE`
.. _OTLP: https://github.com/open-telemetry/opentelemetry-collector/
.. _OpenTelemetry: https://github.com/open-telemetry/opentelemetry-python/
.. code:: python
from opentelemetry import trace
from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
# Resource can be required for some backends, e.g. Jaeger
# If resource wouldn't be set - traces wouldn't appears in Jaeger
resource = Resource(attributes={
"service.name": "service"
})
trace.set_tracer_provider(TracerProvider(resource=resource))
tracer = trace.get_tracer(__name__)
otlp_exporter = OTLPSpanExporter()
span_processor = BatchSpanProcessor(otlp_exporter)
trace.get_tracer_provider().add_span_processor(span_processor)
with tracer.start_as_current_span("foo"):
print("Hello world!")
API
---
"""
import enum
class Compression(enum.Enum):
    """Compression algorithms selectable for OTLP HTTP export payloads."""

    NoCompression = "none"
    Deflate = "deflate"
    Gzip = "gzip"
| open-telemetry/opentelemetry-python | exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/__init__.py | Python | apache-2.0 | 2,513 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for VMware VMDK driver.
"""
import mox
from cinder import exception
from cinder.image import glance
from cinder import test
from cinder import units
from cinder.volume import configuration
from cinder.volume.drivers.vmware import api
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim
from cinder.volume.drivers.vmware import vim_util
from cinder.volume.drivers.vmware import vmdk
from cinder.volume.drivers.vmware import vmware_images
from cinder.volume.drivers.vmware import volumeops
class FakeVim(object):
    """Minimal stand-in for the VMware Vim API object used by the driver."""

    @property
    def service_content(self):
        # Permissive mock: tests only need attribute access to succeed.
        return mox.MockAnything()

    @property
    def client(self):
        return mox.MockAnything()

    def Login(self, session_manager, userName, password):
        """Pretend to authenticate; returns a permissive mock session."""
        return mox.MockAnything()
class FakeTaskInfo(object):
    """Fake vCenter task info carrying a state, result and error object."""

    def __init__(self, state, result=None):
        self.state = state
        self.result = result

        # Tasks report failures through an error object exposing
        # 'localizedMessage'; mimic that shape with a local class.
        class FakeError(object):
            def __init__(self):
                self.localizedMessage = None

        self.error = FakeError()
class FakeMor(object):
    """Fake managed object reference: a (type, value) pair."""

    def __init__(self, type, val):
        self._type = type
        self.value = val
class FakeObject(object):
    """Dict-like property bag used to emulate vSphere data objects."""

    def __init__(self):
        self._fields = {}

    def __setitem__(self, key, value):
        self._fields[key] = value

    def __getitem__(self, item):
        # Raises KeyError for unset properties, like a real dict.
        return self._fields[item]
class FakeManagedObjectReference(object):
    """Fake of a suds ManagedObjectReference array wrapper.

    Fixed: ``lis`` previously defaulted to a shared mutable list (``[]``),
    so every instance created without an argument aliased the SAME list
    object. A ``None`` sentinel gives each instance a fresh empty list.
    """

    def __init__(self, lis=None):
        self.ManagedObjectReference = lis if lis is not None else []
class FakeDatastoreSummary(object):
    """Fake datastore summary exposing free-space and capacity figures."""

    def __init__(self, freeSpace, capacity, datastore=None, name=None):
        self.__dict__.update(freeSpace=freeSpace, capacity=capacity,
                             datastore=datastore, name=name)
class FakeSnapshotTree(object):
    """Fake VirtualMachineSnapshotTree node.

    ``tree`` seeds ``rootSnapshotList`` so this one class can stand in
    both for the VM 'snapshot' property and for individual tree nodes.
    """

    def __init__(self, tree=None, name=None,
                 snapshot=None, childSnapshotList=None):
        self.__dict__.update(rootSnapshotList=tree,
                             name=name,
                             snapshot=snapshot,
                             childSnapshotList=childSnapshotList)
class FakeElem(object):
    """Fake ObjectContent element holding a property set."""

    def __init__(self, prop_set=None):
        self.__dict__.update(propSet=prop_set)
class FakeProp(object):
    """Fake DynamicProperty pairing a property name with its value."""

    def __init__(self, name=None, val=None):
        self.__dict__.update(name=name, val=val)
class FakeRetrieveResult(object):
    """Fake RetrieveResult page: a list of objects plus a paging token."""

    def __init__(self, objects, token):
        self.__dict__.update(objects=objects, token=token)
class FakeObj(object):
    """Fake ObjectContent wrapper exposing only the ``obj`` reference."""

    def __init__(self, obj=None):
        self.__dict__.update(obj=obj)
class VMwareEsxVmdkDriverTestCase(test.TestCase):
    """Test class for VMwareEsxVmdkDriver."""
    # Fixed configuration values shared by every test; wired into the
    # mocked Configuration object in setUp().
    IP = 'localhost'
    USERNAME = 'username'
    PASSWORD = 'password'
    VOLUME_FOLDER = 'cinder-volumes'
    API_RETRY_COUNT = 3
    TASK_POLL_INTERVAL = 5.0
    IMG_TX_TIMEOUT = 10  # image transfer timeout (seconds)
    MAX_OBJECTS = 100  # page size for object retrieval
def setUp(self):
super(VMwareEsxVmdkDriverTestCase, self).setUp()
self._config = mox.MockObject(configuration.Configuration)
self._config.append_config_values(mox.IgnoreArg())
self._config.vmware_host_ip = self.IP
self._config.vmware_host_username = self.USERNAME
self._config.vmware_host_password = self.PASSWORD
self._config.vmware_wsdl_location = None
self._config.vmware_volume_folder = self.VOLUME_FOLDER
self._config.vmware_api_retry_count = self.API_RETRY_COUNT
self._config.vmware_task_poll_interval = self.TASK_POLL_INTERVAL
self._config.vmware_image_transfer_timeout_secs = self.IMG_TX_TIMEOUT
self._config.vmware_max_objects_retrieval = self.MAX_OBJECTS
self._driver = vmdk.VMwareEsxVmdkDriver(configuration=self._config)
api_retry_count = self._config.vmware_api_retry_count,
task_poll_interval = self._config.vmware_task_poll_interval,
self._session = api.VMwareAPISession(self.IP, self.USERNAME,
self.PASSWORD, api_retry_count,
task_poll_interval,
create_session=False)
self._volumeops = volumeops.VMwareVolumeOps(self._session,
self.MAX_OBJECTS)
self._vim = FakeVim()
def test_retry(self):
"""Test Retry."""
class TestClass(object):
def __init__(self):
self.counter1 = 0
@api.Retry(max_retry_count=2, inc_sleep_time=0.001,
exceptions=(Exception))
def fail(self):
self.counter1 += 1
raise exception.CinderException('Fail')
test_obj = TestClass()
self.assertRaises(exception.CinderException, test_obj.fail)
self.assertEqual(test_obj.counter1, 3)
    def test_create_session(self):
        """Test create_session."""
        m = self.mox
        # Stub the session's vim property so no real connection is opened;
        # FakeVim's Login returns a permissive mock.
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.ReplayAll()
        self._session.create_session()
        m.UnsetStubs()
        m.VerifyAll()
    def test_do_setup(self):
        """Test do_setup."""
        m = self.mox
        # Inject the pre-built (non-connected) session into the driver so
        # do_setup does not try to establish a real API session.
        m.StubOutWithMock(self._driver.__class__, 'session')
        self._driver.session = self._session
        m.ReplayAll()
        self._driver.do_setup(mox.IgnoreArg())
        m.UnsetStubs()
        m.VerifyAll()
    def test_check_for_setup_error(self):
        """Test check_for_setup_error."""
        # Must simply not raise -- presumably a no-op in this driver
        # (confirm against vmdk.py).
        self._driver.check_for_setup_error()
    def test_get_volume_stats(self):
        """Test get_volume_stats."""
        stats = self._driver.get_volume_stats()
        # The driver reports static identity fields and 'unknown' capacity
        # figures rather than querying the backend.
        self.assertEqual(stats['vendor_name'], 'VMware')
        self.assertEqual(stats['driver_version'], '1.0')
        self.assertEqual(stats['storage_protocol'], 'LSI Logic SCSI')
        self.assertEqual(stats['reserved_percentage'], 0)
        self.assertEqual(stats['total_capacity_gb'], 'unknown')
        self.assertEqual(stats['free_capacity_gb'], 'unknown')
    def test_create_volume(self):
        """Test create_volume."""
        # Must accept any volume without raising -- backing creation is
        # presumably deferred until first attach (confirm in vmdk.py).
        self._driver.create_volume(mox.IgnoreArg())
    def test_success_wait_for_task(self):
        """Test successful wait_for_task."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        result = FakeMor('VirtualMachine', 'my_vm')
        success_task_info = FakeTaskInfo('success', result=result)
        m.StubOutWithMock(vim_util, 'get_object_property')
        # The first poll of the task's 'info' property reports success, so
        # wait_for_task should return that task info immediately.
        vim_util.get_object_property(self._session.vim,
                                     mox.IgnoreArg(),
                                     'info').AndReturn(success_task_info)
        m.ReplayAll()
        ret = self._session.wait_for_task(mox.IgnoreArg())
        self.assertEqual(ret.result, result)
        m.UnsetStubs()
        m.VerifyAll()
    def test_failed_wait_for_task(self):
        """Test failed wait_for_task."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        failed_task_info = FakeTaskInfo('failed')
        m.StubOutWithMock(vim_util, 'get_object_property')
        # A 'failed' task state must surface as a VimFaultException.
        vim_util.get_object_property(self._session.vim,
                                     mox.IgnoreArg(),
                                     'info').AndReturn(failed_task_info)
        m.ReplayAll()
        self.assertRaises(error_util.VimFaultException,
                          self._session.wait_for_task,
                          mox.IgnoreArg())
        m.UnsetStubs()
        m.VerifyAll()
    def test_continue_retrieval(self):
        """Test continue_retrieval."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        # volumeops should delegate straight to vim_util.continue_retrieval.
        self._session.invoke_api(vim_util, 'continue_retrieval',
                                 self._vim, mox.IgnoreArg())
        m.ReplayAll()
        self._volumeops.continue_retrieval(mox.IgnoreArg())
        m.UnsetStubs()
        m.VerifyAll()
    def test_cancel_retrieval(self):
        """Test cancel_retrieval."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        # volumeops should delegate straight to vim_util.cancel_retrieval.
        self._session.invoke_api(vim_util, 'cancel_retrieval',
                                 self._vim, mox.IgnoreArg())
        m.ReplayAll()
        self._volumeops.cancel_retrieval(mox.IgnoreArg())
        m.UnsetStubs()
        m.VerifyAll()
    def test_get_backing(self):
        """Test get_backing."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        # Backing lookup enumerates VirtualMachine objects one page
        # (MAX_OBJECTS) at a time.
        self._session.invoke_api(vim_util, 'get_objects',
                                 self._vim, 'VirtualMachine',
                                 self.MAX_OBJECTS)
        m.ReplayAll()
        self._volumeops.get_backing(mox.IgnoreArg())
        m.UnsetStubs()
        m.VerifyAll()
    def test_get_backing_multiple_retrieval(self):
        """Test get_backing with multiple retrieval."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        # A non-None token means there are more result pages to fetch.
        retrieve_result = FakeRetrieveResult([], 'my_token')
        self._session.invoke_api(vim_util, 'get_objects',
                                 self._vim, 'VirtualMachine',
                                 self.MAX_OBJECTS).AndReturn(retrieve_result)
        m.StubOutWithMock(self._volumeops, 'cancel_retrieval')
        # NOTE(review): 'cancel_retrieval' is stubbed above but the call
        # recorded here is the real continue_retrieval, which records its
        # expectation through the stubbed invoke_api -- looks unintentional;
        # confirm which method was meant to be stubbed.
        self._volumeops.continue_retrieval(retrieve_result)
        m.ReplayAll()
        self._volumeops.get_backing(mox.IgnoreArg())
        m.UnsetStubs()
        m.VerifyAll()
    def test_delete_backing(self):
        """Test delete_backing."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        backing = FakeMor('VirtualMachine', 'my_vm')
        # Deletion issues Destroy_Task on the VM and waits for completion.
        self._session.invoke_api(self._vim, 'Destroy_Task', backing)
        m.StubOutWithMock(self._session, 'wait_for_task')
        self._session.wait_for_task(mox.IgnoreArg())
        m.ReplayAll()
        self._volumeops.delete_backing(backing)
        m.UnsetStubs()
        m.VerifyAll()
    def test_delete_volume_without_backing(self):
        """Test delete_volume without backing."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        m.StubOutWithMock(self._volumeops, 'get_backing')
        # get_backing returns None (no expectation recorded beyond the
        # lookup) -- delete_volume must then complete without deleting.
        self._volumeops.get_backing('hello_world').AndReturn(None)
        m.ReplayAll()
        volume = FakeObject()
        volume['name'] = 'hello_world'
        self._driver.delete_volume(volume)
        m.UnsetStubs()
        m.VerifyAll()
def test_delete_volume_with_backing(self):
"""Test delete_volume with backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
backing = FakeMor('VirtualMachine', 'my_vm')
task = FakeMor('Task', 'my_task')
m.StubOutWithMock(self._volumeops, 'get_backing')
m.StubOutWithMock(self._volumeops, 'delete_backing')
self._volumeops.get_backing('hello_world').AndReturn(backing)
self._volumeops.delete_backing(backing)
m.ReplayAll()
volume = FakeObject()
volume['name'] = 'hello_world'
self._driver.delete_volume(volume)
m.UnsetStubs()
m.VerifyAll()
    def test_create_export(self):
        """Test create_export."""
        # Must accept any args without raising -- export handling is not
        # applicable to this driver.
        self._driver.create_export(mox.IgnoreArg(), mox.IgnoreArg())
    def test_ensure_export(self):
        """Test ensure_export."""
        # Must accept any args without raising (see create_export above).
        self._driver.ensure_export(mox.IgnoreArg(), mox.IgnoreArg())
    def test_remove_export(self):
        """Test remove_export."""
        # Must accept any args without raising (see create_export above).
        self._driver.remove_export(mox.IgnoreArg(), mox.IgnoreArg())
    def test_terminate_connection(self):
        """Test terminate_connection."""
        # Must accept any args (including the force kwarg) without raising.
        self._driver.terminate_connection(mox.IgnoreArg(), mox.IgnoreArg(),
                                          force=mox.IgnoreArg())
    def test_get_host(self):
        """Test get_host."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        instance = FakeObject()
        # Host lookup reads the instance's 'runtime.host' property.
        self._session.invoke_api(vim_util, 'get_object_property',
                                 self._vim, instance, 'runtime.host')
        m.ReplayAll()
        self._volumeops.get_host(instance)
        m.UnsetStubs()
        m.VerifyAll()
    def test_get_hosts(self):
        """Test get_hosts."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        # Host enumeration pages through HostSystem objects.
        self._session.invoke_api(vim_util, 'get_objects', self._vim,
                                 'HostSystem', self.MAX_OBJECTS)
        m.ReplayAll()
        self._volumeops.get_hosts()
        m.UnsetStubs()
        m.VerifyAll()
    def test_is_valid_with_accessible_attr(self):
        """Test _is_valid with accessible attribute."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        datastore = FakeMor('Datastore', 'my_ds')
        # Mount info carries both accessMode and accessible, so _is_valid
        # can decide from the host mount alone (no summary lookup needed).
        mntInfo = FakeObject()
        mntInfo.accessMode = "readWrite"
        mntInfo.accessible = True
        host = FakeMor('HostSystem', 'my_host')
        host_mount = FakeObject()
        host_mount.key = host
        host_mount.mountInfo = mntInfo
        host_mounts = FakeObject()
        host_mounts.DatastoreHostMount = [host_mount]
        self._session.invoke_api(vim_util, 'get_object_property',
                                 self._vim, datastore,
                                 'host').AndReturn(host_mounts)
        m.ReplayAll()
        self.assertTrue(self._volumeops._is_valid(datastore, host))
        m.UnsetStubs()
        m.VerifyAll()
    def test_is_valid_without_accessible_attr(self):
        """Test _is_valid without accessible attribute."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        datastore = FakeMor('Datastore', 'my_ds')
        # Mount info lacks 'accessible' here, so _is_valid must fall back
        # to the datastore summary to determine accessibility.
        mntInfo = FakeObject()
        mntInfo.accessMode = "readWrite"
        host = FakeMor('HostSystem', 'my_host')
        host_mount = FakeObject()
        host_mount.key = host
        host_mount.mountInfo = mntInfo
        host_mounts = FakeObject()
        host_mounts.DatastoreHostMount = [host_mount]
        self._session.invoke_api(vim_util, 'get_object_property',
                                 self._vim, datastore,
                                 'host').AndReturn(host_mounts)
        m.StubOutWithMock(self._volumeops, 'get_summary')
        summary = FakeObject()
        summary.accessible = True
        self._volumeops.get_summary(datastore).AndReturn(summary)
        m.ReplayAll()
        self.assertTrue(self._volumeops._is_valid(datastore, host))
        m.UnsetStubs()
        m.VerifyAll()
    def test_get_dss_rp(self):
        """Test get_dss_rp."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        host = FakeObject()
        # Host properties come back empty, so get_dss_rp must raise
        # VimException for a host with no usable datastores.
        self._session.invoke_api(vim_util, 'get_object_properties',
                                 self._vim, host,
                                 ['datastore', 'parent']).AndReturn([])
        self._session.invoke_api(vim_util, 'get_object_property',
                                 self._vim, mox.IgnoreArg(), 'resourcePool')
        m.ReplayAll()
        self.assertRaises(error_util.VimException, self._volumeops.get_dss_rp,
                          host)
        m.UnsetStubs()
        m.VerifyAll()
    def test_get_dss_rp_without_datastores(self):
        """Test get_dss_rp without datastores."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        host = FakeObject()
        # The 'datastore' property exists but has no value (FakeProp.val is
        # None), which must also be treated as "no datastores" and raise.
        props = [FakeElem(prop_set=[FakeProp(name='datastore')])]
        self._session.invoke_api(vim_util, 'get_object_properties',
                                 self._vim, host,
                                 ['datastore', 'parent']).AndReturn(props)
        self._session.invoke_api(vim_util, 'get_object_property',
                                 self._vim, mox.IgnoreArg(), 'resourcePool')
        m.ReplayAll()
        self.assertRaises(error_util.VimException, self._volumeops.get_dss_rp,
                          host)
        m.UnsetStubs()
        m.VerifyAll()
    def test_get_parent(self):
        """Test get_parent."""
        # Not recursive: the child already has the requested type, so it is
        # returned as-is without any API call.
        child = FakeMor('Parent', 'my_parent')
        parent = self._volumeops._get_parent(child, 'Parent')
        self.assertEqual(parent, child)
        # Recursive: the child's type differs, so _get_parent must walk the
        # 'parent' property until it reaches the requested type.
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        parent = FakeMor('Parent', 'my_parent1')
        child = FakeMor('Child', 'my_child')
        self._session.invoke_api(vim_util, 'get_object_property', self._vim,
                                 child, 'parent').AndReturn(parent)
        m.ReplayAll()
        ret = self._volumeops._get_parent(child, 'Parent')
        self.assertEqual(ret, parent)
        m.UnsetStubs()
        m.VerifyAll()
    def test_get_dc(self):
        """Test get_dc."""
        m = self.mox
        # get_dc is expected to be a thin wrapper over _get_parent with the
        # 'Datacenter' type.
        m.StubOutWithMock(self._volumeops, '_get_parent')
        self._volumeops._get_parent(mox.IgnoreArg(), 'Datacenter')
        m.ReplayAll()
        self._volumeops.get_dc(mox.IgnoreArg())
        m.UnsetStubs()
        m.VerifyAll()
def test_get_vmfolder(self):
"""Test get_vmfolder."""
m = self.mox
m.StubOutWithMock(api.VMwareAPISession, 'vim')
self._session.vim = self._vim
m.StubOutWithMock(self._session, 'invoke_api')
datacenter = FakeMor('Datacenter', 'my_dc')
self._session.invoke_api(vim_util, 'get_object_property', self._vim,
datacenter, 'vmFolder')
m.ReplayAll()
dc = self._volumeops.get_vmfolder(datacenter)
m.UnsetStubs()
m.VerifyAll()
    def test_create_backing(self):
        """Test create_backing."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        folder = FakeMor('Folder', 'my_fol')
        resource_pool = FakeMor('ResourcePool', 'my_rs')
        host = FakeMor('HostSystem', 'my_host')
        task = FakeMor('Task', 'my_task')
        # Backing creation issues CreateVM_Task and waits for the task.
        self._session.invoke_api(self._vim, 'CreateVM_Task', folder,
                                 config=mox.IgnoreArg(), pool=resource_pool,
                                 host=host).AndReturn(task)
        m.StubOutWithMock(self._session, 'wait_for_task')
        task_info = FakeTaskInfo('success', mox.IgnoreArg())
        self._session.wait_for_task(task).AndReturn(task_info)
        name = 'my_vm'
        size_kb = 1 * units.MiB
        disk_type = 'thick'
        ds_name = 'my_ds'
        # The VM config spec is built by _get_create_spec (stubbed here).
        m.StubOutWithMock(self._volumeops, '_get_create_spec')
        self._volumeops._get_create_spec(name, size_kb, disk_type, ds_name)
        m.ReplayAll()
        self._volumeops.create_backing(name, size_kb, disk_type, folder,
                                       resource_pool, host, ds_name)
        m.UnsetStubs()
        m.VerifyAll()
    def test_create_backing_in_inventory_multi_hosts(self):
        """Test _create_backing_in_inventory scanning multiple hosts."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        host1 = FakeObj(obj=FakeMor('HostSystem', 'my_host1'))
        host2 = FakeObj(obj=FakeMor('HostSystem', 'my_host2'))
        host3 = FakeObj(obj=FakeMor('HostSystem', 'my_host3'))
        retrieve_result = FakeRetrieveResult([host1, host3, host2], None)
        m.StubOutWithMock(self._volumeops, 'get_hosts')
        self._volumeops.get_hosts().AndReturn(retrieve_result)
        m.StubOutWithMock(self._driver, '_create_backing')
        volume = FakeObject()
        volume['name'] = 'vol_name'
        backing = FakeMor('VirtualMachine', 'my_back')
        # First two hosts fail (VimException, then VimFaultException); the
        # driver must keep trying and succeed on the third host.
        mux = self._driver._create_backing(volume, host1.obj)
        mux.AndRaise(error_util.VimException('Maintenance mode'))
        mux = self._driver._create_backing(volume, host3.obj)
        mux.AndRaise(error_util.VimFaultException(
            [], 'Bad host connection state'))
        mux = self._driver._create_backing(volume, host2.obj)
        mux.AndReturn(backing)
        # After success the pending retrieval must be cancelled, and
        # continue_retrieval must never be called (stubbed, no expectation).
        m.StubOutWithMock(self._volumeops, 'cancel_retrieval')
        self._volumeops.cancel_retrieval(retrieve_result)
        m.StubOutWithMock(self._volumeops, 'continue_retrieval')
        m.ReplayAll()
        result = self._driver._create_backing_in_inventory(volume)
        self.assertEqual(result, backing)
        m.UnsetStubs()
        m.VerifyAll()
    def test_get_datastore(self):
        """Test get_datastore."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        backing = FakeMor('VirtualMachine', 'my_back')
        datastore = FakeMor('Datastore', 'my_ds')
        # The 'datastore' property is an array wrapper; get_datastore is
        # expected to return its first (only) element.
        datastores = FakeManagedObjectReference([datastore])
        self._session.invoke_api(vim_util, 'get_object_property', self._vim,
                                 backing, 'datastore').AndReturn(datastores)
        m.ReplayAll()
        result = self._volumeops.get_datastore(backing)
        self.assertEqual(result, datastore)
        m.UnsetStubs()
        m.VerifyAll()
    def test_get_summary(self):
        """Test get_summary."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        datastore = FakeMor('Datastore', 'my_ds')
        # get_summary reads the datastore's 'summary' property.
        self._session.invoke_api(vim_util, 'get_object_property', self._vim,
                                 datastore, 'summary')
        m.ReplayAll()
        self._volumeops.get_summary(datastore)
        m.UnsetStubs()
        m.VerifyAll()
    def test_init_conn_with_instance_and_backing(self):
        """Test initialize_connection with instance and backing."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        m.StubOutWithMock(self._volumeops, 'get_backing')
        volume = FakeObject()
        volume['name'] = 'volume_name'
        volume['id'] = 'volume_id'
        volume['size'] = 1
        connector = {'instance': 'my_instance'}
        # A backing already exists, so no new backing is created; the
        # driver only resolves the instance's host.
        backing = FakeMor('VirtualMachine', 'my_back')
        self._volumeops.get_backing(volume['name']).AndReturn(backing)
        m.StubOutWithMock(self._volumeops, 'get_host')
        host = FakeMor('HostSystem', 'my_host')
        self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
        m.ReplayAll()
        conn_info = self._driver.initialize_connection(volume, connector)
        self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
        self.assertEqual(conn_info['data']['volume'], 'my_back')
        self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
        m.UnsetStubs()
        m.VerifyAll()
    def test_get_volume_group_folder(self):
        """Test _get_volume_group_folder."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        datacenter = FakeMor('Datacenter', 'my_dc')
        # The driver resolves the datacenter's vmFolder to place volumes.
        m.StubOutWithMock(self._volumeops, 'get_vmfolder')
        self._volumeops.get_vmfolder(datacenter)
        m.ReplayAll()
        self._driver._get_volume_group_folder(datacenter)
        m.UnsetStubs()
        m.VerifyAll()
    def test_select_datastore_summary(self):
        """Test _select_datastore_summary."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        datastore1 = FakeMor('Datastore', 'my_ds_1')
        datastore2 = FakeMor('Datastore', 'my_ds_2')
        datastore3 = FakeMor('Datastore', 'my_ds_3')
        datastore4 = FakeMor('Datastore', 'my_ds_4')
        datastores = [datastore1, datastore2, datastore3, datastore4]
        m.StubOutWithMock(self._volumeops, 'get_summary')
        # Summaries are (freeSpace, capacity) pairs; each may be consulted
        # several times while comparing candidates, hence MultipleTimes().
        summary1 = FakeDatastoreSummary(10, 10)
        summary2 = FakeDatastoreSummary(25, 50)
        summary3 = FakeDatastoreSummary(50, 50)
        summary4 = FakeDatastoreSummary(100, 100)
        moxd = self._volumeops.get_summary(datastore1)
        moxd.MultipleTimes().AndReturn(summary1)
        moxd = self._volumeops.get_summary(datastore2)
        moxd.MultipleTimes().AndReturn(summary2)
        moxd = self._volumeops.get_summary(datastore3)
        moxd.MultipleTimes().AndReturn(summary3)
        moxd = self._volumeops.get_summary(datastore4)
        moxd.MultipleTimes().AndReturn(summary4)
        m.ReplayAll()
        # The selection is expected to pick the best-fitting datastore for
        # the requested size, and raise when no datastore can hold it.
        summary = self._driver._select_datastore_summary(1, datastores)
        self.assertEqual(summary, summary1)
        summary = self._driver._select_datastore_summary(10, datastores)
        self.assertEqual(summary, summary3)
        summary = self._driver._select_datastore_summary(50, datastores)
        self.assertEqual(summary, summary4)
        self.assertRaises(error_util.VimException,
                          self._driver._select_datastore_summary,
                          100, datastores)
        m.UnsetStubs()
        m.VerifyAll()
    def test_get_folder_ds_summary(self):
        """Test _get_folder_ds_summary."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        size = 1
        resource_pool = FakeMor('ResourcePool', 'my_rp')
        datacenter = FakeMor('Datacenter', 'my_dc')
        # Expected flow: resource pool -> datacenter -> volume folder,
        # then a datastore selection for size converted to bytes (GiB).
        m.StubOutWithMock(self._volumeops, 'get_dc')
        self._volumeops.get_dc(resource_pool).AndReturn(datacenter)
        m.StubOutWithMock(self._driver, '_get_volume_group_folder')
        folder = FakeMor('Folder', 'my_fol')
        self._driver._get_volume_group_folder(datacenter).AndReturn(folder)
        m.StubOutWithMock(self._driver, '_select_datastore_summary')
        size = 1
        datastores = [FakeMor('Datastore', 'my_ds')]
        self._driver._select_datastore_summary(size * units.GiB, datastores)
        m.ReplayAll()
        self._driver._get_folder_ds_summary(size, resource_pool, datastores)
        m.UnsetStubs()
        m.VerifyAll()
    def test_get_disk_type(self):
        """Test _get_disk_type."""
        volume = FakeObject()
        # Without a volume type, the disk type must default to 'thin'.
        volume['volume_type_id'] = None
        self.assertEqual(vmdk.VMwareEsxVmdkDriver._get_disk_type(volume),
                         'thin')
    def test_init_conn_with_instance_no_backing(self):
        """Test initialize_connection with instance and without backing."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        m.StubOutWithMock(self._volumeops, 'get_backing')
        volume = FakeObject()
        volume['name'] = 'volume_name'
        volume['id'] = 'volume_id'
        volume['size'] = 1
        volume['volume_type_id'] = None
        connector = {'instance': 'my_instance'}
        # No backing exists (get_backing returns None), so the driver must
        # create one: resolve host -> datastores/resource pool -> folder
        # and summary -> create_backing.
        self._volumeops.get_backing(volume['name'])
        m.StubOutWithMock(self._volumeops, 'get_host')
        host = FakeMor('HostSystem', 'my_host')
        self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
        m.StubOutWithMock(self._volumeops, 'get_dss_rp')
        resource_pool = FakeMor('ResourcePool', 'my_rp')
        datastores = [FakeMor('Datastore', 'my_ds')]
        self._volumeops.get_dss_rp(host).AndReturn((datastores, resource_pool))
        m.StubOutWithMock(self._driver, '_get_folder_ds_summary')
        folder = FakeMor('Folder', 'my_fol')
        summary = FakeDatastoreSummary(1, 1)
        self._driver._get_folder_ds_summary(volume['size'], resource_pool,
                                            datastores).AndReturn((folder,
                                                                   summary))
        backing = FakeMor('VirtualMachine', 'my_back')
        m.StubOutWithMock(self._volumeops, 'create_backing')
        self._volumeops.create_backing(volume['name'],
                                       volume['size'] * units.MiB,
                                       mox.IgnoreArg(), folder,
                                       resource_pool, host,
                                       mox.IgnoreArg()).AndReturn(backing)
        m.ReplayAll()
        conn_info = self._driver.initialize_connection(volume, connector)
        self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
        self.assertEqual(conn_info['data']['volume'], 'my_back')
        self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
        m.UnsetStubs()
        m.VerifyAll()
    def test_init_conn_without_instance(self):
        """Test initialize_connection without instance and a backing."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        m.StubOutWithMock(self._volumeops, 'get_backing')
        backing = FakeMor('VirtualMachine', 'my_back')
        volume = FakeObject()
        volume['name'] = 'volume_name'
        volume['id'] = 'volume_id'
        # Empty connector (no 'instance'): no host relocation work is
        # expected; the existing backing is returned directly.
        connector = {}
        self._volumeops.get_backing(volume['name']).AndReturn(backing)
        m.ReplayAll()
        conn_info = self._driver.initialize_connection(volume, connector)
        self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
        self.assertEqual(conn_info['data']['volume'], 'my_back')
        self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
        m.UnsetStubs()
        m.VerifyAll()
    def test_create_snapshot_operation(self):
        """Test volumeops.create_snapshot."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        name = 'snapshot_name'
        description = 'snapshot_desc'
        backing = FakeMor('VirtualMachine', 'my_back')
        task = FakeMor('Task', 'my_task')
        # Snapshot creation issues CreateSnapshot_Task without memory dump
        # or quiescing, then waits for the task result.
        self._session.invoke_api(self._vim, 'CreateSnapshot_Task', backing,
                                 name=name, description=description,
                                 memory=False, quiesce=False).AndReturn(task)
        result = FakeMor('VirtualMachineSnapshot', 'my_snap')
        success_task_info = FakeTaskInfo('success', result=result)
        m.StubOutWithMock(self._session, 'wait_for_task')
        self._session.wait_for_task(task).AndReturn(success_task_info)
        m.ReplayAll()
        self._volumeops.create_snapshot(backing, name, description)
        m.UnsetStubs()
        m.VerifyAll()
    def test_create_snapshot_without_backing(self):
        """Test vmdk.create_snapshot without backing."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        m.StubOutWithMock(self._volumeops, 'get_backing')
        snapshot = FakeObject()
        snapshot['volume_name'] = 'volume_name'
        snapshot['name'] = 'snap_name'
        snapshot['volume'] = FakeObject()
        snapshot['volume']['status'] = 'available'
        # get_backing returns None; create_snapshot must complete without
        # recording any snapshot operation.
        self._volumeops.get_backing(snapshot['volume_name'])
        m.ReplayAll()
        self._driver.create_snapshot(snapshot)
        m.UnsetStubs()
        m.VerifyAll()
    def test_create_snapshot_with_backing(self):
        """Test vmdk.create_snapshot with backing."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        m.StubOutWithMock(self._volumeops, 'get_backing')
        snapshot = FakeObject()
        snapshot['volume_name'] = 'volume_name'
        snapshot['name'] = 'snapshot_name'
        snapshot['display_description'] = 'snapshot_desc'
        snapshot['volume'] = FakeObject()
        snapshot['volume']['status'] = 'available'
        backing = FakeMor('VirtualMachine', 'my_back')
        self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
        # With a backing present, the driver must delegate to
        # volumeops.create_snapshot with name and description.
        m.StubOutWithMock(self._volumeops, 'create_snapshot')
        self._volumeops.create_snapshot(backing, snapshot['name'],
                                        snapshot['display_description'])
        m.ReplayAll()
        self._driver.create_snapshot(snapshot)
        m.UnsetStubs()
        m.VerifyAll()
    def test_create_snapshot_when_attached(self):
        """Test vmdk.create_snapshot when volume is attached."""
        snapshot = FakeObject()
        snapshot['volume'] = FakeObject()
        # Snapshots of in-use volumes are rejected with InvalidVolume.
        snapshot['volume']['status'] = 'in-use'
        self.assertRaises(exception.InvalidVolume,
                          self._driver.create_snapshot, snapshot)
    def test_get_snapshot_from_tree(self):
        """Test _get_snapshot_from_tree."""
        volops = volumeops.VMwareVolumeOps
        # A None tree yields None.
        ret = volops._get_snapshot_from_tree(mox.IgnoreArg(), None)
        self.assertEqual(ret, None)
        # Direct hit: the root node carries the requested name.
        name = 'snapshot_name'
        snapshot = FakeMor('VirtualMachineSnapshot', 'my_snap')
        root = FakeSnapshotTree(name='snapshot_name', snapshot=snapshot)
        ret = volops._get_snapshot_from_tree(name, root)
        self.assertEqual(ret, snapshot)
        # Nested hit: the match sits in the child list of a new root.
        snapshot1 = FakeMor('VirtualMachineSnapshot', 'my_snap_1')
        root = FakeSnapshotTree(name='snapshot_name_1', snapshot=snapshot1,
                                childSnapshotList=[root])
        ret = volops._get_snapshot_from_tree(name, root)
        self.assertEqual(ret, snapshot)
    def test_get_snapshot(self):
        """Test get_snapshot."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        name = 'snapshot_name'
        backing = FakeMor('VirtualMachine', 'my_back')
        root = FakeSnapshotTree()
        tree = FakeSnapshotTree(tree=[root])
        # get_snapshot reads the VM's 'snapshot' property, then searches
        # its rootSnapshotList via _get_snapshot_from_tree.
        self._session.invoke_api(vim_util, 'get_object_property',
                                 self._session.vim, backing,
                                 'snapshot').AndReturn(tree)
        volops = volumeops.VMwareVolumeOps
        m.StubOutWithMock(volops, '_get_snapshot_from_tree')
        volops._get_snapshot_from_tree(name, root)
        m.ReplayAll()
        self._volumeops.get_snapshot(backing, name)
        m.UnsetStubs()
        m.VerifyAll()
    def test_delete_snapshot_not_present(self):
        """Test volumeops.delete_snapshot, when not present."""
        m = self.mox
        m.StubOutWithMock(self._volumeops, 'get_snapshot')
        name = 'snapshot_name'
        backing = FakeMor('VirtualMachine', 'my_back')
        # get_snapshot returns None; deletion must be a silent no-op.
        self._volumeops.get_snapshot(backing, name)
        m.ReplayAll()
        self._volumeops.delete_snapshot(backing, name)
        m.UnsetStubs()
        m.VerifyAll()
    def test_delete_snapshot_when_present(self):
        """Test volumeops.delete_snapshot, when it is present."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        m.StubOutWithMock(self._volumeops, 'get_snapshot')
        name = 'snapshot_name'
        backing = FakeMor('VirtualMachine', 'my_back')
        snapshot = FakeMor('VirtualMachineSnapshot', 'my_snap')
        self._volumeops.get_snapshot(backing, name).AndReturn(snapshot)
        task = FakeMor('Task', 'my_task')
        # Deletion issues RemoveSnapshot_Task (children kept) and waits.
        self._session.invoke_api(self._session.vim,
                                 'RemoveSnapshot_Task', snapshot,
                                 removeChildren=False).AndReturn(task)
        m.StubOutWithMock(self._session, 'wait_for_task')
        self._session.wait_for_task(task)
        m.ReplayAll()
        self._volumeops.delete_snapshot(backing, name)
        m.UnsetStubs()
        m.VerifyAll()
    def test_delete_snapshot_without_backing(self):
        """Test delete_snapshot without backing."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        m.StubOutWithMock(self._volumeops, 'get_backing')
        snapshot = FakeObject()
        snapshot['volume_name'] = 'volume_name'
        snapshot['name'] = 'snap_name'
        snapshot['volume'] = FakeObject()
        snapshot['volume']['status'] = 'available'
        # get_backing returns None; delete_snapshot must complete without
        # recording any snapshot deletion.
        self._volumeops.get_backing(snapshot['volume_name'])
        m.ReplayAll()
        self._driver.delete_snapshot(snapshot)
        m.UnsetStubs()
        m.VerifyAll()
def test_delete_snapshot_with_backing(self):
"""Test delete_snapshot with backing."""
m = self.mox
m.StubOutWithMock(self._driver.__class__, 'volumeops')
self._driver.volumeops = self._volumeops
m.StubOutWithMock(self._volumeops, 'get_backing')
snapshot = FakeObject()
snapshot['name'] = 'snapshot_name'
snapshot['volume_name'] = 'volume_name'
snapshot['name'] = 'snap_name'
snapshot['volume'] = FakeObject()
snapshot['volume']['status'] = 'available'
backing = FakeMor('VirtualMachine', 'my_back')
self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
m.StubOutWithMock(self._volumeops, 'delete_snapshot')
self._volumeops.delete_snapshot(backing,
snapshot['name'])
m.ReplayAll()
self._driver.delete_snapshot(snapshot)
m.UnsetStubs()
m.VerifyAll()
    def test_delete_snapshot_when_attached(self):
        """Test delete_snapshot when volume is attached."""
        snapshot = FakeObject()
        snapshot['volume'] = FakeObject()
        # Deleting a snapshot of an in-use volume must be rejected.
        snapshot['volume']['status'] = 'in-use'
        self.assertRaises(exception.InvalidVolume,
                          self._driver.delete_snapshot, snapshot)
    def test_create_cloned_volume_without_backing(self):
        """Test create_cloned_volume without a backing."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        m.StubOutWithMock(self._volumeops, 'get_backing')
        volume = FakeObject()
        volume['name'] = 'volume_name'
        volume['status'] = 'available'
        src_vref = FakeObject()
        src_vref['name'] = 'src_volume_name'
        # The source has no backing; the clone must complete as a no-op.
        self._volumeops.get_backing(src_vref['name'])
        m.ReplayAll()
        self._driver.create_cloned_volume(volume, src_vref)
        m.UnsetStubs()
        m.VerifyAll()
    def test_get_path_name(self):
        """Test get_path_name."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        backing = FakeMor('VirtualMachine', 'my_back')
        # Local fake of the VirtualMachineFileInfo object exposing only
        # the vmPathName attribute read by get_path_name.
        class FakePath(object):
            def __init__(self, path=None):
                self.vmPathName = path
        path = FakePath()
        self._session.invoke_api(vim_util, 'get_object_property', self._vim,
                                 backing, 'config.files').AndReturn(path)
        m.ReplayAll()
        self._volumeops.get_path_name(backing)
        m.UnsetStubs()
        m.VerifyAll()
    def test_delete_file(self):
        """Test _delete_file."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        src_path = 'src_path'
        task = FakeMor('Task', 'my_task')
        # File deletion issues DeleteDatastoreFile_Task and waits for it.
        self._session.invoke_api(self._vim, 'DeleteDatastoreFile_Task',
                                 mox.IgnoreArg(), name=src_path,
                                 datacenter=mox.IgnoreArg()).AndReturn(task)
        m.StubOutWithMock(self._session, 'wait_for_task')
        self._session.wait_for_task(task)
        m.ReplayAll()
        self._volumeops.delete_file(src_path)
        m.UnsetStubs()
        m.VerifyAll()
    def test_clone_backing_by_copying(self):
        """Test _clone_backing_by_copying."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        volume = FakeObject()
        src_vmdk_path = "[datastore] src_vm/src_vm.vmdk"
        new_vmdk_path = "[datastore] dest_vm/dest_vm.vmdk"
        backing = FakeMor('VirtualMachine', 'my_back')
        # Expected flow: create an empty backing, delete its fresh vmdk,
        # then copy the source vmdk over the destination path.
        m.StubOutWithMock(self._driver, '_create_backing_in_inventory')
        mux = self._driver._create_backing_in_inventory(volume)
        mux.AndReturn(backing)
        m.StubOutWithMock(self._volumeops, 'get_vmdk_path')
        self._volumeops.get_vmdk_path(backing).AndReturn(new_vmdk_path)
        m.StubOutWithMock(self._volumeops, 'get_dc')
        datacenter = FakeMor('Datacenter', 'my_dc')
        self._volumeops.get_dc(backing).AndReturn(datacenter)
        m.StubOutWithMock(self._volumeops, 'delete_vmdk_file')
        self._volumeops.delete_vmdk_file(new_vmdk_path, datacenter)
        m.StubOutWithMock(self._volumeops, 'copy_vmdk_file')
        self._volumeops.copy_vmdk_file(datacenter, src_vmdk_path,
                                       new_vmdk_path)
        m.ReplayAll()
        self._driver._clone_backing_by_copying(volume, src_vmdk_path)
        m.UnsetStubs()
        m.VerifyAll()
def test_create_cloned_volume_with_backing(self):
    """Test create_cloned_volume with a backing.

    When the source volume has a backing VM, the driver should clone
    it by copying the source vmdk.
    """
    m = self.mox
    m.StubOutWithMock(self._driver.__class__, 'volumeops')
    self._driver.volumeops = self._volumeops
    m.StubOutWithMock(self._volumeops, 'get_backing')
    volume = FakeObject()
    src_vref = FakeObject()
    src_vref['name'] = 'src_snapshot_name'
    backing = FakeMor('VirtualMachine', 'my_vm')
    self._volumeops.get_backing(src_vref['name']).AndReturn(backing)
    m.StubOutWithMock(self._volumeops, 'get_vmdk_path')
    src_vmdk_path = "[datastore] src_vm/src_vm.vmdk"
    self._volumeops.get_vmdk_path(backing).AndReturn(src_vmdk_path)
    # The clone is delegated to the copy-based helper.
    m.StubOutWithMock(self._driver, '_clone_backing_by_copying')
    self._driver._clone_backing_by_copying(volume, src_vmdk_path)
    m.ReplayAll()
    self._driver.create_cloned_volume(volume, src_vref)
    m.UnsetStubs()
    m.VerifyAll()
def test_create_volume_from_snapshot_without_backing(self):
    """Test create_volume_from_snapshot without a backing.

    get_backing is recorded with no AndReturn, so it yields None and
    the driver should take the no-backing path without further calls.
    """
    m = self.mox
    m.StubOutWithMock(self._driver.__class__, 'volumeops')
    self._driver.volumeops = self._volumeops
    m.StubOutWithMock(self._volumeops, 'get_backing')
    volume = FakeObject()
    volume['name'] = 'volume_name'
    snapshot = FakeObject()
    snapshot['volume_name'] = 'volume_name'
    snapshot['name'] = 'snap_name'
    self._volumeops.get_backing(snapshot['volume_name'])
    m.ReplayAll()
    self._driver.create_volume_from_snapshot(volume, snapshot)
    m.UnsetStubs()
    m.VerifyAll()
def test_create_volume_from_snap_without_backing_snap(self):
    """Test create_volume_from_snapshot without a backing snapshot.

    The backing VM exists but get_snapshot (recorded without
    AndReturn) yields None, so no clone should be attempted.
    """
    m = self.mox
    m.StubOutWithMock(self._driver.__class__, 'volumeops')
    self._driver.volumeops = self._volumeops
    backing = FakeMor('VirtualMachine', 'my_vm')
    m.StubOutWithMock(self._volumeops, 'get_backing')
    volume = FakeObject()
    volume['name'] = 'volume_name'
    snapshot = FakeObject()
    snapshot['volume_name'] = 'volume_name'
    self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
    m.StubOutWithMock(self._volumeops, 'get_snapshot')
    snapshot['name'] = 'snapshot_name'
    self._volumeops.get_snapshot(backing, snapshot['name'])
    m.ReplayAll()
    self._driver.create_volume_from_snapshot(volume, snapshot)
    m.UnsetStubs()
    m.VerifyAll()
def test_create_volume_from_snapshot(self):
    """Test create_volume_from_snapshot.

    With both a backing and a backing snapshot present, the driver
    resolves the snapshot's vmdk path and clones by copying it.
    """
    m = self.mox
    m.StubOutWithMock(self._driver.__class__, 'volumeops')
    self._driver.volumeops = self._volumeops
    backing = FakeMor('VirtualMachine', 'my_vm')
    m.StubOutWithMock(self._volumeops, 'get_backing')
    volume = FakeObject()
    snapshot = FakeObject()
    snapshot['volume_name'] = 'volume_name'
    self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
    m.StubOutWithMock(self._volumeops, 'get_snapshot')
    snapshot['name'] = 'snapshot_name'
    snapshot_mor = FakeMor('VirtualMachineSnapshot', 'my_snap')
    self._volumeops.get_snapshot(backing,
                                 snapshot['name']).AndReturn(snapshot_mor)
    m.StubOutWithMock(self._volumeops, 'get_vmdk_path')
    src_vmdk_path = "[datastore] src_vm/src_vm-001.vmdk"
    # The snapshot MOR (not the backing) supplies the source vmdk path.
    self._volumeops.get_vmdk_path(snapshot_mor).AndReturn(src_vmdk_path)
    m.StubOutWithMock(self._driver, '_clone_backing_by_copying')
    self._driver._clone_backing_by_copying(volume, src_vmdk_path)
    m.ReplayAll()
    self._driver.create_volume_from_snapshot(volume, snapshot)
    m.UnsetStubs()
    m.VerifyAll()
def test_get_entity_name(self):
    """Test volumeops get_entity_name.

    get_entity_name should be a thin wrapper over reading the 'name'
    property of a managed object via vim_util.
    """
    m = self.mox
    m.StubOutWithMock(api.VMwareAPISession, 'vim')
    self._session.vim = self._vim
    m.StubOutWithMock(self._session, 'invoke_api')
    entity = FakeMor('VirtualMachine', 'virt')
    self._session.invoke_api(vim_util, 'get_object_property',
                             self._vim, entity, 'name')
    m.ReplayAll()
    self._volumeops.get_entity_name(entity)
    m.UnsetStubs()
    m.VerifyAll()
def test_get_vmdk_path(self):
    """Test volumeops get_vmdk_path.

    get_vmdk_path scans 'config.hardware.device' for an object whose
    class names match VirtualDisk/VirtualDiskFlatVer2BackingInfo (the
    lookup is by type name, hence the throwaway local classes below)
    and returns the backing file name.
    """
    m = self.mox
    m.StubOutWithMock(api.VMwareAPISession, 'vim')
    self._session.vim = self._vim
    m.StubOutWithMock(self._session, 'invoke_api')
    backing = FakeMor('VirtualMachine', 'my_back')
    vmdk_path = '[datastore 1] folders/myvols/volume-123.vmdk'

    # Local stand-ins whose class *names* mimic the real vim types.
    class VirtualDisk:
        pass
    virtualDisk = VirtualDisk()

    class VirtualDiskFlatVer2BackingInfo:
        pass
    backingInfo = VirtualDiskFlatVer2BackingInfo()
    backingInfo.fileName = vmdk_path
    virtualDisk.backing = backingInfo
    # Surround the disk with unrelated devices to prove filtering works.
    devices = [FakeObject(), virtualDisk, FakeObject()]
    moxed = self._session.invoke_api(vim_util, 'get_object_property',
                                     self._vim, backing,
                                     'config.hardware.device')
    moxed.AndReturn(devices)
    m.ReplayAll()
    actual_vmdk_path = self._volumeops.get_vmdk_path(backing)
    self.assertEqual(backingInfo.__class__.__name__,
                     'VirtualDiskFlatVer2BackingInfo')
    self.assertEqual(virtualDisk.__class__.__name__, 'VirtualDisk')
    self.assertEqual(actual_vmdk_path, vmdk_path)
    m.UnsetStubs()
    m.VerifyAll()
def test_copy_vmdk_file(self):
    """Test copy_vmdk_file.

    Copying a vmdk should call CopyVirtualDisk_Task (with force=True)
    and wait on the resulting task.
    """
    m = self.mox
    m.StubOutWithMock(api.VMwareAPISession, 'vim')
    self._session.vim = self._vim
    m.StubOutWithMock(self._session, 'invoke_api')
    dc_ref = FakeMor('Datacenter', 'dc1')
    src_path = 'src_path'
    dest_path = 'dest_path'
    task = FakeMor('Task', 'my_task')
    self._session.invoke_api(self._vim, 'CopyVirtualDisk_Task',
                             mox.IgnoreArg(), sourceName=src_path,
                             sourceDatacenter=dc_ref, destName=dest_path,
                             destDatacenter=dc_ref,
                             force=True).AndReturn(task)
    m.StubOutWithMock(self._session, 'wait_for_task')
    self._session.wait_for_task(task)
    m.ReplayAll()
    self._volumeops.copy_vmdk_file(dc_ref, src_path, dest_path)
    m.UnsetStubs()
    m.VerifyAll()
def test_delete_vmdk_file(self):
    """Test delete_vmdk_file.

    Deleting a vmdk should call DeleteVirtualDisk_Task and wait on
    the resulting task.
    """
    m = self.mox
    m.StubOutWithMock(api.VMwareAPISession, 'vim')
    self._session.vim = self._vim
    m.StubOutWithMock(self._session, 'invoke_api')
    dc_ref = FakeMor('Datacenter', 'dc1')
    vmdk_path = 'vmdk_path'
    task = FakeMor('Task', 'my_task')
    self._session.invoke_api(self._vim, 'DeleteVirtualDisk_Task',
                             mox.IgnoreArg(), name=vmdk_path,
                             datacenter=dc_ref).AndReturn(task)
    m.StubOutWithMock(self._session, 'wait_for_task')
    self._session.wait_for_task(task)
    m.ReplayAll()
    self._volumeops.delete_vmdk_file(vmdk_path, dc_ref)
    m.UnsetStubs()
    m.VerifyAll()
def test_split_datastore_path(self):
    """Test volumeops split_datastore_path."""
    # Nested folder path: datastore name, folder prefix and file name
    # come back as three separate components.
    datastore, folder, file_name = volumeops.split_datastore_path(
        '[datastore1] myfolder/mysubfolder/myvm.vmx')
    self.assertEqual(datastore, 'datastore1')
    self.assertEqual(folder, 'myfolder/mysubfolder/')
    self.assertEqual(file_name, 'myvm.vmx')
    # Whitespace inside the datastore brackets is stripped.
    datastore, folder, file_name = volumeops.split_datastore_path(
        '[datastore2 ] myfolder/myvm.vmdk')
    self.assertEqual(datastore, 'datastore2')
    self.assertEqual(folder, 'myfolder/')
    self.assertEqual(file_name, 'myvm.vmdk')
    # A path without the '[datastore]' prefix is rejected.
    self.assertRaises(IndexError, volumeops.split_datastore_path,
                      'myfolder/myvm.vmdk')
def test_copy_image_to_volume_non_vmdk(self):
    """Test copy_image_to_volume for a non-vmdk disk format.

    An image whose disk_format is not 'vmdk' must be rejected with
    ImageUnacceptable before any transfer is attempted.
    """
    m = self.mox
    image_id = 'image-123456789'
    image_meta = FakeObject()
    image_meta['disk_format'] = 'novmdk'
    image_service = m.CreateMock(glance.GlanceImageService)
    image_service.show(mox.IgnoreArg(), image_id).AndReturn(image_meta)
    m.ReplayAll()
    self.assertRaises(exception.ImageUnacceptable,
                      self._driver.copy_image_to_volume,
                      mox.IgnoreArg(), mox.IgnoreArg(),
                      image_service, image_id)
    m.UnsetStubs()
    m.VerifyAll()
def test_copy_image_to_volume_vmdk(self):
    """Test copy_image_to_volume with an acceptable vmdk disk format.

    Records the whole happy path: create a backing, resolve the flat
    vmdk location, delete the placeholder flat file, then fetch the
    image from glance straight onto the datastore.
    """
    m = self.mox
    m.StubOutWithMock(self._driver.__class__, 'session')
    self._driver.session = self._session
    m.StubOutWithMock(api.VMwareAPISession, 'vim')
    self._session.vim = self._vim
    m.StubOutWithMock(self._driver.__class__, 'volumeops')
    self._driver.volumeops = self._volumeops
    image_id = 'image-id'
    image_meta = FakeObject()
    image_meta['disk_format'] = 'vmdk'
    image_meta['size'] = 1024 * 1024
    image_service = m.CreateMock(glance.GlanceImageService)
    image_service.show(mox.IgnoreArg(), image_id).AndReturn(image_meta)
    volume = FakeObject()
    vol_name = 'volume name'
    volume['name'] = vol_name
    backing = FakeMor('VirtualMachine', 'my_vm')
    m.StubOutWithMock(self._driver, '_create_backing_in_inventory')
    self._driver._create_backing_in_inventory(volume).AndReturn(backing)
    datastore_name = 'datastore1'
    flat_vmdk_path = 'myvolumes/myvm-flat.vmdk'
    m.StubOutWithMock(self._driver, '_get_ds_name_flat_vmdk_path')
    moxed = self._driver._get_ds_name_flat_vmdk_path(mox.IgnoreArg(),
                                                     vol_name)
    moxed.AndReturn((datastore_name, flat_vmdk_path))
    host = FakeMor('Host', 'my_host')
    m.StubOutWithMock(self._volumeops, 'get_host')
    self._volumeops.get_host(backing).AndReturn(host)
    datacenter = FakeMor('Datacenter', 'my_datacenter')
    m.StubOutWithMock(self._volumeops, 'get_dc')
    self._volumeops.get_dc(host).AndReturn(datacenter)
    datacenter_name = 'my-datacenter'
    m.StubOutWithMock(self._volumeops, 'get_entity_name')
    self._volumeops.get_entity_name(datacenter).AndReturn(datacenter_name)
    flat_path = '[%s] %s' % (datastore_name, flat_vmdk_path)
    # The pre-created flat file is removed before the image download.
    m.StubOutWithMock(self._volumeops, 'delete_file')
    self._volumeops.delete_file(flat_path, datacenter)
    # Fake a suds-style client so the driver can read its cookiejar.
    client = FakeObject()
    client.options = FakeObject()
    client.options.transport = FakeObject()
    cookies = FakeObject()
    client.options.transport.cookiejar = cookies
    m.StubOutWithMock(self._vim.__class__, 'client')
    self._vim.client = client
    m.StubOutWithMock(vmware_images, 'fetch_image')
    timeout = self._config.vmware_image_transfer_timeout_secs
    vmware_images.fetch_image(mox.IgnoreArg(), timeout, image_service,
                              image_id, host=self.IP,
                              data_center_name=datacenter_name,
                              datastore_name=datastore_name,
                              cookies=cookies,
                              file_path=flat_vmdk_path)
    m.ReplayAll()
    self._driver.copy_image_to_volume(mox.IgnoreArg(), volume,
                                      image_service, image_id)
    m.UnsetStubs()
    m.VerifyAll()
def test_copy_volume_to_image_non_vmdk(self):
    """Test copy_volume_to_image for a non-vmdk disk format.

    Uploading to a non-vmdk image format must raise
    ImageUnacceptable even for a detached, available volume.
    """
    m = self.mox
    image_meta = FakeObject()
    image_meta['disk_format'] = 'novmdk'
    volume = FakeObject()
    volume['name'] = 'vol-name'
    # Detached volume: neither an instance nor a host attachment.
    volume['instance_uuid'] = None
    volume['attached_host'] = None
    m.ReplayAll()
    self.assertRaises(exception.ImageUnacceptable,
                      self._driver.copy_volume_to_image,
                      mox.IgnoreArg(), volume,
                      mox.IgnoreArg(), image_meta)
    m.UnsetStubs()
    m.VerifyAll()
def test_copy_volume_to_image_when_attached(self):
    """Test copy_volume_to_image when volume is attached.

    A volume attached to an instance cannot be uploaded; the driver
    must raise InvalidVolume.
    """
    m = self.mox
    volume = FakeObject()
    volume['instance_uuid'] = 'my_uuid'
    m.ReplayAll()
    self.assertRaises(exception.InvalidVolume,
                      self._driver.copy_volume_to_image,
                      mox.IgnoreArg(), volume,
                      mox.IgnoreArg(), mox.IgnoreArg())
    m.UnsetStubs()
    m.VerifyAll()
def test_copy_volume_to_image_vmdk(self):
    """Test copy_volume_to_image for a valid vmdk disk format.

    Records the full upload flow: resolve the backing's vmdk, copy it
    to a temporary vmdk named after the image, upload the flat file to
    glance, then delete the temporary copy.
    """
    m = self.mox
    m.StubOutWithMock(self._driver.__class__, 'session')
    self._driver.session = self._session
    m.StubOutWithMock(api.VMwareAPISession, 'vim')
    self._session.vim = self._vim
    m.StubOutWithMock(self._driver.__class__, 'volumeops')
    self._driver.volumeops = self._volumeops
    image_id = 'image-id-1'
    image_meta = FakeObject()
    image_meta['disk_format'] = 'vmdk'
    image_meta['id'] = image_id
    image_meta['name'] = image_id
    image_service = FakeObject()
    vol_name = 'volume-123456789'
    project_id = 'project-owner-id-123'
    volume = FakeObject()
    volume['name'] = vol_name
    volume['project_id'] = project_id
    # Upload is only allowed for detached volumes.
    volume['instance_uuid'] = None
    volume['attached_host'] = None
    # volumeops.get_backing
    backing = FakeMor("VirtualMachine", "my_vm")
    m.StubOutWithMock(self._volumeops, 'get_backing')
    self._volumeops.get_backing(vol_name).AndReturn(backing)
    # volumeops.get_vmdk_path
    datastore_name = 'datastore1'
    file_path = 'my_folder/my_nested_folder/my_vm.vmdk'
    vmdk_file_path = '[%s] %s' % (datastore_name, file_path)
    m.StubOutWithMock(self._volumeops, 'get_vmdk_path')
    self._volumeops.get_vmdk_path(backing).AndReturn(vmdk_file_path)
    # Temporary copy named after the image id.
    tmp_vmdk = '[datastore1] %s.vmdk' % image_id
    # volumeops.get_host
    host = FakeMor('Host', 'my_host')
    m.StubOutWithMock(self._volumeops, 'get_host')
    self._volumeops.get_host(backing).AndReturn(host)
    # volumeops.get_dc
    datacenter_name = 'my_datacenter'
    datacenter = FakeMor('Datacenter', datacenter_name)
    m.StubOutWithMock(self._volumeops, 'get_dc')
    self._volumeops.get_dc(host).AndReturn(datacenter)
    # volumeops.copy_vmdk_file
    m.StubOutWithMock(self._volumeops, 'copy_vmdk_file')
    self._volumeops.copy_vmdk_file(datacenter, vmdk_file_path, tmp_vmdk)
    # host_ip
    host_ip = self.IP
    # volumeops.get_entity_name
    m.StubOutWithMock(self._volumeops, 'get_entity_name')
    self._volumeops.get_entity_name(datacenter).AndReturn(datacenter_name)
    # cookiejar from the fake suds client
    client = FakeObject()
    client.options = FakeObject()
    client.options.transport = FakeObject()
    cookies = FakeObject()
    client.options.transport.cookiejar = cookies
    m.StubOutWithMock(self._vim.__class__, 'client')
    self._vim.client = client
    # flat_vmdk is what actually gets uploaded
    flat_vmdk_file = '%s-flat.vmdk' % image_id
    # vmware_images.upload_image
    timeout = self._config.vmware_image_transfer_timeout_secs
    m.StubOutWithMock(vmware_images, 'upload_image')
    vmware_images.upload_image(mox.IgnoreArg(), timeout, image_service,
                               image_id, project_id, host=host_ip,
                               data_center_name=datacenter_name,
                               datastore_name=datastore_name,
                               cookies=cookies,
                               file_path=flat_vmdk_file,
                               snapshot_name=image_meta['name'],
                               image_version=1)
    # volumeops.delete_vmdk_file cleans up the temporary copy
    m.StubOutWithMock(self._volumeops, 'delete_vmdk_file')
    self._volumeops.delete_vmdk_file(tmp_vmdk, datacenter)
    m.ReplayAll()
    self._driver.copy_volume_to_image(mox.IgnoreArg(), volume,
                                      image_service, image_meta)
    m.UnsetStubs()
    m.VerifyAll()
def test_retrieve_properties_ex_fault_checker(self):
    """Test retrieve_properties_ex_fault_checker is called.

    Builds a FakeVim whose RetrievePropertiesEx returns None; the
    fault checker should interpret that as a NotAuthenticated fault
    and raise VimFaultException.
    """
    m = self.mox

    class FakeVim(vim.Vim):
        def __init__(self):
            # Skip the real Vim __init__ (no live connection).
            pass

        @property
        def client(self):

            class FakeRetrv(object):
                def RetrievePropertiesEx(self, collector):
                    # Returning None triggers the fault checker.
                    pass

                def __getattr__(self, name):
                    if name == 'service':
                        return FakeRetrv()

            return FakeRetrv()

        def RetrieveServiceContent(self, type='ServiceInstance'):
            return mox.MockAnything()

    _vim = FakeVim()
    m.ReplayAll()
    # retrieve_properties_ex_fault_checker throws authentication error
    self.assertRaises(error_util.VimFaultException,
                      _vim.RetrievePropertiesEx, mox.IgnoreArg())
    m.UnsetStubs()
    m.VerifyAll()
class VMwareVcVmdkDriverTestCase(VMwareEsxVmdkDriverTestCase):
    """Test class for VMwareVcVmdkDriver.

    Inherits the ESX driver tests and overrides/extends them with
    vCenter-only behaviour (folders, relocation, linked clones).
    """

    def setUp(self):
        super(VMwareVcVmdkDriverTestCase, self).setUp()
        # Swap in the vCenter flavour of the driver under test.
        self._driver = vmdk.VMwareVcVmdkDriver(configuration=self._config)

    def test_create_folder_not_present(self):
        """Test create_folder when not present."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        parent_folder = FakeMor('Folder', 'my_par_fol')
        # Empty childEntity list: the folder must be created.
        child_entities = FakeManagedObjectReference()
        self._session.invoke_api(vim_util, 'get_object_property',
                                 self._vim, parent_folder,
                                 'childEntity').AndReturn(child_entities)
        self._session.invoke_api(self._vim, 'CreateFolder', parent_folder,
                                 name='child_folder_name')
        m.ReplayAll()
        # Return value is not asserted here; only the recorded API calls.
        self._volumeops.create_folder(parent_folder, 'child_folder_name')
        m.UnsetStubs()
        m.VerifyAll()

    def test_create_folder_already_present(self):
        """Test create_folder when already present."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        parent_folder = FakeMor('Folder', 'my_par_fol')
        child_folder = FakeMor('Folder', 'my_child_fol')
        child_entities = FakeManagedObjectReference([child_folder])
        self._session.invoke_api(vim_util, 'get_object_property',
                                 self._vim, parent_folder,
                                 'childEntity').AndReturn(child_entities)
        # The existing child's name matches, so no CreateFolder call.
        self._session.invoke_api(vim_util, 'get_object_property',
                                 self._vim, child_folder,
                                 'name').AndReturn('child_folder_name')
        m.ReplayAll()
        fol = self._volumeops.create_folder(parent_folder, 'child_folder_name')
        self.assertEqual(fol, child_folder)
        m.UnsetStubs()
        m.VerifyAll()

    def test_relocate_backing(self):
        """Test relocate_backing."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._volumeops, '_get_relocate_spec')
        datastore = FakeMor('Datastore', 'my_ds')
        resource_pool = FakeMor('ResourcePool', 'my_rp')
        host = FakeMor('HostSystem', 'my_host')
        disk_move_type = 'moveAllDiskBackingsAndAllowSharing'
        self._volumeops._get_relocate_spec(datastore, resource_pool, host,
                                           disk_move_type)
        m.StubOutWithMock(self._session, 'invoke_api')
        backing = FakeMor('VirtualMachine', 'my_back')
        task = FakeMor('Task', 'my_task')
        self._session.invoke_api(self._vim, 'RelocateVM_Task',
                                 backing, spec=mox.IgnoreArg()).AndReturn(task)
        m.StubOutWithMock(self._session, 'wait_for_task')
        self._session.wait_for_task(task)
        m.ReplayAll()
        self._volumeops.relocate_backing(backing, datastore,
                                         resource_pool, host)
        m.UnsetStubs()
        m.VerifyAll()

    def test_move_backing_to_folder(self):
        """Test move_backing_to_folder."""
        m = self.mox
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        backing = FakeMor('VirtualMachine', 'my_back')
        folder = FakeMor('Folder', 'my_fol')
        task = FakeMor('Task', 'my_task')
        self._session.invoke_api(self._vim, 'MoveIntoFolder_Task',
                                 folder, list=[backing]).AndReturn(task)
        m.StubOutWithMock(self._session, 'wait_for_task')
        self._session.wait_for_task(task)
        m.ReplayAll()
        self._volumeops.move_backing_to_folder(backing, folder)
        m.UnsetStubs()
        m.VerifyAll()

    def test_init_conn_with_instance_and_backing(self):
        """Test initialize_connection with instance and backing."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        m.StubOutWithMock(self._volumeops, 'get_backing')
        volume = FakeObject()
        volume['name'] = 'volume_name'
        volume['id'] = 'volume_id'
        volume['size'] = 1
        connector = {'instance': 'my_instance'}
        backing = FakeMor('VirtualMachine', 'my_back')
        self._volumeops.get_backing(volume['name']).AndReturn(backing)
        m.StubOutWithMock(self._volumeops, 'get_host')
        host = FakeMor('HostSystem', 'my_host')
        self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
        datastore = FakeMor('Datastore', 'my_ds')
        resource_pool = FakeMor('ResourcePool', 'my_rp')
        m.StubOutWithMock(self._volumeops, 'get_dss_rp')
        self._volumeops.get_dss_rp(host).AndReturn(([datastore],
                                                    resource_pool))
        # Backing already sits on a host-visible datastore: no relocation.
        m.StubOutWithMock(self._volumeops, 'get_datastore')
        self._volumeops.get_datastore(backing).AndReturn(datastore)
        m.ReplayAll()
        conn_info = self._driver.initialize_connection(volume, connector)
        self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
        self.assertEqual(conn_info['data']['volume'], 'my_back')
        self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
        m.UnsetStubs()
        m.VerifyAll()

    def test_get_volume_group_folder(self):
        """Test _get_volume_group_folder."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        datacenter = FakeMor('Datacenter', 'my_dc')
        m.StubOutWithMock(self._volumeops, 'get_vmfolder')
        self._volumeops.get_vmfolder(datacenter)
        # The configured volume folder is created under the vm folder.
        m.StubOutWithMock(self._volumeops, 'create_folder')
        self._volumeops.create_folder(mox.IgnoreArg(),
                                      self._config.vmware_volume_folder)
        m.ReplayAll()
        self._driver._get_volume_group_folder(datacenter)
        m.UnsetStubs()
        m.VerifyAll()

    def test_init_conn_with_instance_and_backing_and_relocation(self):
        """Test initialize_connection with backing being relocated."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        m.StubOutWithMock(self._volumeops, 'get_backing')
        volume = FakeObject()
        volume['name'] = 'volume_name'
        volume['id'] = 'volume_id'
        volume['size'] = 1
        connector = {'instance': 'my_instance'}
        backing = FakeMor('VirtualMachine', 'my_back')
        self._volumeops.get_backing(volume['name']).AndReturn(backing)
        m.StubOutWithMock(self._volumeops, 'get_host')
        host = FakeMor('HostSystem', 'my_host')
        self._volumeops.get_host(mox.IgnoreArg()).AndReturn(host)
        datastore1 = FakeMor('Datastore', 'my_ds_1')
        datastore2 = FakeMor('Datastore', 'my_ds_2')
        resource_pool = FakeMor('ResourcePool', 'my_rp')
        m.StubOutWithMock(self._volumeops, 'get_dss_rp')
        self._volumeops.get_dss_rp(host).AndReturn(([datastore1],
                                                    resource_pool))
        # Backing is on datastore2, which the host cannot see, so the
        # driver must relocate it to datastore1 and re-file it.
        m.StubOutWithMock(self._volumeops, 'get_datastore')
        self._volumeops.get_datastore(backing).AndReturn(datastore2)
        m.StubOutWithMock(self._driver, '_get_folder_ds_summary')
        folder = FakeMor('Folder', 'my_fol')
        summary = FakeDatastoreSummary(1, 1, datastore1)
        size = 1
        self._driver._get_folder_ds_summary(size, resource_pool,
                                            [datastore1]).AndReturn((folder,
                                                                     summary))
        m.StubOutWithMock(self._volumeops, 'relocate_backing')
        self._volumeops.relocate_backing(backing, datastore1,
                                         resource_pool, host)
        m.StubOutWithMock(self._volumeops, 'move_backing_to_folder')
        self._volumeops.move_backing_to_folder(backing, folder)
        m.ReplayAll()
        conn_info = self._driver.initialize_connection(volume, connector)
        self.assertEqual(conn_info['driver_volume_type'], 'vmdk')
        self.assertEqual(conn_info['data']['volume'], 'my_back')
        self.assertEqual(conn_info['data']['volume_id'], 'volume_id')
        m.UnsetStubs()
        m.VerifyAll()

    def test_get_folder(self):
        """Test _get_folder."""
        m = self.mox
        m.StubOutWithMock(self._volumeops, '_get_parent')
        self._volumeops._get_parent(mox.IgnoreArg(), 'Folder')
        m.ReplayAll()
        self._volumeops._get_folder(mox.IgnoreArg())
        m.UnsetStubs()
        m.VerifyAll()

    def test_volumeops_clone_backing(self):
        """Test volumeops.clone_backing."""
        m = self.mox
        # NOTE(review): '_get_parent' is stubbed but the expectation is
        # recorded through _get_folder, which delegates to _get_parent
        # internally -- confirm against volumeops if this test is changed.
        m.StubOutWithMock(self._volumeops, '_get_parent')
        backing = FakeMor('VirtualMachine', 'my_back')
        folder = FakeMor('Folder', 'my_fol')
        self._volumeops._get_folder(backing).AndReturn(folder)
        m.StubOutWithMock(self._volumeops, '_get_clone_spec')
        name = 'name'
        snapshot = FakeMor('VirtualMachineSnapshot', 'my_snap')
        datastore = FakeMor('Datastore', 'my_ds')
        self._volumeops._get_clone_spec(datastore, mox.IgnoreArg(), snapshot)
        m.StubOutWithMock(api.VMwareAPISession, 'vim')
        self._session.vim = self._vim
        m.StubOutWithMock(self._session, 'invoke_api')
        task = FakeMor('Task', 'my_task')
        self._session.invoke_api(self._vim, 'CloneVM_Task', backing,
                                 folder=folder, name=name,
                                 spec=mox.IgnoreArg()).AndReturn(task)
        m.StubOutWithMock(self._session, 'wait_for_task')
        clone = FakeMor('VirtualMachine', 'my_clone')
        task_info = FakeTaskInfo('success', clone)
        self._session.wait_for_task(task).AndReturn(task_info)
        m.ReplayAll()
        ret = self._volumeops.clone_backing(name, backing, snapshot,
                                            mox.IgnoreArg(), datastore)
        self.assertEqual(ret, clone)
        m.UnsetStubs()
        m.VerifyAll()

    def test_clone_backing_linked(self):
        """Test _clone_backing with clone type - linked."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        m.StubOutWithMock(self._volumeops, 'clone_backing')
        volume = FakeObject()
        volume['name'] = 'volume_name'
        self._volumeops.clone_backing(volume['name'], mox.IgnoreArg(),
                                      mox.IgnoreArg(),
                                      volumeops.LINKED_CLONE_TYPE,
                                      mox.IgnoreArg())
        m.ReplayAll()
        self._driver._clone_backing(volume, mox.IgnoreArg(), mox.IgnoreArg(),
                                    volumeops.LINKED_CLONE_TYPE)
        m.UnsetStubs()
        m.VerifyAll()

    def test_clone_backing_full(self):
        """Test _clone_backing with clone type - full."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        m.StubOutWithMock(self._volumeops, 'get_host')
        backing = FakeMor('VirtualMachine', 'my_vm')
        host = FakeMor('HostSystem', 'my_host')
        self._volumeops.get_host(backing).AndReturn(host)
        m.StubOutWithMock(self._volumeops, 'get_dss_rp')
        datastore = FakeMor('Datastore', 'my_ds')
        datastores = [datastore]
        resource_pool = FakeMor('ResourcePool', 'my_rp')
        self._volumeops.get_dss_rp(host).AndReturn((datastores,
                                                    resource_pool))
        m.StubOutWithMock(self._driver, '_select_datastore_summary')
        volume = FakeObject()
        volume['name'] = 'volume_name'
        volume['size'] = 1
        summary = FakeDatastoreSummary(1, 1, datastore=datastore)
        # A full clone needs a datastore with room for the whole volume.
        self._driver._select_datastore_summary(volume['size'] * units.GiB,
                                               datastores).AndReturn(summary)
        m.StubOutWithMock(self._volumeops, 'clone_backing')
        self._volumeops.clone_backing(volume['name'], backing,
                                      mox.IgnoreArg(),
                                      volumeops.FULL_CLONE_TYPE,
                                      datastore)
        m.ReplayAll()
        self._driver._clone_backing(volume, backing, mox.IgnoreArg(),
                                    volumeops.FULL_CLONE_TYPE)
        m.UnsetStubs()
        m.VerifyAll()

    def test_create_volume_from_snapshot(self):
        """Test create_volume_from_snapshot."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        m.StubOutWithMock(self._volumeops, 'get_backing')
        snapshot = FakeObject()
        snapshot['volume_name'] = 'volume_name'
        snapshot['name'] = 'snapshot_name'
        backing = FakeMor('VirtualMachine', 'my_back')
        self._volumeops.get_backing(snapshot['volume_name']).AndReturn(backing)
        m.StubOutWithMock(self._volumeops, 'get_snapshot')
        snap_mor = FakeMor('VirtualMachineSnapshot', 'my_snap')
        self._volumeops.get_snapshot(backing,
                                     snapshot['name']).AndReturn(snap_mor)
        volume = FakeObject()
        volume['volume_type_id'] = None
        m.StubOutWithMock(self._driver, '_clone_backing')
        self._driver._clone_backing(volume, backing, snap_mor, mox.IgnoreArg())
        m.ReplayAll()
        self._driver.create_volume_from_snapshot(volume, snapshot)
        m.UnsetStubs()
        m.VerifyAll()

    def test_create_cloned_volume_with_backing(self):
        """Test create_cloned_volume with clone type - full."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        m.StubOutWithMock(self._volumeops, 'get_backing')
        backing = FakeMor('VirtualMachine', 'my_back')
        src_vref = FakeObject()
        src_vref['name'] = 'src_vol_name'
        src_vref['status'] = 'available'
        self._volumeops.get_backing(src_vref['name']).AndReturn(backing)
        volume = FakeObject()
        volume['volume_type_id'] = None
        m.StubOutWithMock(self._driver, '_clone_backing')
        self._driver._clone_backing(volume, backing, mox.IgnoreArg(),
                                    volumeops.FULL_CLONE_TYPE)
        m.ReplayAll()
        self._driver.create_cloned_volume(volume, src_vref)
        m.UnsetStubs()
        # Verify the recorded expectations were consumed (was missing).
        m.VerifyAll()

    def test_create_linked_cloned_volume_with_backing(self):
        """Test create_cloned_volume with clone type - linked."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        m.StubOutWithMock(self._volumeops, 'get_backing')
        backing = FakeMor('VirtualMachine', 'my_back')
        src_vref = FakeObject()
        src_vref['name'] = 'src_vol_name'
        src_vref['status'] = 'available'
        self._volumeops.get_backing(src_vref['name']).AndReturn(backing)
        volume = FakeObject()
        volume['id'] = 'volume_id'
        m.StubOutWithMock(vmdk.VMwareVcVmdkDriver, '_get_clone_type')
        moxed = vmdk.VMwareVcVmdkDriver._get_clone_type(volume)
        moxed.AndReturn(volumeops.LINKED_CLONE_TYPE)
        # A linked clone first snapshots the source backing.
        m.StubOutWithMock(self._volumeops, 'create_snapshot')
        name = 'snapshot-%s' % volume['id']
        snapshot = FakeMor('VirtualMachineSnapshot', 'my_snap')
        self._volumeops.create_snapshot(backing, name,
                                        None).AndReturn(snapshot)
        m.StubOutWithMock(self._driver, '_clone_backing')
        self._driver._clone_backing(volume, backing, snapshot,
                                    volumeops.LINKED_CLONE_TYPE)
        m.ReplayAll()
        self._driver.create_cloned_volume(volume, src_vref)
        m.UnsetStubs()
        # Verify the recorded expectations were consumed (was missing).
        m.VerifyAll()

    def test_create_linked_cloned_volume_when_attached(self):
        """Test create_cloned_volume linked clone when volume is attached."""
        m = self.mox
        m.StubOutWithMock(self._driver.__class__, 'volumeops')
        self._driver.volumeops = self._volumeops
        m.StubOutWithMock(self._volumeops, 'get_backing')
        backing = FakeMor('VirtualMachine', 'my_back')
        src_vref = FakeObject()
        src_vref['name'] = 'src_vol_name'
        # An in-use source volume cannot be linked-cloned.
        src_vref['status'] = 'in-use'
        volume = FakeObject()
        self._volumeops.get_backing(src_vref['name']).AndReturn(backing)
        m.StubOutWithMock(vmdk.VMwareVcVmdkDriver, '_get_clone_type')
        moxed = vmdk.VMwareVcVmdkDriver._get_clone_type(volume)
        moxed.AndReturn(volumeops.LINKED_CLONE_TYPE)
        m.ReplayAll()
        self.assertRaises(exception.InvalidVolume,
                          self._driver.create_cloned_volume, volume, src_vref)
        m.UnsetStubs()
        m.VerifyAll()
| rickerc/cinder_audit | cinder/tests/test_vmware_vmdk.py | Python | apache-2.0 | 78,544 |
import flask
from pypi_vm.infrastructure.view_modifiers import response
from pypi_vm.viewmodels.home.home_index_viewmodel import HomeIndexViewModel
blueprint = flask.Blueprint('home', __name__, template_folder='templates')


@blueprint.route('/')
@response(template_file='home/index.html')
def index():
    """Render the site home page from its view model."""
    return HomeIndexViewModel().to_dict()
| Wintellect/WintellectWebinars | 2019-06-06-ten-tips-python-web-devs-kennedy/code/top_10_web_explore/ex07_viewmodels/pypi_vm/views/home_view.py | Python | apache-2.0 | 359 |
# -----------------------------------------------------------------------------
#
# -*- coding: utf-8 -*-
#
# phlox-libdc1394/dc1394/frame.py
#
# Copyright (C) 2016, by Matthias Yang Chen <matthias_cy@outlook.com>
# All rights reserved.
#
# phlox-libdc1394 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# phlox-libdc1394 is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with phlox-libdc1394. If not,
# see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
from ctypes import ARRAY, c_byte
from numpy import ndarray
from .core import *
__all__ = ['Frame']
class Frame(ndarray):
"""
A frame returned by the camera.
All metadata are retained as attributes of the resulting image.
"""
_cam = None
_frame = None
def __new__(cls, camera, frame):
"""
Convert a dc1394 frame into a Frame instance.
:param camera:
:param frame:
:return:
"""
dtype = ARRAY(c_byte, frame.contents.image_bytes)
buf = dtype.from_address(frame.contents.image)
width, height = frame.contents.size
pixels = width * height
endian = frame.contents.little_endian and '<' or '>'
type_str = '%su%i' % (endian, frame.contents.image_bytes / pixels)
img = ndarray.__new__(cls, shape=(height, width), dtype=type_str, buffer=buf)
img.frame_id = frame.contents.id
img.frames_behind = frame.contents.frames_behind
img.position = frame.contents.position
img.packet_size = frame.contents.packet_size
img.packets_per_frame = frame.contents.packet_per_frame
img.timestamp = frame.contents.timestamp
img.video_mode = video_modes[frame.contents.video_mode]
img.data_depth = frame.contents.data_depth
img.color_coding = color_codings[frame.contents.color_coding]
img.color_filter = frame.contents.color_filter
img.yuv_byte_order = frame.contents.yuv_byte_order
img.stride = frame.contents.stride
# save camera and frame for enqueue()
img._frame = frame
img._cam = camera
return img
def __array_finalize__(self, img):
"""
Finalize the new Image class array.
If called with an image object, inherit the properties of that image.
"""
if img is None:
return
# do not inherit _frame and _cam since we also get called on copy()
# and should not hold references to the frame in this case
for key in ["position", "color_coding", "color_filter",
"yuv_byte_order", "stride", "packet_size",
"packets_per_frame", "timestamp", "frames_behind",
"frame_id", "data_depth", "video_mode"]:
setattr(self, key, getattr(img, key, None))
def enqueue(self):
"""
Returns a frame to the ring buffer once it has been used.
This method is also called implicitly on ``del``.
Only call this method on the original frame obtained from
Camera.dequeue` and not on its views, new-from-templates or
copies. Otherwise an AttributeError will be raised.
"""
if not hasattr(self, "_frame"): # or self.base is not None:
raise AttributeError("can only enqueue the original frame")
if self._frame is not None:
dll.dc1394_capture_enqueue(self._cam, self._frame)
self._frame = None
self._cam = None
# from contextlib iport closing
# with closing(camera.dequeue()) as im:
# do stuff with im
close = enqueue
def __del__(self):
try:
self.enqueue()
except AttributeError:
pass
@property
def corrupt(self):
    """
    Whether this frame is corrupt.

    Returns ``True`` if the given frame has been detected to be
    corrupt (missing data, corrupted data, overrun buffer, etc.) and
    ``False`` otherwise.

    .. note::
        Certain types of corruption may go undetected in which case
        ``False`` will be returned erroneously. The ability to
        detect corruption also varies between platforms.

    .. note::
        Corrupt frames still need to be enqueued with `enqueue`
        when no longer needed by the user.
    """
    # delegates detection to libdc1394; only valid on the original frame
    return bool(dll.dc1394_capture_is_frame_corrupt(self._cam, self._frame))
def to_rgb(self):
    """
    Convert the image to an RGB image.

    Array shape is: (image.shape[0], image.shape[1], 3)
    Uses the dc1394_convert_to_RGB8() function for the conversion.
    """
    # output: 3 bytes (R, G, B) per pixel, allocated flat then reshaped
    res = ndarray(3 * self.size, dtype='u1')
    shape = self.shape
    # flat byte view of this frame's raw buffer for the C conversion call
    inp = ndarray(shape=len(self.data), buffer=self.data, dtype='u1')
    # dc1394 expects (width, height) order, i.e. (shape[1], shape[0])
    dll.dc1394_convert_to_RGB8(inp, res, shape[1], shape[0],
                               self.yuv_byte_order, self.color_coding,
                               self.data_depth)
    res.shape = shape[0], shape[1], 3
    return res
def to_mono8(self):
    """
    Convert the image to 8 bit gray scale.

    Uses the dc1394_convert_to_MONO8() function.
    """
    # one byte per pixel output buffer
    res = ndarray(self.size, dtype='u1')
    shape = self.shape
    # flat byte view of this frame's raw buffer for the C conversion call
    inp = ndarray(shape=len(self.data), buffer=self.data, dtype='u1')
    # dc1394 expects (width, height) order, i.e. (shape[1], shape[0])
    dll.dc1394_convert_to_MONO8(inp, res, shape[1], shape[0],
                                self.yuv_byte_order, self.color_coding,
                                self.data_depth)
    res.shape = shape
    return res
def to_yuv422(self):
    """
    Convert the image to YUV422 color format.

    Uses the dc1394_convert_to_YUV422() function. Returns a new array
    viewed as 16-bit unsigned values ('u2'), one per pixel.
    """
    res = ndarray(self.size, dtype='u1')
    shape = self.shape
    # flat byte view of this frame's raw buffer for the C conversion call
    inp = ndarray(shape=len(self.data), buffer=self.data, dtype='u1')
    dll.dc1394_convert_to_YUV422(inp, res, shape[1], shape[0],
                                 self.yuv_byte_order, self.color_coding,
                                 self.data_depth)
    # reinterpret the byte buffer as 16-bit YUV422 samples
    return ndarray(shape=shape, buffer=res.data, dtype='u2')
| PhloxAR/phloxar | PhloxAR/dc1394/frame.py | Python | apache-2.0 | 6,774 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Ian Good
# Copyright 2014 Ryan Lane
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Redis backends for the various services."""
import dateutil.parser
import copy
from keystone import exception
from keystone.openstack.common import timeutils
from keystone import token
from keystone.openstack.common import jsonutils
from common.session import RedisSession
from common import keys
class Token(RedisSession, token.Driver):
    """Keystone token driver backed by redis.

    Token JSON blobs are stored under ``keys.token(token_id)``; an empty
    per-user marker key ``keys.usertoken(user_id, token_id)`` supports
    listing a user's tokens, and revoked token ids are accumulated in the
    ``keys.revoked()`` set. Writes go through ``self.conn``; reads go
    through the (possibly replica) ``self.readonly`` connection.
    """

    def __init__(self, *args, **kwargs):
        RedisSession.__init__(self, *args, **kwargs)

    def flush_all(self):
        # NOTE: flushes EVERY key on the redis server, not just token keys.
        self.conn.flushall()

    def get_token(self, token_id):
        """Return the stored token dict, or raise TokenNotFound.

        Expired tokens (parsed 'expires' <= now) are treated as missing.
        A token with 'expires' set to None never expires.
        """
        token_key = keys.token(token_id)
        value = self.readonly.get(token_key)
        if value:
            token = jsonutils.loads(value)
            if token.get('expires', None) is not None:
                # 'expires' round-trips through JSON as an ISO string
                token['expires'] = dateutil.parser.parse(token['expires'])
                if token['expires'] > timeutils.utcnow():
                    return token
            else:
                return token
        raise exception.TokenNotFound(token_id=token_id)

    def _set_keys(self, user_id, token_id, json_data, ttl_seconds):
        """Write the token key (and optional user marker key) in one pipeline.

        user_id is actually the whole 'user' dict from the token data (or
        None); only user_id['id'] is used to build the marker key.
        """
        pipe = self.conn.pipeline()
        token_key = keys.token(token_id)
        if user_id:
            user_key = keys.usertoken(user_id['id'], token_id)
        if ttl_seconds is None:
            # no TTL configured: keys persist until explicitly deleted
            pipe.set(token_key, json_data)
            if user_id:
                pipe.set(user_key, '')
        else:
            # NOTE(review): redis-py >= 3.0 defines setex(name, time, value);
            # these calls pass (name, value, time) -- confirm the pinned
            # redis client version uses the legacy argument order.
            pipe.setex(token_key, json_data, ttl_seconds)
            if user_id:
                pipe.setex(user_key, '', ttl_seconds)
        pipe.execute()

    def create_token(self, token_id, data):
        """Store a (deep-copied) token, defaulting 'expires', and return it."""
        data_copy = copy.deepcopy(data)
        user_id = data_copy.get('user', None)
        if 'expires' not in data_copy:
            data_copy['expires'] = self._get_default_expire_time()
        json_data = jsonutils.dumps(data_copy)
        self._set_keys(user_id, token_id, json_data, self.ttl_seconds)
        return data_copy

    def _delete_keys(self, user_id, token_id):
        """Delete token + marker keys, record revocation; return delete count.

        The returned value is the result of the first DEL (0 if the token
        key did not exist), which callers use as a found/not-found flag.
        """
        pipe = self.conn.pipeline()
        token_key = keys.token(token_id)
        pipe.delete(token_key, )
        if user_id is not None:
            user_key = keys.usertoken(user_id['id'], token_id)
            pipe.delete(user_key, )
        # remember the id so list_revoked_tokens() can report it
        pipe.sadd(keys.revoked(), token_id)
        return pipe.execute()[0]

    def delete_token(self, token_id):
        """Delete a token, raising TokenNotFound if it does not exist."""
        data = self.get_token(token_id)
        user_id = data.get('user', None)
        if not self._delete_keys(user_id, token_id):
            raise exception.TokenNotFound(token_id=token_id)

    def list_tokens(self, user_id, tenant=None):
        """List token ids for a user via the marker-key pattern.

        'tenant' is accepted for driver-interface compatibility but ignored.
        """
        pattern = keys.usertoken(user_id, '*')
        user_keys = self.readonly.keys(pattern)
        return [keys.parse_usertoken(key)[1] for key in user_keys]

    def list_revoked_tokens(self):
        """Return [{'id': token_id}, ...] for all revoked token ids."""
        return [{'id': s} for s in self.readonly.smembers(keys.revoked())]
| ryan-lane/keystone-redis | keystoneredis/token.py | Python | apache-2.0 | 3,492 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2013 Joe Harris
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
'''
Trivial example of how to ban an IP. See:
http://www.cloudflare.com/docs/client-api.html#s4.7
'''
import os, sys
# make sure our local copy of txcloudflare is in sys.path
PATH_TO_TXCF = '../txcloudflare/'
try:
    import txcloudflare
except ImportError:
    # not installed system-wide: add the sibling checkout to sys.path
    txcfpath = os.path.dirname(os.path.realpath(PATH_TO_TXCF))
    if txcfpath not in sys.path:
        sys.path.insert(0, txcfpath)
from twisted.internet import reactor
# re-import now that sys.path is fixed up (no-op if already imported)
import txcloudflare
def got_response(response):
    '''
    Success callback: print the banned IP and the API action taken,
    then stop the reactor.

    'response' is a txcloudflare.response.Response() instance.
    '''
    # Python 2 print statements -- this example targets py2/Twisted.
    print '< got a response (done)'
    print '< ip: {0}'.format(response.data.get('ip', ''))
    print '< action: {0}'.format(response.data.get('action', ''))
    reactor.stop()
def got_error(error):
    '''
    Errback: print the failure traceback and stop the reactor.

    'error' is a twisted.python.failure.Failure() instance wrapping one of
    the exceptions in txcloudflare.errors. The exceptions return the
    CloudFlare error code, a plain text string and a response object
    (txcloudflare.response.Response). The response object has a 'request'
    parameter if you need to look at the request that generated the error.
    '''
    print '< error'
    print error.printTraceback()
    reactor.stop()
# credentials are read from the environment; empty strings if unset
email_address = os.environ.get('TXCFEMAIL', '')
api_token = os.environ.get('TXCFAPI', '')

if __name__ == '__main__':
    ip = '8.8.8.8'
    print '> banning IP: {0}'.format(ip)
    cloudflare = txcloudflare.client_api(email_address, api_token)
    # fire the async ban request; callbacks above stop the reactor
    cloudflare.ban(ip=ip).addCallback(got_response).addErrback(got_error)
    reactor.run()
'''
EOF
'''
| meeb/txcloudflare | examples/ip_ban.py | Python | apache-2.0 | 2,255 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import networkx as nx
import copy
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER, DEAD_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.ofproto.ofproto_v1_3 import OFP_DEFAULT_PRIORITY
from ryu.topology.api import get_all_switch, get_all_link, get_all_host
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet, arp, icmp
from ryu.lib.packet import ether_types
from ryu.lib import hub
'''
###For 2 chapter###
fig 2-8
pre-install flow entries for end-to-end hosts('h1' and 'h2')
----test----
Linear topology
ICMP
'''
class ProactiveApp(app_manager.RyuApp):
    """Ryu app that discovers the topology and pre-installs end-to-end
    flow entries between exactly two hosts ('h1' and 'h2').

    A background green thread polls the topology every SLEEP_PERIOD
    seconds; when the adjacency matrix changes, shortest paths are
    recomputed (networkx) and flows are (re-)installed along the first
    shortest path for each host pair.
    """
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(ProactiveApp, self).__init__(*args, **kwargs)
        self.mac_to_port = {}
        # background discovery/installation loop
        self.discover_thread = hub.spawn(self.pre_install)
        # {dpid:{port:mac,port:mac,...},dpid:{port:mac,port:mac,...},...} only switches'mac
        self.dpids_port_to_mac = dict()
        # [dpid,dpid,...]
        self.dpids = list()
        # {(dpid,port):host_mac,(dpid,port):host_mac,...} only hosts'mac
        self.dpids_port_to_host = dict()
        # [host_mac,host_mac,host_mac,...]
        self.hosts = list()
        # {(src_dpid,dst_dpid):(src_port,dst_port),():(),...}
        self.links_dpid_to_port = dict()
        # [(src_dpid,dst_dpid),(src_dpid,dst_dpid),...]
        self.links = list()
        self.adjacency_matrix = dict()
        self.pre_adjacency_matrix = dict()
        # {
        #  (dpid,dpid):{xxx:[dpid,dpid,dpid],xxx:[dpid,dpid,dpid,dpid],...},
        #  (dpid,dpid):{xxx:[dpid,dpid,dpid],xxx:[dpid,dpid,dpid,dpid],...},
        #  ...}
        self.path_table = dict()
        # dpid -> live Datapath object, maintained by state_change_handler
        self.dpid_to_dp = dict()
        self.SLEEP_PERIOD = 2  # seconds

    @set_ev_cls(ofp_event.EventOFPStateChange, [MAIN_DISPATCHER, DEAD_DISPATCHER])
    def state_change_handler(self, ev):
        """Track switch connect/disconnect to keep dpid_to_dp current."""
        datapath = ev.datapath
        if ev.state == MAIN_DISPATCHER:
            if not datapath.id in self.dpid_to_dp:
                self.logger.info('register datapath: %04x', datapath.id)
                self.dpid_to_dp[datapath.id] = datapath
        elif ev.state == DEAD_DISPATCHER:
            if datapath.id in self.dpid_to_dp:
                self.logger.info('un register datapath: %04x', datapath.id)
                del self.dpid_to_dp[datapath.id]

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """On switch handshake, install the table-miss entry (send to controller)."""
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        # install table-miss flow entry
        match = parser.OFPMatch()
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, match, actions)

    def add_flow(self, datapath, priority, match, actions, buffer_id=None):
        """Send an OFPFlowMod with APPLY_ACTIONS for the given match/actions."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        if buffer_id:
            mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                    priority=priority, match=match,
                                    instructions=inst)
        else:
            mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                    match=match, instructions=inst)
        datapath.send_msg(mod)

    def pre_install(self):
        """Discovery loop: refresh topology/hosts; on change, recompute
        shortest paths and pre-install flows."""
        while True:
            hub.sleep(self.SLEEP_PERIOD)
            self.pre_adjacency_matrix = copy.deepcopy(self.adjacency_matrix)
            self._update_topology()
            self._update_hosts()
            if self.pre_adjacency_matrix != self.adjacency_matrix:
                self.logger.info('***********discover_topology thread: TOPO UPDATE***********')
                self.path_table = self._get_path_table(self.adjacency_matrix)
                self.pre_install_flow()

    def _update_topology(self):
        """Refresh switch/link caches from ryu.topology and rebuild the
        adjacency matrix when both are non-empty."""
        switch_list = get_all_switch(self)
        if len(switch_list) != 0:
            self.dpids_port_to_mac = self._get_dpids_port_to_mac(switch_list)
            self.dpids = self._get_dpids(switch_list)  # [dpid,dpid,dpid,...]
        link_dict = get_all_link(self)
        if len(link_dict) != 0:
            self.links_dpid_to_port = self._get_links_dpid_to_port(link_dict)
            self.links = self._get_links(self.links_dpid_to_port)  # [(src.dpid,dst.dpid),(src.dpid,dst.dpid),...]
        if self.dpids and self.links:
            self.adjacency_matrix = self._get_adjacency_matrix(self.dpids, self.links)

    def _get_dpids_port_to_mac(self, switch_list):
        """Map dpid -> {port_no: hw_addr} for every switch port."""
        table = dict()
        for switch in switch_list:
            dpid = switch.dp.id
            table.setdefault(dpid, {})
            ports = switch.ports
            for port in ports:
                table[dpid][port.port_no] = port.hw_addr
        return table

    def _get_dpids(self, switch_list):
        dpid_list = list()
        for switch in switch_list:
            dpid_list.append(switch.dp.id)
        return dpid_list

    def _get_links(self, link_ports_table):
        # NOTE(review): returns dict.keys() -- a list on py2, a view on py3;
        # downstream membership tests work either way.
        return link_ports_table.keys()

    def _get_links_dpid_to_port(self, link_dict):
        """Map (src_dpid, dst_dpid) -> (src_port_no, dst_port_no)."""
        table = dict()
        for link in link_dict.keys():
            src = link.src  # ryu.topology.switches.Port
            dst = link.dst
            table[(src.dpid, dst.dpid)] = (src.port_no, dst.port_no)
        return table

    def _get_adjacency_matrix(self, dpids, links):
        """Dense adjacency matrix: 0 on the diagonal, 1 for a direct link,
        inf otherwise."""
        graph = dict()
        for src in dpids:
            graph[src] = dict()
            for dst in dpids:
                graph[src][dst] = float('inf')
                if src == dst:
                    graph[src][dst] = 0
                elif (src, dst) in links:
                    graph[src][dst] = 1
        return graph

    def _get_path_table(self, matrix):
        """Build a networkx graph from the matrix and return all shortest
        paths per ordered dpid pair. Returns None if matrix is empty."""
        if matrix:
            dpids = matrix.keys()
            g = nx.Graph()
            g.add_nodes_from(dpids)
            for i in dpids:
                for j in dpids:
                    if matrix[i][j] == 1:
                        g.add_edge(i, j, weight=1)
            return self.__graph_to_path(g)

    def __graph_to_path(self, g):  # {(i,j):[[],[],...],(i,j):[[],[],[],..],...}
        """All shortest paths for every ordered node pair; unreachable
        pairs get an empty list."""
        all_shortest_paths = dict()
        for i in g.nodes():
            for j in g.nodes():
                if i == j:
                    continue
                all_shortest_paths[(i, j)] = list()
                try:
                    # probe reachability first; NetworkXNoPath -> leave empty
                    nx.shortest_path(g, i, j)
                except nx.exception.NetworkXNoPath:
                    continue
                for each in nx.all_shortest_paths(g, i, j):
                    all_shortest_paths[(i, j)].append(each)
        return all_shortest_paths

    def _update_hosts(self):
        """Refresh host caches from ryu.topology host tracking."""
        host_list = get_all_host(self)
        if host_list:
            self.dpids_port_to_host = self._get_dpids_port_to_host(host_list)
            self.hosts = self._get_hosts(host_list)

    def _get_dpids_port_to_host(self, host_list):
        """Map (dpid, port_no) -> host MAC for each tracked host."""
        table = dict()
        for host in host_list:
            host_mac = host.mac
            host_port = host.port  # Port
            dpid = host_port.dpid
            table[(dpid, host_port.port_no)] = host_mac
        return table

    def _get_hosts(self, host_list):
        hosts = list()
        for host in host_list:
            hosts.append(host.mac)
        return hosts

    def pre_install_flow(self):
        """Install bidirectional flows once exactly two hosts are known."""
        print("execute pre-install flow")
        if len(self.hosts) == 2:
            print("host num:", 2)
            host1 = self.hosts[0]
            host2 = self.hosts[1]
            self._pre_install_flow(host1, host2)
            self._pre_install_flow(host2, host1)

    def _pre_install_flow(self, host1, host2):
        """Install flow entries matching eth_dst=host2 along the first
        shortest path from host1's switch to host2's switch."""
        host1_dpid = None
        host2_dpid = None
        host1_port = None
        host2_port = None
        # locate each host's attachment switch/port
        for dpid_port in self.dpids_port_to_host.keys():
            if self.dpids_port_to_host[dpid_port] == host1:
                host1_dpid = dpid_port[0]
                host1_port = dpid_port[1]
            elif self.dpids_port_to_host[dpid_port] == host2:
                host2_dpid = dpid_port[0]
                host2_port = dpid_port[1]
        if host1_dpid == host2_dpid:
            # same switch: one entry, straight out the destination port
            datapath = self.dpid_to_dp[host1_dpid]
            parser = datapath.ofproto_parser
            priority = OFP_DEFAULT_PRIORITY
            match = parser.OFPMatch(in_port=host1_port, eth_dst=host2)
            actions = [parser.OFPActionOutput(host2_port)]
            self.add_flow(datapath, priority, match, actions)
        else:
            # first shortest path between the two edge switches
            traffic = self.path_table[(host1_dpid, host2_dpid)][0]
            length = len(traffic)
            for i in range(length):
                datapath = self.dpid_to_dp[traffic[i]]
                parser = datapath.ofproto_parser
                priority = OFP_DEFAULT_PRIORITY
                if i == 0:
                    # ingress switch: from host port to first inter-switch link
                    match = parser.OFPMatch(in_port=host1_port, eth_dst=host2)
                    out_port = self.links_dpid_to_port[(traffic[i], traffic[i + 1])][0]
                    actions = [parser.OFPActionOutput(out_port)]
                    self.add_flow(datapath, priority, match, actions)
                elif i == length - 1:
                    # egress switch: from last link to the host port
                    in_port = self.links_dpid_to_port[(traffic[i - 1], traffic[i])][1]
                    match = parser.OFPMatch(in_port=in_port, eth_dst=host2)
                    actions = [parser.OFPActionOutput(host2_port)]
                    self.add_flow(datapath, priority, match, actions)
                else:
                    # transit switch: link in, link out
                    in_port = self.links_dpid_to_port[(traffic[i - 1], traffic[i])][1]
                    out_port = self.links_dpid_to_port[(traffic[i], traffic[i + 1])][0]
                    match = parser.OFPMatch(in_port=in_port, eth_dst=host2)
                    actions = [parser.OFPActionOutput(out_port)]
                    self.add_flow(datapath, priority, match, actions)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Fallback learning-switch behaviour for packets that miss the
        pre-installed flows (learn src port, flood/forward by dst MAC)."""
        # If you hit this you might want to increase
        # the "miss_send_length" of your switch
        if ev.msg.msg_len < ev.msg.total_len:
            self.logger.debug("packet truncated: only %s of %s bytes",
                              ev.msg.msg_len, ev.msg.total_len)
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]
        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            # ignore lldp packet
            return
        dst = eth.dst
        src = eth.src
        ar = pkt.get_protocol(arp.arp)
        ic = pkt.get_protocol(icmp.icmp)
        if isinstance(ar, arp.arp):
            # debug trace: ARP reached the controller
            print("-----arp packet------")
            print("dpid:", datapath.id)
        if isinstance(ic, icmp.icmp):
            # debug trace: ICMP reached the controller (flows not yet installed)
            print("-----icmp packet------")
            print("dpid:", datapath.id)
        dpid = datapath.id
        self.mac_to_port.setdefault(dpid, {})
        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = in_port
        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD
        actions = [parser.OFPActionOutput(out_port)]
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data
        out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        datapath.send_msg(out)
| Zouyiran/ryu | ryu/app/chapter_2/pre_install_app.py | Python | apache-2.0 | 12,687 |
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for yelp dataset module."""
from tensorflow_datasets import testing
from tensorflow_datasets.text import yelp_polarity
class YelpPolarityReviewsTest(testing.DatasetBuilderTestCase):
  """Builder test for the yelp_polarity dataset.

  Uses the shared DatasetBuilderTestCase harness; expects 2 fake examples
  per split in the checked-in test data.
  """
  DATASET_CLASS = yelp_polarity.YelpPolarityReviews
  SPLITS = {
      "train": 2,
      "test": 2,
  }


if __name__ == "__main__":
  testing.test_main()
| tensorflow/datasets | tensorflow_datasets/text/yelp_polarity_test.py | Python | apache-2.0 | 961 |
#!/usr/bin/env python
import sys
sys.path.append("../build/")
import phisSchema
import pyxb
import warnings
# Strategy:
# Perhaps cleanest would be to build a separate interface for data that may vary from VFB.
# This also allows separation of Jython code
# OTOH - this gives another layer of mappings to maintain.
# Sketch of interface:
# minimal vars to set (for now):
# image_id, image URL, source links; expressed feature (+ its type - gene or transgene); classification of struc & overlapped region
# Generator functions live outside the classes. They generate objects that must then be bound.
def gen_OntologyTerm(id_name, ID):
    """Build a phisSchema.OntologyTerm for the given term ID.

    id_name -- dict mapping ontology term IDs to their labels
    ID -- the term ID to look up
    Returns a phisSchema.OntologyTerm object.
    """
    label = id_name[ID]
    term = phisSchema.OntologyTerm()
    term.termId = ID
    term.termLabel = label
    return term
def gen_Link(display_name, url):
    """Build a phisSchema.Link from a display name and a URL.

    display_name -- human-readable label for the link
    url -- the link target URI
    Returns a phisSchema.Link object.
    """
    # fix: the local result used to be named 'gen_Link', shadowing the
    # function itself inside its own body; use a distinct local name.
    link = phisSchema.Link()
    link.display_name = display_name
    link.url = url
    return link
def gen_Annotation(ot, text, mode):
    """Build a phisSchema.Annotation.

    ot -- ontology term (phisSchema.OntologyTerm)
    text -- free-text annotation
    mode -- annotation mode, 'Manual' or 'Automated'
    """
    ann = phisSchema.Annotation()
    ann.ontology_term = ot
    ann.annotation_freetext = text
    ann.annotationMode = mode
    return ann
def gen_roi_Coordinates(x, y, z):
    """Generate a phisSchema.Coordinates object for an roi.

    Each arg specifies a percentage range for one axis in the form of a
    list or tuple with exactly 2 elements, e.g. (0, 100).

    Invalid-length inputs are warned about (once) but, as before, the
    function still attempts to build the Coordinates object.
    """
    # fix: the original used `assert` inside try/bare-except -- asserts are
    # stripped under `python -O` and the bare except swallowed every error.
    # Validate explicitly instead, preserving the warn-and-continue behavior.
    if any(len(axis) != 2 for axis in (x, y, z)):
        warnings.warn("Percent arrays should have only 2 members - specifying a range.")
    coord = phisSchema.Coordinates()
    coord.x_coordinates = _gen_PercentArray(*x)
    coord.y_coordinates = _gen_PercentArray(*y)
    coord.z_coordinates = _gen_PercentArray(*z)
    return coord
def _gen_PercentArray(a, b):
    """Wrap the range endpoints (a, b) in a phisSchema.PercentArray."""
    pa = phisSchema.PercentArray()
    pa.extend((a, b))
    return pa
def gen_GenotypeComponent(gf_symbol=False, gf_id=False, gene_symbol=False, gene_id=False, gf_ensembl_id=False):
    """Generate a phisSchema.GenotypeComponent object.

    All args are strings; pass each one by keyword. Only truthy values
    are set on the result -- omitted fields stay unset.
    """
    gc = phisSchema.GenotypeComponent()
    # (attribute name, supplied value) pairs, in the original assignment order
    fields = (
        ("gene_id", gene_id),
        ("gene_symbol", gene_symbol),
        ("genetic_feature_symbol", gf_symbol),
        ("genetic_feature_id", gf_id),
        ("genetic_feature_ensembl_id", gf_ensembl_id),
    )
    for attr, value in fields:
        if value:
            setattr(gc, attr, value)
    return gc
class imageDataSet():
    """Holds state shared by a set of images from a common source.

    Assumes all datasets have a common source name and URL, and that they
    share a background-channel marker and visualisation methods for the
    background and signal channels. All of these are set via methods
    rather than constructor keyword arguments.
    """

    def __init__(self, ont_dict):
        # shared XML document that individual images append themselves to
        self.doc = phisSchema.Doc()
        self.source = ''
        self.background_channel_marker = ''
        self.signal_channel_visualisation_methods = []
        self.background_channel_visualisation_methods = []
        # ID -> label lookup used when minting OntologyTerm objects
        self.ont_dict = ont_dict

    def set_source(self, source_name, source_url):
        """Record the data source as a Link; both args are strings."""
        self.source = gen_Link(source_name, source_url)

    def set_background_channel_marker(self, genotype_component):
        """genotype_component is a phisSchema.GenotypeComponent object."""
        self.background_channel_marker = genotype_component

    def add_signal_channel_visualisation_method(self, sfid):
        """sfid is the shortFormId of an FBbi visualisation method."""
        method = gen_OntologyTerm(self.ont_dict, sfid)
        self.signal_channel_visualisation_methods.append(method)

    def add_background_channel_visualisation_method(self, sfid):
        """sfid is the shortFormId of an FBbi visualisation method."""
        method = gen_OntologyTerm(self.ont_dict, sfid)
        self.background_channel_visualisation_methods.append(method)
class VfbImage():
    """Interface class for loading VFB data.

    Assumes a 3D confocal image with 2 channels - a background stain
    channel and a signal channel depicting some interesting
    expression/anatomy. By convention channel1/roi1 are the background
    and channel2/roi2 the signal.
    """

    # Shared across all instances; consumed by image_description.host.
    host = gen_Link("Virtual Fly Brain", "http://www.virtualflybrain.org")

    def __init__(self, ont, image_dataset):
        """ont: an ID:name dict of ontology terms used in XML to be produced
        image_dataset: an imageDataSet object
        """
        # NOTE(review): _initialise_image() reads self.vfb_image_id and
        # self.doc, neither of which is set here -- as written this __init__
        # only works for subclasses that set them first (VfbWtAdultBrainImage
        # defines its own __init__ and never calls this one). Confirm before
        # instantiating VfbImage directly.
        self.ont = ont
        self._initialise_image()
        self._unpack_image_dataset(image_dataset)
        self.image.image_description.host = self.host

    def _unpack_image_dataset(self, image_dataset):
        """Copy the dataset-wide settings onto this image."""
        self.set_source(image_dataset.source)
        self.set_expressed_feature_for_background_channel(image_dataset.background_channel_marker)

    def set_organism(self, stage, sex):
        """stage must be a phisSchema.OntologyTerm object; sex must be the
        string 'Male' or 'Female'. Taxon is fixed to D. melanogaster."""
        organism = phisSchema.Organism()
        organism.taxon = "Drosophila melanogaster"
        organism.sex = sex
        organism.ncbi_taxon_id = "NCBItaxon_7227"
        organism.stage = stage
        self.image.organism = organism

    def _initialise_image(self):
        """Assume 2 channels each with an associated ROI at 100%.

        All objects generated by multiple iterations are appended to a
        common doc. IDs for the two channels and corresponding ROIs follow
        the scheme image_id-a/b, roi_id-a/b, channel_id-a/b where
        id = self.vfb_image_id. channel1/roi1 = background;
        channel2/roi2 = signal.
        """
        # Generate root objects
        self.image = phisSchema.Image()
        self.channel1 = phisSchema.Channel()
        self.channel2 = phisSchema.Channel()
        self.roi1 = phisSchema.Roi()
        self.roi2 = phisSchema.Roi()
        # Bind root objects to the shared doc
        self.doc.image.append(self.image)
        self.doc.channel.append(self.channel1)
        self.doc.channel.append(self.channel2)
        self.doc.roi.append(self.roi1)
        self.doc.roi.append(self.roi2)
        # Populate IDs
        self.image.id = "image_" + self.vfb_image_id
        self.channel1.id = "channel_" + self.vfb_image_id + "-a"
        self.channel2.id = "channel_" + self.vfb_image_id + "-b"
        self.roi1.id = "roi_" + self.vfb_image_id + "-a"
        self.roi2.id = "roi_" + self.vfb_image_id + "-b"
        # Cross-link image <-> rois <-> channels. pyxb.BIND() instantiates
        # the optional container elements so we can append to them.
        self.image.associated_roi = pyxb.BIND()
        self.image.associated_roi.el.append(self.roi1.id)
        self.image.associated_roi.el.append(self.roi2.id)
        self.image.associated_channel = pyxb.BIND()
        self.image.associated_channel.el.append(self.channel1.id)
        self.image.associated_channel.el.append(self.channel2.id)
        self.channel1.associated_image = self.image.id
        self.channel2.associated_image = self.image.id
        self.roi1.associated_image = self.image.id
        self.roi2.associated_image = self.image.id
        self.roi1.associated_channel = pyxb.BIND()
        self.roi1.associated_channel.el.append(self.channel1.id)
        self.roi2.associated_channel = pyxb.BIND()
        self.roi2.associated_channel.el.append(self.channel2.id)
        self.channel1.associated_roi = pyxb.BIND()
        self.channel1.associated_roi.el.append(self.roi1.id)
        self.channel2.associated_roi = pyxb.BIND()
        self.channel2.associated_roi.el.append(self.roi2.id)
        # Both ROIs cover the whole image:
        self.roi1.coordinates = gen_roi_Coordinates((0, 100), (0, 100), (0, 100))
        self.roi2.coordinates = gen_roi_Coordinates((0, 100), (0, 100), (0, 100))
        self.depicted_anatomy_background = phisSchema.AnnotationArray()
        self.roi1.depicted_anatomical_structure = self.depicted_anatomy_background
        self.depicted_anatomy_exp_channel = phisSchema.AnnotationArray()
        self.roi2.depicted_anatomical_structure = self.depicted_anatomy_exp_channel
        # Expansions. Add more here as needed.
        self.image_description = phisSchema.ImageDescription()
        self.image.image_description = self.image_description
        self.image.image_description.sample_preparation = pyxb.BIND()
        self.image.image_description.imaging_method = pyxb.BIND()
        # Method 1 - intermediate node, then bind directly
        imaging_methods = phisSchema.OntologyTermArray()
        self.image.image_description.imaging_method = imaging_methods
        imaging_methods.append(gen_OntologyTerm(self.ont, "FBbi_00000251"))  # confocal microscopy
        # Method 2 - pyxb.BIND() expansion
        self.image.image_description.sample_preparation = pyxb.BIND()
        self.image.image_description.sample_preparation.append(gen_OntologyTerm(self.ont, "FBbi_00000024"))  # whole mount tissue
        self.image.image_description.sample_preparation.append(gen_OntologyTerm(self.ont, "FBbi_00000002"))  # chemically fixed

    # Set methods generate the relevant object and bind it.
    def set_dimensions(self, x, y, z=0):
        """x, y and z are dimensions in pixels. z is optional (default 0)."""
        dimensions = phisSchema.Dimensions()
        dimensions.image_width = x
        dimensions.image_height = y
        dimensions.image_depth = z
        self.image_description.image_dimensions = dimensions

    def set_image_and_sample_type(self, wt_or_mut, exp_anat_phen):
        # NOTE(review): both parameters are currently ignored; sample_type
        # and image_type are hard-coded. Confirm intended behaviour.
        self.image.image_description.sample_type = "wild type"
        ita = phisSchema.ImageTypeArray()
        # Use "expression" if the image depicts an expression pattern -
        # otherwise anatomy/phenotype; no known case needs both.
        ita.append("expression")
        self.image.image_description.image_type = ita

    def set_source(self, source):
        """source must be a phisSchema.Link object.
        Assumes source of image and organism are the same."""
        self.image.image_description.image_generated_by = source
        self.image.image_description.organism_generated_by = source

    def set_background_channel_visualisation_method(self, sfid):
        """sfid is the shortFormId of an FBbi visualisation method.

        BUG FIX: this previously assigned to channel2 (the signal channel,
        by convention), silently clobbering any signal-channel method; the
        background channel is channel1.
        """
        self.channel1.visualisation_method = pyxb.BIND()
        self.channel1.visualisation_method.append(gen_OntologyTerm(self.ont, sfid))

    def set_signal_channel_visualisation_method(self, sfid):
        """sfid is the shortFormId of an FBbi visualisation method."""
        self.channel2.visualisation_method = pyxb.BIND()
        self.channel2.visualisation_method.append(gen_OntologyTerm(self.ont, sfid))

    def add_background_depicted_entity(self, sfid, text, mode):
        # By convention, the background channel is always roi1
        annotation = gen_Annotation(gen_OntologyTerm(self.ont, sfid), text, mode)
        self.depicted_anatomy_background.append(annotation)

    def add_depicted_anatomy_for_expressed_feature(self, sfid, text, mode):
        # By convention, the signal channel is always roi2
        annotation = gen_Annotation(gen_OntologyTerm(self.ont, sfid), text, mode)
        self.depicted_anatomy_exp_channel.append(annotation)

    def set_is_expression_pattern(self, s=True):
        """By convention channel2 is the signal channel.

        The schema field is the string 'Yes'/'No' rather than a boolean.
        """
        if s:
            self.channel2.is_expression_pattern = "Yes"
        else:
            self.channel2.is_expression_pattern = "No"

    def set_expressed_feature_for_signal_channel(self, genotype_component):
        """genotype_component: a phisSchema.GenotypeComponent object."""
        self.channel2.depicts_expression_of = genotype_component

    def set_expressed_feature_for_background_channel(self, genotype_component):
        """genotype_component: a phisSchema.GenotypeComponent object."""
        self.channel1.depicts_expression_of = genotype_component

    def set_image_context_url(self, url):
        self.image.image_description.image_context_url = url
class VfbWtAdultBrainImage(VfbImage):
    """Args:
    - ont is a name_id dict lookup for ontology terms.
    - image_dataset is an imageDataSet object
    - vfb_image_id is an id string for the image
    - image_url is also a string

    Compulsory fields to set in order to generate XML:
    - set_sex("Male"/"Female")
    - set_is_expression_pattern(True/False)
    - add_depicted_anatomy_for_expressed_feature(ont_term)
    Other necessary fields to set for usable XML:
    - set_expressed_feature
    - set_visualisation_method
    Set by default:
    - sample prep: chemically fixed; whole mount tissue
    - imaging methods: confocal microscopy
    - image has 2 channels - one background, and one signal.
    - organism: Dmel
    - stage: adult
    - Background channel anatomy: adult brain
    - Dimensions = 512,512,512
    """
    # Consider ditching this subclass if we don't find a bunch of more
    # specific things to say. Might be better to have subclasses for
    # neuron, clone and expression pattern.
    # One doc for all images.

    def __init__(self, ont, image_dataset, vfb_image_id, image_url):
        # NOTE: deliberately does NOT call VfbImage.__init__; it sets
        # vfb_image_id and doc itself before _initialise_image() needs them.
        self.ont = ont
        self.doc = image_dataset.doc
        self.vfb_image_id = vfb_image_id
        self._initialise_image()
        self.image.image_description.image_url = image_url
        self.set_source(image_dataset.source)
        self.stage = gen_OntologyTerm(ont, "FBdv_00005369")  # adult stage; instance attr, not global
        self.image.image_description.host = self.host
        self.set_dimensions(512, 512, 512)
        self.add_background_depicted_entity("FBbt_00003624", "background channel", "Manual")  # adult brain
        ita = phisSchema.ImageTypeArray()
        # "expression" if the image depicts an expression pattern - otherwise
        # anatomy/phenotype; no known case needs both.
        ita.append("expression")
        self.image.image_description.image_type = ita
        self.image.image_description.sample_type = "wild type"

    def set_sex(self, sex):
        """sex = string "Male"/"Female". Automatically sets doc.image.organism."""
        self.set_organism(self.stage, sex)
# Test
# For testing purposes. Will be autogenerated from ontology files in full run)
# Notes
# Assignment is simple - once you get all the way out to a node.
#depicted.termId = "FBbi_1234567"
#depicted.termLabel = "fubar"
# Append and instance of depicted to the list (el)
#image.depicted_anatomical_structure = pyxb.BIND()
#image.depicted_anatomical_structure.append(depicted)
# Testing
#print image.depicted_anatomical_structure.toxml()
# '<?xml version="1.0" ?><depicted_anatomical_structure><el><anatomy_ontology_id>FBbi_1234567</anatomy_ontology_id><anatomy_ontology_term>fubar</anatomy_ontology_term></el></depicted_anatomical_structure>'
# But all this feels quite verbose - can I make use of the Factory methods on some nodes to make this easier?
| PhenoImageShare/PhenoImageShare | VFB_import/src/VFB2PhisXML.py | Python | apache-2.0 | 15,991 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' query.py '''
class QueryHandler(object):
    '''Abstract base for metric query handlers.

    All methods are no-op stubs (they return None); concrete handlers
    are expected to override them with real metric-store queries.

    NOTE(review): ``fetch``/``fetch_max`` spell the environment
    parameter ``envirn`` while ``fetch_backpressure`` uses ``environ``
    - renaming would break keyword callers, so it is only flagged here.
    '''
    def fetch(self, cluster, metric, topology, component, instance, timerange, envirn=None):
        '''Fetch timeseries values for a metric (stub).

        :param cluster: name of the cluster to query
        :param metric: metric name
        :param topology: topology name
        :param component: component name
        :param instance: instance identifier
        :param timerange: time window to query over
        :param envirn: optional environment name
        :return: None (stub; overridden by subclasses)
        '''
        pass
    def fetch_max(self, cluster, metric, topology, component, instance, timerange, envirn=None):
        '''Fetch the maximum value of a metric over the window (stub).

        :param cluster: name of the cluster to query
        :param metric: metric name
        :param topology: topology name
        :param component: component name
        :param instance: instance identifier
        :param timerange: time window to query over
        :param envirn: optional environment name
        :return: None (stub; overridden by subclasses)
        '''
        pass
    def fetch_backpressure(self, cluster, metric, topology, component, instance, \
                           timerange, is_max, environ=None):
        '''Fetch backpressure metrics (stub).

        :param cluster: name of the cluster to query
        :param metric: metric name
        :param topology: topology name
        :param component: component name
        :param instance: instance identifier
        :param timerange: time window to query over
        :param is_max: whether to aggregate using the maximum
        :param environ: optional environment name
        :return: None (stub; overridden by subclasses)
        '''
        pass
| ashvina/heron | heron/tools/common/src/python/access/query.py | Python | apache-2.0 | 1,797 |
import re
import urllib.request
import logging
import math
from bs4 import BeautifulSoup
from Vacation import VacationPackage, Departures, Destinations, Duration
ALL_PAGES = True
DEPARTURE_MAPPING = {
Departures.OTTAWA: "YOW"
}
DEPARTURE_CITY_MAPPING = {
Departures.OTTAWA: "Ottawa"
}
DESTINATION_MAPPING = {
Destinations.MAYAN_RIVIERA: "24%7C3%2C4%2C5%2C7%2C8%2C9%2C10%2C11%2C12%2C14%7C3%2C6%2C130%2C163%2C216%2C273%2C275%2C411%2C422%2C483%2C577%2C603%2C666%2C766%2C775%2C804%2C822%2C836%2C871%2C872%2C873%2C896%2C897%2C908%2C940%2C942%2C974%2C980%2C1001%2C1003%2C1004%2C1006%2C1104%2C1189%2C1350%2C1351%2C1352%2C1373%2C1566%2C1595%2C1607%2C1616%2C1692%2C1695%2C1703%2C1705%2C1708%2C1716%2C1840%2C1900%2C1928%2C2064%2C2082%2C2093%2C2098%2C2114%2C2118%2C2120%2C2172%2C2185%2C2371%2C2565%2C2718%2C2719%2C2739%2C2823%2C3012%2C3062%2C3088%2C3105%2C7700%2C7742%2C8721%2C8791%2C9267%2C9343%2C9422%2C9557%2C9558%2C9575%2C9576%2C10196%2C10314%2C10368%2C10453%2C10647%2C10652%2C10659%2C10663%2C10698%2C10837%2C10849%2C10895%2C10904%2C10971%2C11040%2C11063%2C11116%2C11174%2C11202",
Destinations.ARUBA: "29%7C7%2C14%7C177%2C179%2C958%2C1026%2C1027%2C1028%2C1124%2C1364%2C1564%2C1680%2C1681%2C1733%2C2054%2C10351%2C10650"
}
DESTINATION_CITY_MAPPING = {
Destinations.MAYAN_RIVIERA: "Riviera+Maya",
Destinations.ARUBA: "Aruba"
}
DESTINATION_COUNTRY_MAPPING = {
Destinations.MAYAN_RIVIERA: "Mexico",
Destinations.ARUBA: "Aruba"
}
DURATION_MAPPING = {
Duration.DAYS_7: "7DAYS",
Duration.DAYS_10: "10DAYS"
}
class ExpediaScraper:
    """Facade that turns a generic vacation request into Expedia results."""

    def fetch_vacation_packages(self, vacation_request):
        """Translate *vacation_request* into Expedia terms and scrape matches."""
        request = ExpediaVacation_request(vacation_request)
        scraper = ExpediaVacationScraper(request)
        return scraper.fetch_vacation_packages()
class ExpediaVacation_request:
    """Expedia-specific view of a generic vacation request.

    Translates the request's departure/destination/duration enums into the
    codes and strings Expedia's search URL expects.
    """

    def __init__(self, vacation_request):
        departure = vacation_request.departure_city
        destination = vacation_request.destination
        self.vacation_request = vacation_request
        self.from_code = DEPARTURE_MAPPING[departure]
        self.origin_city = DEPARTURE_CITY_MAPPING[departure]
        self.to = DESTINATION_MAPPING[destination]
        self.to_city = DESTINATION_CITY_MAPPING[destination]
        self.to_country = DESTINATION_COUNTRY_MAPPING[destination]
        self.date = vacation_request.date
        self.duration = DURATION_MAPPING[vacation_request.duration]
        self.occupancy = "D"  # double occupancy
        self.adults = str(vacation_request.adults)
class ExpediaVacationScraper:
    """Scrapes Expedia's all-inclusive search results for one request.

    Pages through every result page, parsing each "flex-card" entry into a
    VacationPackage.
    """

    def __init__(self, expedia_vacation_request):
        # Flatten the request fields onto the scraper for URL building.
        self.expedia_vacation_request = expedia_vacation_request
        self.from_code = expedia_vacation_request.from_code
        self.origin_city = expedia_vacation_request.origin_city
        self.to = expedia_vacation_request.to
        self.to_city = expedia_vacation_request.to_city
        self.to_country = expedia_vacation_request.to_country
        self.date = expedia_vacation_request.date
        self.duration = expedia_vacation_request.duration
        self.occupancy = expedia_vacation_request.occupancy
        self.adults = expedia_vacation_request.adults
        self.original_duration = expedia_vacation_request.vacation_request.duration

    def fetch_vacation_packages(self):
        """Fetch and parse every result page; return a list of VacationPackage."""
        results = []
        page = 0
        fetch_page = True
        while fetch_page:
            fetch_page = False  # set back to True below if another page exists
            page += 1
            # NOTE: self.to is already percent-encoded (see DESTINATION_MAPPING),
            # so the URL is built by concatenation rather than urlencode.
            url = "https://www.expedia.ca/all-inclusive-search?origin=" + self.from_code + "&destination=" + self.to + "&fromDate=" + self.date + "&duration=" + self.duration + "&pagingFlag=Y&pageIndex=" + str(
                page) + "&occupancy=" + self.occupancy + "&originCityName=" + self.origin_city + "&destinationCityName=" + self.to_city + "&country=" + self.to_country + "&sortBy=&langid=4105&numAdults=" + self.adults + "&numChildren=0&numRooms=1"
            logging.debug("Fetching URL " + url)
            f = urllib.request.urlopen(url)
            html = f.read()
            logging.info("Done, parsing results")
            soup = BeautifulSoup(html, "html.parser")
            for tag in soup.find_all("div", class_="flex-card"):
                # all stop information
                primaryBlock = tag.find_all('div', class_='flex-area-primary')
                name = primaryBlock[0].find_all('h5')[0].find_all('a')[0].get_text().strip()
                primary_items = primaryBlock[0].find_all('div', class_='secondary')
                city = primary_items[0].get_text().strip()
                descr = primary_items[1].get_text().strip()  # NOTE(review): parsed but unused
                dates = primary_items[2].get_text().strip()
                oper = primary_items[3].find(text=True).strip()
                # e.g. "Operated by Sunwing, ..." -> "Sunwing"
                match = re.search('Operated by (.*),', oper)
                oper = match.group(1).strip()
                # e.g. "Depart: Jan 1 Return: Jan 8"
                match = re.search('Depart:(.*)Return:(.*)', dates)
                depart = match.group(1).strip()
                retr = match.group(2).strip()
                secondaryBlock = tag.find_all('div', class_='flex-area-secondary')
                secondaryItems = secondaryBlock[0].find_all('div', class_='h1')
                children = secondaryItems[0].findChildren()
                # Presumably the second child holds the discounted price when
                # two are shown - TODO confirm against live markup.
                if (len(children) > 1):
                    cost = children[1].get_text().strip()
                else:
                    cost = children[0].get_text().strip()
                # Strip "C$" and thousands separators, e.g. "C$1,234" -> 1234.
                finalCost = int(cost.replace(',', '').replace('C$', ''))
                package = VacationPackage(name, oper, city, depart, retr, self.original_duration.name, finalCost)
                results.append(package)
            # Pagination: keep fetching while more pages remain (and ALL_PAGES).
            nav = soup.find_all("nav", class_="pagination")
            if len(nav) > 0:
                nav = nav[0]
                data_per_page = int(nav.attrs['data-per-page'])
                total_data = int(nav.attrs['data-total-results'])
                number_of_pages = math.ceil(total_data / data_per_page)
                if page < number_of_pages and ALL_PAGES:
                    logging.info("Completed " + str(page) + "/" + str(number_of_pages) + " pages from " + str(
                        total_data) + " results")
                    logging.info("Reading next page")
                    fetch_page = True
        logging.info("Parsing complete")
        return results
| astone282/vactrack | vactrack/Expedia.py | Python | apache-2.0 | 6,312 |
import numpy as np

# Benchmark log: CSV rows of (op_index, elapsed_time, op_type), where
# op_type is 'WRITE' or 'READ'.
# (Renamed from `file`, which shadowed the builtin.)
DATA_FILE = "/home/rishabh/IdeaProjects/jitd/java/benchmark_20151213_224704_freqWrites/benchmark_scatter_1024m.txt"


def summarize(rows):
    """Aggregate benchmark rows into read/write statistics.

    rows: iterable of 3-item records (index, elapsed, op_type); op_type
    'WRITE' counts as a write, anything else as a read.  Bytes labels
    (as produced by np.genfromtxt under Python 3) are decoded first.

    Returns a dict with keys: average_write, average_read, max_write,
    max_read, total_time.  Averages are 0.0 when the corresponding
    count is zero (the original script crashed on that edge case).
    """
    write_total = 0.0
    read_total = 0.0
    max_read = 0
    max_write = 0
    read_count = 0
    write_count = 0
    total_time = 0
    for row in rows:
        op = row[2]
        if isinstance(op, bytes):  # np.genfromtxt yields bytes on Python 3
            op = op.decode()
        elapsed = row[1]
        if op == 'WRITE':
            write_total += float(elapsed)
            max_write = max(max_write, elapsed)
            write_count += 1
        else:
            read_total += float(elapsed)
            max_read = max(max_read, elapsed)
            read_count += 1
        total_time += elapsed
    return {
        'average_write': write_total / write_count if write_count else 0.0,
        'average_read': read_total / read_count if read_count else 0.0,
        'max_write': max_write,
        'max_read': max_read,
        'total_time': total_time,
    }


def main():
    """Load the benchmark log and print aggregate read/write statistics."""
    data = np.genfromtxt(DATA_FILE, dtype=None, delimiter=',', names=['x', 'y', 'z'])
    stats = summarize(data)
    # Same labels/order as the original Python 2 script.
    print("average_write: " + str(stats['average_write']))
    print("average_read: " + str(stats['average_read']))
    print("max_write: " + str(stats['max_write']))
    print("max_read: " + str(stats['max_read']))
    print("total_time: " + str(stats['total_time']))


if __name__ == '__main__':
    main()
| rmp91/jitd | java/stats.py | Python | apache-2.0 | 893 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pkg_resources
import unittest
from datetime import datetime, timedelta
from pylons import tmpl_context as c
from tg import config
import mock
from alluratest.controller import setup_basic_test, setup_global_objects, setup_trove_categories
from allura.tests import decorators as td
from allura.model import User, Project, TroveCategory
from allura.lib import helpers as h
from allura import model as M
from forgeuserstats.model import stats as USM
# important to be distinct from 'test' which ForgeGit uses, so that the
# tests can run in parallel and not clobber each other
test_project_with_repo = 'test2'
with_git = td.with_tool(test_project_with_repo, 'Git',
'src-git', 'Git', type='git')
class TestUserStats(unittest.TestCase):
    """Exercises the per-user statistics model (logins, artifacts,
    tickets, commits), including total vs. last-month counters and
    per-category/per-type breakdowns.

    NOTE: this module is Python 2 code (e.g. the ``04`` integer
    literals in test_start_date are invalid syntax on Python 3).
    """

    def setUp(self):
        """Create the app/test context and load the shared test user."""
        setup_basic_test()
        setup_global_objects()
        self.user = User.by_username('test-user-2')
        c.user = self.user

    def test_init_values(self):
        """A fresh user starts with every counter at zero (or None)."""
        artifacts = self.user.stats.getArtifacts()
        tickets = self.user.stats.getTickets()
        commits = self.user.stats.getCommits()
        assert self.user.stats.tot_logins_count == 0
        assert artifacts['created'] == 0
        assert artifacts['modified'] == 0
        assert tickets['assigned'] == 0
        assert tickets['solved'] == 0
        assert tickets['revoked'] == 0
        assert tickets['averagesolvingtime'] is None
        assert commits['number'] == 0
        assert commits['lines'] == 0
        lmartifacts = self.user.stats.getLastMonthArtifacts()
        lmtickets = self.user.stats.getLastMonthTickets()
        lmcommits = self.user.stats.getLastMonthCommits()
        assert self.user.stats.getLastMonthLogins() == 0
        assert lmartifacts['created'] == 0
        assert lmartifacts['modified'] == 0
        assert lmtickets['assigned'] == 0
        assert lmtickets['solved'] == 0
        assert lmtickets['revoked'] == 0
        assert lmtickets['averagesolvingtime'] is None
        assert lmcommits['number'] == 0
        assert lmcommits['lines'] == 0

    @td.with_user_project('test-user-2')
    def test_create_artifact_stats(self):
        """addNewArtifact bumps 'created' counters (total, by type, by
        category); artifacts dated >1 month ago leave last-month counters
        untouched."""
        setup_trove_categories()
        p = Project.query.get(shortname='u/test-user-2')
        topic = TroveCategory.query.get(shortname='scientific')
        init_lm_art = self.user.stats.getLastMonthArtifacts()
        init_art = self.user.stats.getArtifacts()
        init_art_wiki = self.user.stats.getArtifacts(art_type='Wiki')
        init_art_by_type = self.user.stats.getArtifactsByType()
        init_lm_art_by_type = self.user.stats.getLastMonthArtifactsByType()
        init_art_sci = self.user.stats.getArtifacts(category=topic._id)
        self.user.stats.addNewArtifact('Wiki', datetime.utcnow(), p)
        lm_art = self.user.stats.getLastMonthArtifacts()
        artifacts = self.user.stats.getArtifacts()
        art_wiki = self.user.stats.getArtifacts(art_type='Wiki')
        art_by_type = self.user.stats.getArtifactsByType()
        lm_art_by_type = self.user.stats.getLastMonthArtifactsByType()
        assert lm_art['created'] == init_lm_art['created'] + 1
        assert lm_art['modified'] == init_lm_art['modified']
        assert artifacts['created'] == init_art['created'] + 1
        assert artifacts['modified'] == init_art['modified']
        assert art_wiki['created'] == init_art_wiki['created'] + 1
        assert art_wiki['modified'] == init_art_wiki['modified']
        assert art_by_type['Wiki'][
            'created'] == init_art_by_type['Wiki']['created'] + 1
        assert art_by_type['Wiki'][
            'modified'] == init_art_by_type['Wiki']['modified']
        assert lm_art_by_type['Wiki'][
            'created'] == init_lm_art_by_type['Wiki']['created'] + 1
        assert lm_art_by_type['Wiki'][
            'modified'] == init_lm_art_by_type['Wiki']['modified']
        # In that case, last month stats should not be changed
        new_date = datetime.utcnow() + timedelta(-32)
        self.user.stats.addNewArtifact('Wiki', new_date, p)
        lm_art = self.user.stats.getLastMonthArtifacts()
        artifacts = self.user.stats.getArtifacts()
        art_wiki = self.user.stats.getArtifacts(art_type='Wiki')
        art_by_type = self.user.stats.getArtifactsByType()
        lm_art_by_type = self.user.stats.getLastMonthArtifactsByType()
        assert lm_art['created'] == init_lm_art['created'] + 1
        assert lm_art['modified'] == init_lm_art['modified']
        assert artifacts['created'] == init_art['created'] + 2
        assert artifacts['modified'] == init_art['modified']
        assert art_wiki['created'] == init_art_wiki['created'] + 2
        assert art_wiki['modified'] == init_art_wiki['modified']
        assert art_by_type['Wiki'][
            'created'] == init_art_by_type['Wiki']['created'] + 2
        assert art_by_type['Wiki'][
            'modified'] == init_art_by_type['Wiki']['modified']
        assert lm_art_by_type['Wiki'][
            'created'] == init_lm_art_by_type['Wiki']['created'] + 1
        assert lm_art_by_type['Wiki'][
            'modified'] == init_lm_art_by_type['Wiki']['modified']
        # Give the project a trove topic so the artifact is also counted
        # in the per-category breakdown.
        p.trove_topic = [topic._id]
        self.user.stats.addNewArtifact('Wiki', datetime.utcnow(), p)
        lm_art = self.user.stats.getLastMonthArtifacts()
        artifacts = self.user.stats.getArtifacts()
        art_wiki = self.user.stats.getArtifacts(art_type='Wiki')
        art_by_type = self.user.stats.getArtifactsByType()
        lm_art_by_type = self.user.stats.getLastMonthArtifactsByType()
        art_sci = self.user.stats.getArtifacts(category=topic._id)
        art_by_cat = self.user.stats.getArtifactsByCategory(detailed=True)
        assert lm_art['created'] == init_lm_art['created'] + 2
        assert lm_art['modified'] == init_lm_art['modified']
        assert artifacts['created'] == init_art['created'] + 3
        assert artifacts['modified'] == init_art['modified']
        assert art_wiki['created'] == init_art_wiki['created'] + 3
        assert art_wiki['modified'] == init_art_wiki['modified']
        assert art_by_type['Wiki'][
            'created'] == init_art_by_type['Wiki']['created'] + 3
        assert art_by_type['Wiki'][
            'modified'] == init_art_by_type['Wiki']['modified']
        assert lm_art_by_type['Wiki'][
            'created'] == init_lm_art_by_type['Wiki']['created'] + 2
        assert lm_art_by_type['Wiki'][
            'modified'] == init_lm_art_by_type['Wiki']['modified']
        assert art_sci['created'] == init_art_sci['created'] + 1
        assert art_sci['modified'] == init_art_sci['modified']
        assert dict(messagetype='Wiki', created=1,
                    modified=0) in art_by_cat[topic]
        art_by_cat = self.user.stats.getArtifactsByCategory(detailed=False)
        assert art_by_cat[topic]['created'] == 1 and art_by_cat[
            topic]['modified'] == 0

    @td.with_user_project('test-user-2')
    def test_modify_artifact_stats(self):
        """addModifiedArtifact bumps 'modified' counters; mirror image of
        test_create_artifact_stats."""
        setup_trove_categories()
        p = Project.query.get(shortname='u/test-user-2')
        topic = TroveCategory.query.get(shortname='scientific')
        init_lm_art = self.user.stats.getLastMonthArtifacts()
        init_art = self.user.stats.getArtifacts()
        init_art_wiki = self.user.stats.getArtifacts(art_type='Wiki')
        init_art_by_type = self.user.stats.getArtifactsByType()
        init_lm_art_by_type = self.user.stats.getLastMonthArtifactsByType()
        init_art_sci = self.user.stats.getArtifacts(category=topic._id)
        self.user.stats.addModifiedArtifact('Wiki', datetime.utcnow(), p)
        lm_art = self.user.stats.getLastMonthArtifacts()
        artifacts = self.user.stats.getArtifacts()
        art_wiki = self.user.stats.getArtifacts(art_type='Wiki')
        art_by_type = self.user.stats.getArtifactsByType()
        lm_art_by_type = self.user.stats.getLastMonthArtifactsByType()
        assert lm_art['created'] == init_lm_art['created']
        assert lm_art['modified'] == init_lm_art['modified'] + 1
        assert artifacts['created'] == init_art['created']
        assert artifacts['modified'] == init_art['modified'] + 1
        assert art_wiki['created'] == init_art_wiki['created']
        assert art_wiki['modified'] == init_art_wiki['modified'] + 1
        assert art_by_type['Wiki'][
            'created'] == init_art_by_type['Wiki']['created']
        assert art_by_type['Wiki'][
            'modified'] == init_art_by_type['Wiki']['modified'] + 1
        assert lm_art_by_type['Wiki'][
            'created'] == init_lm_art_by_type['Wiki']['created']
        assert lm_art_by_type['Wiki'][
            'modified'] == init_lm_art_by_type['Wiki']['modified'] + 1
        # In that case, last month stats should not be changed
        new_date = datetime.utcnow() + timedelta(-32)
        self.user.stats.addModifiedArtifact('Wiki', new_date, p)
        lm_art = self.user.stats.getLastMonthArtifacts()
        artifacts = self.user.stats.getArtifacts()
        art_wiki = self.user.stats.getArtifacts(art_type='Wiki')
        art_by_type = self.user.stats.getArtifactsByType()
        lm_art_by_type = self.user.stats.getLastMonthArtifactsByType()
        assert lm_art['created'] == init_lm_art['created']
        assert lm_art['modified'] == init_lm_art['modified'] + 1
        assert artifacts['created'] == init_art['created']
        assert artifacts['modified'] == init_art['modified'] + 2
        assert art_wiki['created'] == init_art_wiki['created']
        assert art_wiki['modified'] == init_art_wiki['modified'] + 2
        assert art_by_type['Wiki'][
            'created'] == init_art_by_type['Wiki']['created']
        assert art_by_type['Wiki'][
            'modified'] == init_art_by_type['Wiki']['modified'] + 2
        assert lm_art_by_type['Wiki'][
            'created'] == init_lm_art_by_type['Wiki']['created']
        assert lm_art_by_type['Wiki'][
            'modified'] == init_lm_art_by_type['Wiki']['modified'] + 1
        # Category breakdown only counts projects with a trove topic set.
        p.trove_topic = [topic._id]
        self.user.stats.addModifiedArtifact('Wiki', datetime.utcnow(), p)
        lm_art = self.user.stats.getLastMonthArtifacts()
        artifacts = self.user.stats.getArtifacts()
        art_wiki = self.user.stats.getArtifacts(art_type='Wiki')
        art_by_type = self.user.stats.getArtifactsByType()
        lm_art_by_type = self.user.stats.getLastMonthArtifactsByType()
        art_sci = self.user.stats.getArtifacts(category=topic._id)
        art_by_cat = self.user.stats.getArtifactsByCategory(detailed=True)
        assert lm_art['created'] == init_lm_art['created']
        assert lm_art['modified'] == init_lm_art['modified'] + 2
        assert artifacts['created'] == init_art['created']
        assert artifacts['modified'] == init_art['modified'] + 3
        assert art_wiki['created'] == init_art_wiki['created']
        assert art_wiki['modified'] == init_art_wiki['modified'] + 3
        assert art_by_type['Wiki'][
            'created'] == init_art_by_type['Wiki']['created']
        assert art_by_type['Wiki'][
            'modified'] == init_art_by_type['Wiki']['modified'] + 3
        assert lm_art_by_type['Wiki'][
            'created'] == init_lm_art_by_type['Wiki']['created']
        assert lm_art_by_type['Wiki'][
            'modified'] == init_lm_art_by_type['Wiki']['modified'] + 2
        assert art_sci['created'] == init_art_sci['created']
        assert art_sci['modified'] == init_art_sci['modified'] + 1
        assert dict(messagetype='Wiki', created=0,
                    modified=1) in art_by_cat[topic]
        art_by_cat = self.user.stats.getArtifactsByCategory(detailed=False)
        assert art_by_cat[topic]['created'] == 0 and art_by_cat[
            topic]['modified'] == 1

    @td.with_user_project('test-user-2')
    def test_ticket_stats(self):
        """Assigned/revoked/solved ticket counters and the average solving
        time, in total, last-month and per-category views."""
        setup_trove_categories()
        p = Project.query.get(shortname='u/test-user-2')
        topic = TroveCategory.query.get(shortname='scientific')
        create_time = datetime.utcnow() + timedelta(-5)
        init_lm_tickets_art = self.user.stats.getLastMonthArtifacts(
            art_type='Ticket')
        init_tickets_art = self.user.stats.getArtifacts(art_type='Ticket')
        init_tickets_sci_art = self.user.stats.getArtifacts(category=topic._id)
        init_tickets = self.user.stats.getTickets()
        init_lm_tickets = self.user.stats.getLastMonthTickets()
        self.user.stats.addNewArtifact('Ticket', create_time, p)
        lm_tickets_art = self.user.stats.getLastMonthArtifacts(
            art_type='Ticket')
        tickets_art = self.user.stats.getArtifacts(art_type='Ticket')
        tickets_sci_art = self.user.stats.getArtifacts(category=topic._id)
        assert lm_tickets_art['created'] == init_lm_tickets_art['created'] + 1
        assert lm_tickets_art['modified'] == init_lm_tickets_art['modified']
        assert tickets_art['created'] == init_tickets_art['created'] + 1
        assert tickets_art['modified'] == init_tickets_art['modified']
        # NOTE(review): the next two asserts compare a value to itself and
        # can never fail; presumably init_tickets_sci_art was intended.
        assert tickets_sci_art['created'] == tickets_sci_art['created']
        assert tickets_sci_art['modified'] == tickets_sci_art['modified']
        p.trove_topic = [topic._id]
        self.user.stats.addAssignedTicket(create_time, p)
        tickets = self.user.stats.getTickets()
        lm_tickets = self.user.stats.getLastMonthTickets()
        assert tickets['assigned'] == init_tickets['assigned'] + 1
        assert tickets['revoked'] == init_tickets['revoked']
        assert tickets['solved'] == init_tickets['solved']
        assert tickets['averagesolvingtime'] is None
        assert lm_tickets['assigned'] == init_lm_tickets['assigned'] + 1
        assert lm_tickets['revoked'] == init_lm_tickets['revoked']
        assert lm_tickets['solved'] == init_lm_tickets['solved']
        assert lm_tickets['averagesolvingtime'] is None
        # Revocation dated >1 month ago: total bumps, last-month does not.
        self.user.stats.addRevokedTicket(create_time + timedelta(-32), p)
        tickets = self.user.stats.getTickets()
        assert tickets['assigned'] == init_tickets['assigned'] + 1
        assert tickets['revoked'] == init_tickets['revoked'] + 1
        assert tickets['solved'] == init_tickets['solved']
        assert tickets['averagesolvingtime'] is None
        assert lm_tickets['assigned'] == init_lm_tickets['assigned'] + 1
        assert lm_tickets['revoked'] == init_lm_tickets['revoked']
        assert lm_tickets['solved'] == init_lm_tickets['solved']
        assert lm_tickets['averagesolvingtime'] is None
        # Closed after 1 day -> average solving time is exactly 1 day.
        self.user.stats.addClosedTicket(
            create_time, create_time + timedelta(1), p)
        tickets = self.user.stats.getTickets()
        lm_tickets = self.user.stats.getLastMonthTickets()
        assert tickets['assigned'] == init_tickets['assigned'] + 1
        assert tickets['revoked'] == init_tickets['revoked'] + 1
        assert tickets['solved'] == init_tickets['solved'] + 1
        solving_time = dict(seconds=0, minutes=0, days=1, hours=0)
        assert tickets['averagesolvingtime'] == solving_time
        assert lm_tickets['assigned'] == init_lm_tickets['assigned'] + 1
        assert lm_tickets['revoked'] == init_lm_tickets['revoked']
        assert lm_tickets['solved'] == init_lm_tickets['solved'] + 1
        assert lm_tickets['averagesolvingtime'] == solving_time
        # Second close (3 days, no trove topic): average becomes 2 days.
        p.trove_topic = []
        self.user.stats.addClosedTicket(
            create_time, create_time + timedelta(3), p)
        tickets = self.user.stats.getTickets()
        lm_tickets = self.user.stats.getLastMonthTickets()
        solving_time = dict(seconds=0, minutes=0, days=2, hours=0)
        assert tickets['assigned'] == init_tickets['assigned'] + 1
        assert tickets['revoked'] == init_tickets['revoked'] + 1
        assert tickets['solved'] == init_tickets['solved'] + 2
        assert tickets['averagesolvingtime'] == solving_time
        assert lm_tickets['assigned'] == init_lm_tickets['assigned'] + 1
        assert lm_tickets['revoked'] == init_lm_tickets['revoked']
        assert lm_tickets['solved'] == init_lm_tickets['solved'] + 2
        assert lm_tickets['averagesolvingtime'] == solving_time
        # Per-category view only saw the tickets added while the project
        # carried the trove topic.
        by_cat = self.user.stats.getTicketsByCategory()
        lm_by_cat = self.user.stats.getLastMonthTicketsByCategory()
        solving_time = dict(days=1, hours=0, minutes=0, seconds=0)
        assert by_cat[topic]['assigned'] == 1
        assert by_cat[topic]['revoked'] == 1
        assert by_cat[topic]['solved'] == 1
        assert by_cat[topic]['averagesolvingtime'] == solving_time
        assert lm_by_cat[topic]['assigned'] == 1
        assert lm_by_cat[topic]['revoked'] == 0
        assert lm_by_cat[topic]['solved'] == 1
        assert lm_by_cat[topic]['averagesolvingtime'] == solving_time

    @with_git
    @td.with_user_project('test-user-2')
    def test_commit_stats(self):
        """Commit counters (total, last-month, per-category) against the
        bundled testgit.git fixture repository."""
        setup_trove_categories()
        p = Project.query.get(shortname='u/test-user-2')
        topic = TroveCategory.query.get(shortname='scientific')
        commit_time = datetime.utcnow() + timedelta(-1)  # NOTE(review): unused
        self.user.set_password('testpassword')
        # Claim the fixture repo's author address so its commits are
        # attributed to this user.
        addr = M.EmailAddress.upsert('rcopeland@geek.net')
        self.user.claim_address('rcopeland@geek.net')
        repo_dir = pkg_resources.resource_filename(
            'forgeuserstats', 'tests/data')
        c.app.repo.fs_path = repo_dir
        c.app.repo.name = 'testgit.git'
        repo = c.app.repo
        repo.refresh()
        commit = repo.commit('HEAD')
        # The fixture repository contributes 4 commits on refresh.
        init_commits = self.user.stats.getCommits()
        assert init_commits['number'] == 4
        init_lmcommits = self.user.stats.getLastMonthCommits()
        assert init_lmcommits['number'] == 4
        p.trove_topic = [topic._id]
        self.user.stats.addCommit(commit, datetime.utcnow(), p)
        commits = self.user.stats.getCommits()
        assert commits['number'] == init_commits['number'] + 1
        assert commits['lines'] == init_commits['lines'] + 1
        lmcommits = self.user.stats.getLastMonthCommits()
        assert lmcommits['number'] == init_lmcommits['number'] + 1
        assert lmcommits['lines'] == init_lmcommits['lines'] + 1
        by_cat = self.user.stats.getCommitsByCategory()
        assert by_cat[topic]['number'] == 1
        assert by_cat[topic]['lines'] == 1
        lm_by_cat = self.user.stats.getLastMonthCommitsByCategory()
        assert lm_by_cat[topic]['number'] == 1
        assert lm_by_cat[topic]['lines'] == 1
        # A commit dated >1 month ago bumps totals but not last-month.
        self.user.stats.addCommit(
            commit, datetime.utcnow() + timedelta(-40), p)
        commits = self.user.stats.getCommits()
        assert commits['number'] == init_commits['number'] + 2
        assert commits['lines'] == init_commits['lines'] + 2
        lmcommits = self.user.stats.getLastMonthCommits()
        assert lmcommits['number'] == init_lmcommits['number'] + 1
        assert lmcommits['lines'] == init_lmcommits['lines'] + 1
        by_cat = self.user.stats.getCommitsByCategory()
        assert by_cat[topic]['number'] == 2
        assert by_cat[topic]['lines'] == 2
        lm_by_cat = self.user.stats.getLastMonthCommitsByCategory()
        assert lm_by_cat[topic]['number'] == 1
        assert lm_by_cat[topic]['lines'] == 1

    @td.with_user_project('test-user-2')
    def test_login_stats(self):
        """Login counters; old logins count in the total but not last-month,
        and last_login keeps the most recent timestamp."""
        init_logins = self.user.stats.tot_logins_count
        init_lm_logins = self.user.stats.getLastMonthLogins()
        login_datetime = datetime.utcnow()
        self.user.stats.addLogin(login_datetime)
        logins = self.user.stats.tot_logins_count
        lm_logins = self.user.stats.getLastMonthLogins()
        assert logins == init_logins + 1
        assert lm_logins == init_lm_logins + 1
        assert abs(self.user.stats.last_login -
                   login_datetime) < timedelta(seconds=1)
        self.user.stats.addLogin(datetime.utcnow() + timedelta(-32))
        logins = self.user.stats.tot_logins_count
        lm_logins = self.user.stats.getLastMonthLogins()
        assert logins == init_logins + 2
        assert lm_logins == init_lm_logins + 1
        # last_login must still reflect the newer login, not the backdated one.
        assert abs(self.user.stats.last_login -
                   login_datetime) < timedelta(seconds=1)

    def test_start_date(self):
        """start_date is the later of the registration date and the
        configured userstats.start_date."""
        stats = USM.UserStats(registration_date=datetime(2012, 04, 01))
        self.assertEqual(stats.start_date, datetime(2012, 04, 01))
        with h.push_config(config, **{'userstats.start_date': '2013-04-01'}):
            self.assertEqual(stats.start_date, datetime(2013, 04, 01))
        with h.push_config(config, **{'userstats.start_date': '2011-04-01'}):
            self.assertEqual(stats.start_date, datetime(2012, 04, 01))

    @mock.patch('allura.model.stats.difflib.unified_diff')
    def test_count_loc(self, unified_diff):
        """Line counting honours the userstats.count_lines_of_code switch:
        when disabled the commit count still grows but 'lines' does not,
        and the diff machinery is not invoked at all."""
        stats = USM.UserStats()
        # Fake commit with one changed, one copied and one added path.
        newcommit = mock.Mock(
            parent_ids=['deadbeef'],
            diffs=mock.Mock(
                changed=[mock.MagicMock()],
                copied=[mock.MagicMock()],
                added=[mock.MagicMock()],
            ),
        )
        # Each diff yields one '+' line -> 3 paths x 1 line = 3 lines total.
        unified_diff.return_value = ['+++', '---', '+line']
        newcommit.tree.get_blob_by_path.return_value = mock.MagicMock()
        newcommit.tree.get_blob_by_path.return_value.__iter__.return_value = [
            'one']
        newcommit.repo.commit(
        ).tree.get_blob_by_path.return_value = mock.MagicMock()
        newcommit.repo.commit().tree.get_blob_by_path.return_value.__iter__.return_value = [
            'two']
        commit_datetime = datetime.utcnow()
        project = mock.Mock(
            trove_topic=[],
            trove_language=[],
        )
        stats.addCommit(newcommit, commit_datetime, project)
        self.assertEqual(stats.general[0].commits[0],
                         {'lines': 3, 'number': 1, 'language': None})
        unified_diff.reset_mock()
        with h.push_config(config, **{'userstats.count_lines_of_code': 'false'}):
            stats.addCommit(newcommit, commit_datetime, project)
        self.assertEqual(stats.general[0].commits[0],
                         {'lines': 3, 'number': 2, 'language': None})
        unified_diff.assert_not_called()
| apache/incubator-allura | ForgeUserStats/forgeuserstats/tests/test_model.py | Python | apache-2.0 | 22,990 |
# Copyright 2015 Intel
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import mock
import tooz.coordination
import tooz.locking
from compute_hyperv.nova import coordination
from compute_hyperv.tests.unit import test_base
if hasattr(inspect, 'getfullargspec'):
getargspec = inspect.getfullargspec
else:
getargspec = inspect.getargspec
class Locked(Exception):
    """Raised by the mock lock when a blocking acquire hits a held lock."""
class MockToozLock(tooz.locking.Lock):
    """In-memory tooz lock double, backed by a class-level set of held names."""

    active_locks = set()

    def acquire(self, blocking=True):
        """Take the lock by name; False (or Locked) when already held."""
        if self.name in self.active_locks:
            if not blocking:
                return False
            # A real blocking acquire would wait; the test double raises so
            # that would-be deadlocks surface immediately.
            raise Locked
        self.active_locks.add(self.name)
        return True

    def release(self):
        """Drop the lock; KeyError if it was never held."""
        self.active_locks.remove(self.name)
@mock.patch('tooz.coordination.get_coordinator')
class CoordinatorTestCase(test_base.HyperVBaseTestCase):
    """Tests for the Coordinator wrapper, with tooz's factory patched out."""

    # NOTE(review): presumably disables the base class's automatic tooz
    # mocking since this test patches tooz itself - confirm in test_base.
    MOCK_TOOZ = False

    def test_coordinator_start(self, get_coordinator):
        """start() obtains a backend coordinator and starts it."""
        crd = get_coordinator.return_value
        agent = coordination.Coordinator()
        agent.start()
        self.assertTrue(get_coordinator.called)
        self.assertTrue(crd.start.called)

    def test_coordinator_stop(self, get_coordinator):
        """stop() stops the backend and clears the coordinator reference."""
        crd = get_coordinator.return_value
        agent = coordination.Coordinator()
        agent.start()
        self.assertIsNotNone(agent.coordinator)
        agent.stop()
        self.assertTrue(crd.stop.called)
        self.assertIsNone(agent.coordinator)

    def test_coordinator_lock(self, get_coordinator):
        """A named lock is exclusive across Coordinator instances and is
        released on exiting the context manager."""
        crd = get_coordinator.return_value
        crd.get_lock.side_effect = lambda n: MockToozLock(n)
        agent1 = coordination.Coordinator()
        agent1.start()
        agent2 = coordination.Coordinator()
        agent2.start()
        lock_name = 'lock'
        # Lock names are passed to tooz as byte strings.
        expected_name = lock_name.encode('ascii')
        self.assertNotIn(expected_name, MockToozLock.active_locks)
        with agent1.get_lock(lock_name):
            self.assertIn(expected_name, MockToozLock.active_locks)
            self.assertRaises(Locked, agent1.get_lock(lock_name).acquire)
            self.assertRaises(Locked, agent2.get_lock(lock_name).acquire)
        self.assertNotIn(expected_name, MockToozLock.active_locks)

    def test_coordinator_offline(self, get_coordinator):
        """A backend connection error during start() propagates and leaves
        the agent not started."""
        crd = get_coordinator.return_value
        crd.start.side_effect = tooz.coordination.ToozConnectionError('err')
        agent = coordination.Coordinator()
        self.assertRaises(tooz.coordination.ToozError, agent.start)
        self.assertFalse(agent.started)
@mock.patch.object(coordination.COORDINATOR, 'get_lock')
class CoordinationTestCase(test_base.HyperVBaseTestCase):
    """Tests for the @coordination.synchronized decorator."""

    def test_synchronized(self, get_lock):
        """The lock-name template is expanded from the wrapped function's
        name, attribute access and item access on its arguments."""
        @coordination.synchronized('lock-{f_name}-{foo.val}-{bar[val]}')
        def func(foo, bar):
            pass
        foo = mock.Mock()
        foo.val = 7
        bar = mock.MagicMock()
        bar.__getitem__.return_value = 8
        func(foo, bar)
        get_lock.assert_called_with('lock-func-7-8')
        # The decorator must preserve the wrapped function's signature.
        self.assertEqual(['foo', 'bar'], getargspec(func)[0])
| stackforge/compute-hyperv | compute_hyperv/tests/unit/test_coordination.py | Python | apache-2.0 | 3,659 |
#
# =================================================================
# =================================================================
# def _enum(**enums):
# return type('Enum', (), enums)
import eventlet
from eventlet import greenthread
import paxes_cinder.k2aclient.k2asample as k2asample
from paxes_cinder.k2aclient.v1 import k2uom
from paxes_cinder.k2aclient.k2asample import dump_k2resp
from paxes_cinder.k2aclient import client
from paxes_cinder.k2aclient.openstack.common import lockutils
from paxes_cinder.k2aclient import exceptions as k2exceptions
from paxes_cinder.k2aclient.k2asample.k2_ssp_cluster_vios_snap \
import cluster_vios_snap
from itertools import repeat
from collections import deque
import time
import pickle
import logging
from os.path import expanduser
from random import randrange
import json
import random
import datetime
import paxes_cinder.k2aclient.v1.cluster_manager as cluster_manager
# import numpy as np
# When True the simulation runs against in-process mocks instead of a
# live K2 endpoint.
MOCK = False
# When True a VIOS snap dump is collected after qualifying K2 failures.
VIOS_DUMP_ACTIVATED = False
# Module-wide lock decorator; lock files are prefixed with "k2a-".
synchronized = lockutils.synchronized_with_prefix('k2a-')
class MockLu(object):
    """Stand-in for a K2 LogicalUnit; hands out sequential device ids."""

    ugenid = 0  # class-wide counter shared by all instances

    def __init__(self):
        cls = MockLu
        # Take the current counter value as this instance's id, then
        # advance the shared counter for the next instance.
        self.unique_device_id = cls.ugenid
        cls.ugenid = cls.ugenid + 1
def _timer(prev_time):
"""Simple timer"""
return time.time() - prev_time
def _chunks(l, n):
""" Yield successive n-sized chunks from l.
"""
for i in xrange(0, len(l), n):
yield l[i:i + n]
# Wall-clock time of the most recent VIOS dump, used to rate-limit
# dumps; None until the first dump is taken.
_last_dump = None
@synchronized('simulation')
def _process_k2_exception(simulation, e):
    """Examine a K2 exception and, when enabled, trigger a VIOS dump.

    Serialized through the "simulation" lock so concurrent greenthreads
    cannot race on the module-level _last_dump timestamp.  Dumps are
    rate-limited to one every five minutes.
    """
    if not VIOS_DUMP_ACTIVATED:
        msg = ("Exception:"
               " msg: >%s<,"
               " VIOS dump is not activated,"
               " continuing ...")
        print (msg % (e,))
        return
    time_between_dumps = 300
    global _last_dump
    if _last_dump is not None:
        delta = time.time() - _last_dump
        if delta < time_between_dumps:
            msg = ("exception: >%s<,"
                   " recent dump,"
                   " take a break ...")
            # BUG FIX: interpolate the message; previously this printed
            # the (format-string, args) tuple verbatim.
            print (msg % (e,))
            greenthread.sleep(100)
            return
    # Only these exception types carry a diagnostics file worth dumping.
    dump = False
    diagfspec = None
    if isinstance(e, k2exceptions.K2aCrudException):
        dump = True
        diagfspec = e.diagfspec
    elif isinstance(e, k2exceptions.K2aK2Error):
        dump = True
        diagfspec = e.diagfspec
    elif isinstance(e, k2exceptions.K2JobFailure):
        dump = True
        diagfspec = e.diagfspec
    if dump and diagfspec is not None:
        msg = ("exception: >%s<, "
               " take a dump corresponding "
               " to e.diagfspec: >%s<, "
               " and then take a break ...")
        print (msg % (e, diagfspec,))
        if simulation.vios_password is not None:
            # BUG FIX: ImagePool stores the VIOS addresses as _vios_ips
            # and exposes no "vios_ips" property, so the previous
            # attribute access would have raised AttributeError.
            cluster_vios_snap(simulation.image_pool._vios_ips,
                              diagfspec + ".vios",
                              password=simulation.vios_password)
        greenthread.sleep(100)
        _last_dump = time.time()
    else:
        msg = ("exception: >%s<,"
               " but no dump ...")
        print (msg % (e,))
def _enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.iteritems())
enums['reverse_mapping'] = reverse
return type('Enum', (), enums)
# def _enum(*sequential):
#     enums = dict(zip(sequential,sequential))
#     return type('Enum', (), enums)

# The five kinds of operation the simulation can schedule.
OperationType = _enum("DEPLOY_FROM_IMAGE",
                      "DEPLOY_FROM_SNAPSHOT",
                      "SNAPSHOT_A_DEPLOY",
                      "DELETE_A_SNAPSHOT",
                      "DELETE_A_DEPLOY")

# Current direction of the deploy population sawtooth.
DeployState = _enum("INCREASING",
                    "DECREASING")

# Current direction of the snapshot population sawtooth.
SnapshotState = _enum("INCREASING",
                      "DECREASING")
def _record(simulation, operation, e, duration):
    """Append one finished operation to the simulation history.

    Also extends the running deploy/snapshot population series so the
    population at every operation can be plotted afterwards.
    """
    estr = None if e is None else str(e)
    # Mock runs compress wall-clock time, so scale the timestamp.
    stamp = time.time() * 10000.0 if MOCK else time.time()
    simulation.operations.append((operation.type, estr, duration, stamp))

    # track number of snapshots and number of deploys
    deploy_series = simulation.deploys_at_oper
    snapshot_series = simulation.snapshots_at_oper
    prev_deploys = deploy_series[-1] if deploy_series else 0
    prev_snapshots = snapshot_series[-1] if snapshot_series else 0

    # (deploy delta, snapshot delta) contributed by each operation type.
    deltas = {
        OperationType.DEPLOY_FROM_IMAGE: (1, 0),
        OperationType.DEPLOY_FROM_SNAPSHOT: (1, 0),
        OperationType.SNAPSHOT_A_DEPLOY: (0, 1),
        OperationType.DELETE_A_SNAPSHOT: (0, -1),
        OperationType.DELETE_A_DEPLOY: (-1, 0),
    }
    change = deltas.get(operation.type)
    if change is not None:
        deploy_series.append(prev_deploys + change[0])
        snapshot_series.append(prev_snapshots + change[1])
def _parse_vios(node_vios):
node_parts = node_vios.split('/')
ms_id = node_parts[-3]
vios_id = node_parts[-1]
return ms_id, vios_id
class ImagePool(object):
    """Round-robin pool of image LUs on a cluster's shared storage pool.

    Exactly one of *existing* (unit names of LUs already on the SSP) or
    *fake* (a (prefix, count, size, thin, lu_type) tuple used to create
    throw-away image LUs) must be supplied.
    """

    def __init__(self, cs, cluster_id, existing=None, fake=None):
        if MOCK:
            # Mock mode: no K2 traffic; just size the pool with dummies.
            self._cs = cs
            self._cluster = None
            self._ssp_id = None
            self._ssp = None
            self._fake = True  # MOCK is always fake
            self._next = 0
            if fake is not None:
                prefix, num_images, image_size, thin, lut = fake
                self._images = num_images * [None]
            else:
                self._images = len(existing) * [None]
            return
        self._cs = cs
        self._cluster = self._cs.cluster.get(cluster_id)
        self._ssp_id = self._cluster.sharedstoragepool_id()
        self._ssp = self._cs.sharedstoragepool.get(self._ssp_id)
        self._images = []
        self._next = 0
        # vios
        # Record the RMC IP address of every VIOS node in the cluster.
        self._vios_ips = []
        for node in self._cluster.node.node:
            if not node.virtual_io_server:
                # NOTE(review): "_" (gettext) is never imported in this
                # module, so this print would raise NameError if reached;
                # also the loop does not actually "continue" here, so
                # _parse_vios is still called on the empty value.
                print(_("Node: >%s<,"
                        " has no virtual_io_server,"
                        " continuing ...") % node.partition_name)
            ms_id, vios_id = _parse_vios(node.virtual_io_server)
            try:
                vios = cs.\
                    virtualioserver.get(ms_id,
                                        vios_id,
                                        xag=["None"])
            except Exception as e:
                msg = _("Failed to retrieve"
                        " node: >%s<,"
                        " msg: >%s<,"
                        " continuing ...")
                raise Exception(msg % (node.partition_name, e))
            self._vios_ips.append(vios.resource_monitoring_ip_address)
        # if fake is not None then (mock) image LUs will be created
        self._fake = False
        if existing is None and fake is None:
            raise ValueError("must specify existing or fake")
        if existing is not None and fake is not None:
            x = "must specify either existing or fake, but not both"
            raise ValueError(x)
        if fake is not None:
            self._fake = True
            prefix, num_images, image_size, thin, lut = fake
            n = cs.sharedstoragepool.create_unique_name
            images = [(n("%s%07d" % (prefix, i,)),
                       image_size, thin, lut) for i in range(num_images)]
            (image_lu_pool, self._ssp) = self._ssp.update_append_lus(images)
            # self._images = [lu.unique_device_id for lu in image_lu_pool]
            self._images = image_lu_pool
        elif existing is not None:
            # Select the already-present LUs whose names were requested.
            self._images = []
            for lu in self._ssp.logical_units.logical_unit:
                if lu.unit_name in existing:
                    self._images.append(lu)
                    # self._images.append(lu.unique_device_id)
            if len(self._images) == 0:
                raise Exception("Empty Image Pool")

    @property
    def cs(self):
        # The k2aclient Client this pool talks through.
        return self._cs

    @property
    def cluster(self):
        return self._cluster

    @property
    def ssp_id(self):
        return self._ssp_id

    @property
    def size(self):
        # Number of image LUs currently in the pool.
        return len(self._images)

    @property
    def ssp(self):
        return self._ssp

    @ssp.setter
    def ssp(self, value):
        self._ssp = value

    def next(self):
        """Return the next image LU, cycling round-robin; None if empty."""
        if len(self._images) == 0:
            return None
        if self._next > len(self._images) - 1:
            self._next = 0
        self._next += 1
        return self._images[self._next - 1]

    def destroy(self):
        """Delete fake image LUs from the SSP; refuses to touch real ones."""
        if not self._fake:
            print "too dangerous, wont destroy existing images"
            self._images = []
            return
        if not MOCK:
            # chunk to work around k2 timeouts
            chunksize = 10
            for chunk in _chunks(self._images, chunksize):
                # Re-fetch the SSP each chunk so the etag stays current.
                ssp = self._cs.sharedstoragepool.get(self._ssp_id)
                lu_udids = [lu.unique_device_id for lu in chunk]
                ssp.update_del_lus(lu_udids)
                # ssp.update_del_lus(chunk)
        self._images = []
class Operation(object):
    """One scheduled unit of work, tagged with its OperationType."""

    def __init__(self, otype):
        self._kind = otype
        self._number = None

    @property
    def op_number(self):
        """Sequence number assigned when the operation is dequeued."""
        return self._number

    @op_number.setter
    def op_number(self, value):
        self._number = value

    @property
    def type(self):
        """The OperationType value this operation performs."""
        return self._kind

    @property
    def type_as_str(self):
        """Human-readable name of the operation type."""
        return OperationType.reverse_mapping[self._kind]
class K2_XA(object):
    """Builds K2 audit-memento strings: "<tag> <creation-time> <count>"."""

    def __init__(self, tag):
        self._tag = tag
        self._i = 0
        self._ct = time.localtime()  # fixed at construction time

    def _fmt(self):
        stamp = time.strftime("%Y-%m-%d %H:%M:%S", self._ct)
        return "%s %s %d" % (self._tag, stamp, self._i)

    def r(self):
        """Return the current string without advancing the counter."""
        return self._fmt()

    def ri(self):
        """Return the current string, then advance the counter."""
        out = self._fmt()
        self._i += 1
        return out
# basic clone operation
def _clone(
        simulation,
        operation,
        source_lu,
        dest_lu_unit_name,
        dest_lu_lut):
    """Linked-clone *source_lu* to a new LU via a single K2 job.

    Returns (exception-or-None, new LU udid or None, [timings]).
    """
    n = simulation.image_pool.cs.sharedstoragepool.create_unique_name
    dest_lu_unit_name = n(dest_lu_unit_name)
    # print ("start clone: >%s<" % (dest_lu_unit_name,))
    cluster = simulation.image_pool.cluster
    e = None
    dest_lu_udid = None
    times = []
    if MOCK:
        # Mock mode: fail every 90th tick, otherwise fabricate timings.
        # print "Clone: unique_device_id: >%s<" % (dest_lu.unique_device_id, )
        if (simulation.tick_count % 90) == 0:
            e = Exception("Mock Exception")
            times.append(random.uniform(0, 4))
            times.append(random.uniform(0, 4))
        else:
            status = "COMPLETED_OK"
            dest_lu_udid = MockLu().unique_device_id
            times.append(random.uniform(3, 5))
            times.append(random.uniform(25, 30))
        return e, dest_lu_udid, times
    start = time.time()  # only for exception
    try:
        (status, dest_lu_udid, job_id) = \
            cluster.api.lu_linked_clone_of_lu_bj(
                cluster,
                source_lu,
                dest_lu_unit_name,
                dest_lu_logical_unit_type=dest_lu_lut,
                xa=simulation.xa(operation))
        print "clone: name: >%s<, lu_udid: >%s<" % \
            (dest_lu_unit_name, dest_lu_udid)
        times.append(time.time() - start)
        times.append(0.0)
        # print " time: >%f<, >%f<" % (times[0], times[1]) #DEBUGA
        if status != "COMPLETED_OK":
            msg = "issue for clone: >%s<, job_id: >%s<, status: >%s<"
            x = msg % (dest_lu_unit_name, job_id, status,)
            print (x)
    except Exception as e:
        msg = "exception for clone: >%s<"
        x = msg % (dest_lu_unit_name,)
        print (x)
        _process_k2_exception(simulation, e)
        times = []
        times.append(time.time() - start)
        times.append(0.0)
        simulation.total_number_of_deploy_exceptions += 1
    # NOTE(review): relies on Python 2 scoping -- "e" bound in the except
    # clause is still visible here (Python 3 deletes it at block exit).
    return e, dest_lu_udid, times
# basic clone operation
def _clone_ds(
        simulation,
        operation,
        source_lu,
        dest_lu_unit_name,
        dest_lu_lut):
    """Clone via two K2 jobs: create the target LU, then linked-clone.

    Returns (exception-or-None, target udid or None, [timings]); the
    failure paths return the not-yet-created dest_lu (None) instead.
    """
    n = simulation.image_pool.cs.sharedstoragepool.create_unique_name
    dest_lu_unit_name = n(dest_lu_unit_name)
    # print ("start clone: >%s<" % (dest_lu_unit_name,))
    cluster = simulation.image_pool.cluster
    e = None
    dest_lu = None
    times = []
    if MOCK:
        # Mock mode: fail every 90th tick, otherwise fabricate timings.
        # print "Clone: unique_device_id: >%s<" % (dest_lu.unique_device_id, )
        if (simulation.tick_count % 90) == 0:
            e = Exception("Mock Exception")
            times.append(random.uniform(0, 4))
            times.append(random.uniform(0, 4))
        else:
            status = "COMPLETED_OK"
            dest_lu = MockLu()
            times.append(random.uniform(3, 5))
            times.append(random.uniform(25, 30))
        return e, dest_lu, times
    start = time.time()
    try:
        # CREATE TARGET
        (status, target_udid, job_id) = \
            cluster.lu_create(dest_lu_unit_name,
                              source_lu.unit_capacity,
                              xa=simulation.xa(operation))
        if status != "COMPLETED_OK":
            msg = "issue for job >%s< create: >%s<, status: >%s<"
            x = msg % (job_id, dest_lu_unit_name, status,)
            print (x)
        times.append(time.time() - start)
    except Exception as e:
        msg = "exception for create: >%s<"
        x = msg % (dest_lu_unit_name,)
        print (x)
        _process_k2_exception(simulation, e)
        times.append(time.time() - start)
        times.append(0.0)
        return e, dest_lu, times
    start = time.time()
    try:
        # CLONE
        status, job_id = cluster.lu_linked_clone(
            source_lu.unique_device_id,
            target_udid,
            xa=simulation.xa(operation))
        times.append(time.time() - start)
        if status != "COMPLETED_OK":
            msg = "issue for clone: >%s<, job_id: >%s<, status: >%s<"
            x = msg % (dest_lu_unit_name, job_id, status,)
            print (x)
    except Exception as e:
        msg = "exception for clone: >%s<"
        x = msg % (dest_lu_unit_name,)
        print (x)
        _process_k2_exception(simulation, e)
        times.append(time.time() - start)
        return e, dest_lu, times
    # print "Create: unique_device_id: >%s<" % \
    #     (dest_lu.unique_device_id) #DEBUGA
    # NOTE(review): both except clauses return early above, so this
    # increments the *exception* counter on the success path -- looks
    # misplaced; confirm against the recorded statistics.
    simulation.total_number_of_deploy_exceptions += 1
    return e, target_udid, times
def _delete(simulation, del_lu_name, del_lu_udid, operation):
    """Delete one LU from the SSP; returns (exception-or-None, [timing])."""
    # print ("Delete: unique_device_id: >%s<" % (del_lu_udid,)) #DEBUGA
    e = None
    times = []
    if MOCK:
        # print "Delete: unique_device_id: >%s<" % (del_lu_udid, )
        times.append(random.uniform(3, 5))
    else:
        start = time.time()
        print "delete: name: >%s<, udid: >%s<" % (del_lu_name, del_lu_udid)
        try:
            # update_del_lus returns the refreshed SSP; keep it so later
            # deletes operate on the current state/etag.
            ssp = simulation.image_pool.ssp.update_del_lus(
                [del_lu_udid],
                xa=simulation.xa(operation))
            simulation.image_pool.ssp = ssp
        except Exception as e:
            msg = "exception for delete: >%s<"
            x = msg % (del_lu_udid,)
            print (x)
            _process_k2_exception(simulation, e)
        times.append(time.time() - start)
        # print " time: >%f<" % (times[0]) #DEBUGA
    # NOTE(review): relies on Python 2 except-variable scoping for "e".
    if e is not None:
        simulation.total_number_of_delete_exceptions += 1
    return e, times
def simulate(simulation,
             prefix):
    """Main driver: pull operations off the queue and run them on a green pool."""

    def perform_operation(simulation, operation):
        # Execute one operation end-to-end, record it, and update state.
        ####################
        # DEPLOY_FROM_IMAGE
        if operation.type == OperationType.DEPLOY_FROM_IMAGE:
            # print "Operation: DEPLOY_FROM_IMAGE" #DEBUGA
            # simulation.deploys.append("dfi")
            # source_lu = simulation.image_pool.next()
            source_lu = simulation.image_pool.next()
            x = "P2Z-DEPLOY_FROM_IMAGE-%07d"
            dest_lu_unit_name = x % (simulation.next_clone_number(),)
            times = []
            e, new_lu_udid, times = _clone(
                simulation,
                operation,
                source_lu,
                dest_lu_unit_name,
                "VirtualIO_Disk")
            _record(simulation, operation, e, times)
            simulation._tick()
            simulation.check_for_termination()
            if e is not None:
                raise e
            # Deploys are keyed by udid; the tuple keeps what a later
            # snapshot needs to rebuild a LogicalUnit stand-in.
            simulation.deploys[new_lu_udid] = (
                dest_lu_unit_name,
                source_lu.unit_capacity,
                source_lu.thin_device)
            simulation.total_number_of_image_deploys += 1
        ####################
        # DEPLOY_FROM_SNAPSHOT
        elif operation.type == OperationType.DEPLOY_FROM_SNAPSHOT:
            if len(simulation.snapshots) < 1:
                # no snapshot to deploy
                return
            # print "Operation: DEPLOY_FROM_SNAPSHOT" #DEBUGA
            # simulation.deploys.append("dfs")
            ilu = randrange(len(simulation.snapshots))
            # NOTE(review): simulation.snapshots receives plain udids in
            # SNAPSHOT_A_DEPLOY below, yet unit_capacity/thin_device are
            # read from this entry as if it were a LogicalUnit -- confirm
            # which type the list actually holds.
            source_lu = simulation.snapshots[ilu]
            x = "P2Z-DEPLOY_FROM_SNAPSHOT-%07d"
            dest_lu_unit_name = x % (simulation.next_clone_number(),)
            e, new_lu_udid, times = _clone(
                simulation,
                operation,
                source_lu,
                dest_lu_unit_name,
                "VirtualIO_Disk")
            _record(simulation, operation, e, times)
            simulation._tick()
            simulation.check_for_termination()
            if e is not None:
                raise e
            simulation.deploys[new_lu_udid] = (
                dest_lu_unit_name,
                source_lu.unit_capacity,
                source_lu.thin_device)
            simulation.total_number_of_snapshot_deploys += 1
        ####################
        # SNAPSHOT_A_DEPLOY
        elif operation.type == OperationType.SNAPSHOT_A_DEPLOY:
            if len(simulation.deploys) < 1:
                # nothing to snapshot
                return
            # print "Operation: SNAPSHOT_A_DEPLOY" #DEBUGA
            # simulation.snapshots.append("sd")
            keys = simulation.deploys.keys()
            ilu = randrange(len(keys))
            source_props = simulation.deploys[keys[ilu]]
            # Rebuild a minimal LogicalUnit for the clone call.
            source_lu = k2uom.LogicalUnit()
            source_lu.unit_name = source_props[0]
            source_lu.unit_capacity = source_props[1]
            source_lu.thin_device = source_props[2]
            x = "P2Z-SNAPSHOT_A_DEPLOY-%07d"
            dest_lu_unit_name = x % (simulation.next_clone_number(),)
            times = []
            e, new_lu_udid, times = _clone(
                simulation,
                operation,
                source_lu,
                dest_lu_unit_name,
                "VirtualIO_Image")
            _record(simulation, operation, e, times)
            simulation._tick()
            simulation.check_for_termination()
            if e is not None:
                raise e
            simulation.snapshots.append(new_lu_udid)
            simulation.total_number_of_snapshots += 1
        ####################
        # DELETE_A_SNAPSHOT
        elif operation.type == OperationType.DELETE_A_SNAPSHOT:
            if len(simulation.snapshots) < 1:
                # no snapshot to delete
                return
            # print "Operation: DELETE_A_SNAPSHOT" #DEBUGA
            ilu = randrange(len(simulation.snapshots))
            # NOTE(review): see note above -- if snapshots holds udid
            # strings these attribute reads would fail; confirm.
            del_lu_name = simulation.snapshots[ilu].unit_name
            del_lu_udid = simulation.snapshots[ilu].unique_device_id
            # simulation.image_pool.ssp.update_del_lus([del_lu_udid])
            e, times = _delete(simulation, del_lu_name, del_lu_udid, operation)
            _record(simulation, operation, e, times)
            if e is not None:
                raise e
            del simulation.snapshots[ilu]
        ####################
        # DELETE_A_DEPLOY
        elif operation.type == OperationType.DELETE_A_DEPLOY:
            if len(simulation.deploys) < 1:
                # no deploy to delete
                return
            # print "Operation: DELETE_A_DEPLOY" #DEBUGA
            keys = simulation.deploys.keys()
            ilu = randrange(len(keys))
            del_lu_udid = keys[ilu]
            (del_lu_name, x, x) = simulation.deploys.pop(del_lu_udid, None)
            # del_lu_udid = simulation.deploys[keys[ilu]].unique_device_id
            # simulation.image_pool.ssp.update_del_lus([del_lu_udid])
            e, times = _delete(simulation, del_lu_name, del_lu_udid, operation)
            _record(simulation, operation, e, times)
            if e is not None:
                raise e
            # del simulation.deploys[ilu]
        ####################
        # ERRORS
        else:
            raise Exception("programming error")

    # cluster = cs.cluster.get(cluster_id)
    # ssp_id = cluster.sharedstoragepool_id()
    # ssp = cs.sharedstoragepool.get(ssp_id)
    simulation.start_time = time.time()
    pool = eventlet.GreenPool(simulation.num_threads)
    op_number = 0
    while True:
        if simulation.terminate:
            print "TERMINATE"
            break
        # Refill the queue whenever it runs dry.
        if len(simulation.opq) < 1:
            simulation.schedule()
        operation = simulation.opq.popleft()
        operation.op_number = op_number
        op_number += 1
        try:
            pool.spawn_n(perform_operation, simulation, operation)
        except (SystemExit, KeyboardInterrupt):  # TODO are these correct?
            break
    pool.waitall()
    # done
    simulation.checkpoint()
class Simulation(object):
    """All state for one simulation run: tuning knobs, work queue, statistics."""

    SNAPSHOT_ON = False
    # One snapshot-cycle is scheduled per this many deploy-cycles.
    DEPLOY_TO_SNAPSHOT_RATIO = 5
    SNAPSHOT_STEP_FORWARD = 4  # must be greater than 1
    DEPLOY_IMAGE_PER_CYCLE = 2
    DEPLOY_SNAPSHOT_PER_CYCLE = 1
    DEPLOY_STEP_FORWARD = 10
    # When True, a trickle of opposite-direction operations is scheduled
    # alongside the dominant direction.
    SECOND_ORDER = True

    def __init__(self, result_file,
                 title,
                 vios_password,
                 target_number_of_deploys,
                 min_deploys, max_deploys, min_snapshots,
                 max_snapshots, image_pool, num_threads):
        self.result_file = result_file
        self.title = title
        self.vios_password = vios_password
        self.image_pool = image_pool
        self.num_threads = num_threads
        self.start_time = -1
        self.checkpoint_time = -1
        self.target_number_of_deploys = target_number_of_deploys
        assert max_deploys > min_deploys
        self.min_deploys = min_deploys
        self.max_deploys = max_deploys
        assert max_snapshots > min_snapshots
        self.min_snapshots = min_snapshots
        self.max_snapshots = max_snapshots
        self.current_dtsr = 0
        self._current_clone_number = 0
        # statistics
        self.image_pool_size = image_pool.size
        self.total_number_of_image_deploys = 0
        self.total_number_of_snapshot_deploys = 0
        self.total_number_of_snapshots = 0
        self.current_deploystate = DeployState.INCREASING
        self.current_snapshotstate = SnapshotState.INCREASING
        # Pending operations; refilled by schedule().
        self.opq = deque([])
        # udid -> (unit_name, unit_capacity, thin_device)
        self.deploys = {}
        self.snapshots = []
        self.operations = []
        self.deploys_at_oper = []
        self.snapshots_at_oper = []
        self.snapshot_inflections = []
        self.deploy_inflections = []
        self.exceptions = []
        self.total_number_of_deploy_exceptions = 0
        self.total_number_of_delete_exceptions = 0
        self.tick_count = 0
        self.terminate = False

    @property
    def total_deploys(self):
        """Deploys performed so far, from image plus from snapshot."""
        return self.total_number_of_image_deploys + \
            self.total_number_of_snapshot_deploys

    def check_for_termination(self):
        # Stop once the deploy budget has been exceeded.
        if self.total_deploys > self.target_number_of_deploys:
            self.terminate = True

    def schedule(self):
        """Refill the queue, sawtoothing populations between min/max bounds.

        NOTE(review): repeat(Operation(op), ct) enqueues the SAME
        Operation instance ct times, so op_number is overwritten on the
        shared object at each dequeue -- confirm this is acceptable.
        """
        issnapshot = (self.current_dtsr % self.DEPLOY_TO_SNAPSHOT_RATIO) == 0
        self.current_dtsr += 1
        if self.SNAPSHOT_ON and issnapshot:
            while True:
                if self.current_snapshotstate is SnapshotState.INCREASING:
                    if len(self.snapshots) > self.max_snapshots:
                        opnum = len(self.operations)
                        x = "Snapshot: INCREASING -> DECREASING at op # >%d<"
                        print x % (opnum,)
                        self.snapshot_inflections.append(("I2D", opnum))
                        self.current_snapshotstate = SnapshotState.DECREASING
                        continue
                    # 1st order
                    op = OperationType.SNAPSHOT_A_DEPLOY
                    ct = self.DEPLOY_SNAPSHOT_PER_CYCLE * \
                        self.SNAPSHOT_STEP_FORWARD
                    self.opq.extend(repeat(Operation(op), ct))
                    # 2nd order
                    if self.SECOND_ORDER:
                        op = OperationType.DELETE_A_SNAPSHOT
                        ct = self.DEPLOY_SNAPSHOT_PER_CYCLE
                        self.opq.extend(repeat(Operation(op), ct))
                    return
                elif self.current_snapshotstate is SnapshotState.DECREASING:
                    if len(self.snapshots) < self.min_snapshots:
                        opnum = len(self.operations)
                        x = "Snapshot: DECREASING -> INCREASING at op # >%d<"
                        print x % (opnum,)
                        self.snapshot_inflections.append(("D2I", opnum))
                        self.current_snapshotstate = SnapshotState.INCREASING
                        continue
                    # 1st order
                    op = OperationType.DELETE_A_SNAPSHOT
                    ct = self.DEPLOY_SNAPSHOT_PER_CYCLE * \
                        self.SNAPSHOT_STEP_FORWARD
                    self.opq.extend(repeat(Operation(op), ct))
                    # 2nd order
                    if self.SECOND_ORDER:
                        op = OperationType.SNAPSHOT_A_DEPLOY
                        ct = self.DEPLOY_SNAPSHOT_PER_CYCLE
                        self.opq.extend(repeat(Operation(op), ct))
                    return
                else:
                    raise Exception("Programming Error")
        else:
            while True:
                if self.current_deploystate is DeployState.INCREASING:
                    if len(self.deploys) > self.max_deploys:
                        opnum = len(self.operations)
                        x = "Deploy: INCREASING -> DECREASING at op # >%d<"
                        print x % (opnum,)
                        self.deploy_inflections.append(("I2D", opnum))
                        self.current_deploystate = DeployState.DECREASING
                        continue
                    # 1st order for IMAGE
                    ot = OperationType.DEPLOY_FROM_IMAGE
                    ct = self.DEPLOY_IMAGE_PER_CYCLE * self.DEPLOY_STEP_FORWARD
                    self.opq.extend(repeat(Operation(ot), ct))
                    # 1st order for DEPLOY
                    ot = OperationType.DEPLOY_FROM_SNAPSHOT
                    ct = self.DEPLOY_SNAPSHOT_PER_CYCLE * \
                        self.DEPLOY_STEP_FORWARD
                    self.opq.extend(repeat(Operation(ot), ct))
                    # 2nd order
                    if self.SECOND_ORDER:
                        ot = OperationType.DELETE_A_DEPLOY
                        ct = self.DEPLOY_IMAGE_PER_CYCLE + \
                            self.DEPLOY_SNAPSHOT_PER_CYCLE
                        self.opq.extend(repeat(Operation(ot), ct))
                    return
                elif self.current_deploystate is DeployState.DECREASING:
                    if len(self.deploys) < self.min_deploys:
                        opnum = len(self.operations)
                        x = "Deploy: DECREASING -> INCREASING at op # >%d<"
                        print x % (opnum,)
                        self.deploy_inflections.append(("D2I", opnum))
                        self.current_deploystate = DeployState.INCREASING
                        continue
                    ot = OperationType.DELETE_A_DEPLOY
                    ct = (self.DEPLOY_IMAGE_PER_CYCLE +
                          self.DEPLOY_SNAPSHOT_PER_CYCLE)
                    ct = ct * self.DEPLOY_STEP_FORWARD
                    self.opq.extend(repeat(Operation(ot), ct))
                    if self.SECOND_ORDER:
                        ot = OperationType.DEPLOY_FROM_IMAGE
                        ct = self.DEPLOY_IMAGE_PER_CYCLE
                        self.opq.extend(repeat(Operation(ot), ct))
                        ot = OperationType.DEPLOY_FROM_SNAPSHOT
                        ct = self.DEPLOY_SNAPSHOT_PER_CYCLE
                        self.opq.extend(repeat(Operation(ot), ct))
                    return
                else:
                    raise Exception("Programming Error")

    def next_clone_number(self):
        """Return the next monotonically-increasing clone sequence number."""
        cn = self._current_clone_number
        self._current_clone_number += 1
        return cn

    def _tick(self):
        """Count one finished operation; checkpoint every 10, log every 100."""
        self.tick_count += 1
        if (self.tick_count % 100) == 0:
            print "Operation number: >%d<" % (self.tick_count)
        if (self.tick_count % 10) == 0:
            self.checkpoint()

    def checkpoint(self):
        # save for plotting
        # NOTE(review): pickling self also pickles image_pool (and its
        # K2 client) -- confirm the dump can actually be re-loaded.
        with open(self.result_file, 'w') as f:
            self.checkpoint_time = time.time()
            pickle.dump(self, f)

    def xa(self, op):
        """Audit-memento string: "<title>-<OPERATION_NAME>-<op number>"."""
        return (self.title +
                "-" +
                OperationType.reverse_mapping[op.type] +
                "-" +
                str(op.op_number)
                )
# def simulation(cs, cluster_id, image_pool):
def simulation(title,
               cluster_id,
               image_pool,
               result_file,
               vios_password=None,
               num_threads=5,
               target_number_of_deploys=100,
               min_deploys=10,
               max_deploys=20,
               min_snapshots=10,
               max_snapshots=20):
    """Build a Simulation, run it to completion, and return it."""
    s = Simulation(result_file,
                   title,
                   vios_password,
                   target_number_of_deploys,
                   min_deploys, max_deploys, min_snapshots,
                   max_snapshots, image_pool, num_threads)
    start_time = time.time()
    print "START"
    # Echo the run parameters so logs are self-describing.
    x = {}
    x["num_threads"] = num_threads
    x["target_number_of_deploys"] = target_number_of_deploys
    x["min_deploys"] = min_deploys
    x["max_deploys"] = max_deploys
    x["min_snapshots"] = min_snapshots
    x["max_snapshots"] = max_snapshots
    print json.dumps(x, indent=4)
    simulate(s, "P2Z-")
    total_time = time.time() - start_time
    print "END: total runtime: h:m:s >%s<" % \
        (datetime.timedelta(seconds=int(total_time)))
    return s
def run_simulation_with_pool():
    """Setup existing image pool and run"""
    k2acfg = k2asample.getk2acfg()
    k2asample.configure_logging(logging.getLevelName(k2acfg['loglevel']))
    # k2asample.configure_logging(logging.DEBUG,
    #                             k2_loglevel=logging.WARNING,
    #                             logdir=expanduser("~"))
    # # gerald 238
    # k2_url = "9.114.181.238"
    # k2_password = "Passw0rd"
    # k2_url = "hmc5.watson.ibm.com"
    # k2_password = k2acfg['k2_password']
    # cluster_id = "04628d39-67df-3047-b90e-c4d9b4057267"  # p730_810_A
    # result_file = 'my_sim_003_gerald'
    # # gerald 168
    # k2_url = "9.114.181.168"
    # k2_password = "passw0rd"
    # cluster_id = "02803f50-7063-3602-a304-fb54e4ca2d44"  # p730_810_A
    # result_file = 'my_sim_003_gerald_168'
    # # N23 / N24
    # title = "N23/N24"
    # k2_url = "hmc5.watson.ibm.com"
    # k2_password = k2acfg['k2_password']
    # cluster_id = "ea1b0b5f-3b3a-39dc-bade-6e9cebd18bb2"  # cluster-a
    # result_file = 'my_sim_003_cluster_a'
    # # REJY
    # title = "REJY"
    # k2_url = "9.126.139.241"
    # k2_password = k2acfg['k2_password']
    # cluster_id = "c43fbdcd-95f2-3b4a-b643-234ff00eded4"  # TestCluster
    # result_file = 'my_sim_003_REJY'
    # # N8
    # title = "N8"
    # k2_url = "hmc4.watson.ibm.com"
    # k2_password = k2acfg['k2_password']
    # cluster_id = "0c737495-d09a-337a-a7e9-6173d4bb6d20"  # cluster-c
    # result_file = 'my_sim_003_N8'
    # vios_password = "sde2013"
    # N7
    title = "N7"
    k2_url = "hmc5.watson.ibm.com"
    k2_password = k2acfg['k2_password']
    cluster_id = "fe3fbe0f-5ba8-3374-ab75-7b653c9a57ff"  # cluster-b
    result_file = 'my_sim_003_N7'
    # NOTE(review): hard-coded credential below, immediately overridden
    # to None -- consider removing the literal entirely.
    vios_password = "sde2013"
    vios_password = None
    if not MOCK:
        cs = client.Client(k2acfg['api_version'],
                           k2_url,  # k2acfg['k2_url'],
                           k2acfg['k2_username'],
                           k2_password,  # k2acfg['k2_password'],
                           k2_auditmemento=k2acfg['k2_auditmemento'],
                           k2_certpath=k2acfg['k2_certpath'],
                           retries=30,  # k2acfg['retries']
                           timeout=1200,  # k2acfg['timeout']
                           excdir="/tmp/ssp_simulation")  # k2acfg['excdir']
    else:
        cs = None
    use_fake_images = True
    if not use_fake_images:
        existing = ["RHEL64"]
        image_pool = ImagePool(cs, cluster_id, existing)
    else:
        prefix = "P2Z-FAKEIMAGE-"
        num_images = 1
        image_size = 1
        thin = True
        lut = "VirtualIO_Image"
        fake = (prefix, num_images, image_size, thin, lut)
        image_pool = ImagePool(cs, cluster_id, fake=fake)
    print "Image_pool_size: >%d<" % (len(image_pool._images),)
    num_threads = 5
    # num_threads = 1
    # NOTE(review): the repeated reassignments below look like leftover
    # experiment toggles; only the final value of each takes effect.
    target_number_of_deploys = 30
    target_number_of_deploys = 500
    target_number_of_deploys = 5
    target_number_of_deploys = 2000
    # target_number_of_deploys = 1000
    target_number_of_deploys = 1000
    min_deploys = 100
    # max_deploys = 2000
    max_deploys = 200
    min_snapshots = 100
    max_snapshots = 200
    # print "NOMONKEY"
    # min_deploys = 5
    # max_deploys = 10
    s = simulation(title,
                   cluster_id,
                   image_pool,
                   result_file,
                   vios_password=vios_password,
                   num_threads=num_threads,
                   target_number_of_deploys=target_number_of_deploys,
                   min_deploys=min_deploys,
                   max_deploys=max_deploys,
                   min_snapshots=min_snapshots,
                   max_snapshots=max_snapshots)
    # Clean up any fake image LUs created for this run.
    image_pool.destroy()
    r = {}
    r["total_number_of_image_deploys"] = s.total_number_of_image_deploys
    r["total_number_of_snapshot_deploys"] = s.total_number_of_snapshot_deploys
    r["total_number_of_snapshots"] = s.total_number_of_snapshots
    r["current_number_of_deploys"] = len(s.deploys)
    r["current_number_of_snapshots"] = len(s.snapshots)
    r["total_number_of_deploy_exceptions"] = \
        s.total_number_of_deploy_exceptions
    r["total_number_of_delete_exceptions"] = \
        s.total_number_of_delete_exceptions
    print "Result:"
    print json.dumps(r, indent=4)
if __name__ == '__main__':
    # print "NO MONKEY"
    # Green-thread the stdlib before any sockets/locks are created.
    eventlet.monkey_patch()
    try:
        run_simulation_with_pool()
    except Exception as e:
        # Log the full traceback rather than crashing silently.
        logging.exception(e)
| windskyer/k_cinder | paxes_cinder/k2aclient/k2asample/tool_ssp_simulation.py | Python | apache-2.0 | 36,321 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sparkey
import tempfile
import os
import binascii
import unittest
# SHA-1 digests (hex) used as binary, non-UTF8 keys.
# NOTE(review): splitting the triple-quoted literal on '\n' leaves an
# empty string as the first and last list element, so an empty key is
# also written and read back -- confirm this is intentional.
keys = """
a7cb5f92f019fda84d5dd73c257d6f724402d56a
0fae6c3bec0e162343afee39009c8b7e7ad77747
1bff07d74a2080e1ce2b90b12f30f581f993b56f
d04d6442f15527716e89d012018718d124ac5897
7b3605d73c5426f0600acd73535c1a7c96c4ffb9
23c7102024d4aeb4b641db7370083a87586dea43
3fa47cce74af2e39a67d3bf559d8ba2c81688963
280ed99d30b701b97d436b3ac57231e9e38e8a4a
6706a6c6c7ea2f4cfe1eb8dd786427675c4cbb4b
a8a39e52b08763ce1610400f0e789b798e89b885
2d70d150c52804485bc04367155ae4a2ff89768f
28547a874f734dc7062c859e8409a39d7903f9f1
8906ee2fcc0f62f782a9c95557bb785e9145cc33
cec120769a81c544ff171ff21c5b66217103f038
f6a714ad3b43963fe38ab3541286f9440ae96d16
a715a608f9baf1c26e0c59c72592a2b19412270b
30f7286d1100f4c115add1df87312e00a6b71012
059c6aa8b39796b9e6c10a70ac84a209eeed3c81
f9f982ba4ea5906e455cef05036700948ed4c576
""".split('\n')
class TestBinary(unittest.TestCase):
def setUp(self):
self.logfile = tempfile.mkstemp()[1]
self.hashfile = tempfile.mkstemp()[1]
def tearDown(self):
os.remove(self.logfile)
os.remove(self.hashfile)
def test_binary(self):
writer = sparkey.HashWriter(self.hashfile, self.logfile)
for key in keys:
writer.put(binascii.unhexlify(key), 'value')
writer.close()
reader = sparkey.HashReader(self.hashfile, self.logfile)
for key in keys:
self.assertEqual('value', reader[binascii.unhexlify(key)])
reader.close()
| pombredanne/sparkey-python | test/binary_test.py | Python | apache-2.0 | 2,128 |
import inspect

import example

# Print the source code of the get_name method of class A from the
# example module, as retrieved by inspect.getsource.
print(inspect.getsource(example.A.get_name))
| jasonwee/asus-rt-n14uhp-mrtg | src/lesson_language_tools/inspect_getsource_method.py | Python | apache-2.0 | 76 |
"""
Exon IDs of the TP53 gene and one of its transcripts (TP53-026) were copied
from the Ensembl website, make sure same IDs are found by pyensembl.
"""
from __future__ import absolute_import
from pyensembl import cached_release
# Shared release object; cached_release memoizes, so repeated test
# lookups reuse the same downloaded/parsed annotation data.
ensembl = cached_release(77)
# all exons associated with TP53 gene in Ensembl release 77
# (50 exon IDs, copied from the Ensembl website)
TP53_EXON_IDS_RELEASE_77 = [
    'ENSE00002337729', 'ENSE00002419584',
    'ENSE00003625790', 'ENSE00003518480',
    'ENSE00003723991', 'ENSE00003712342',
    'ENSE00001657961', 'ENSE00003725258',
    'ENSE00003740946', 'ENSE00002204316',
    'ENSE00002064269', 'ENSE00003750554',
    'ENSE00003634848', 'ENSE00003492844',
    'ENSE00003735852', 'ENSE00003545950',
    'ENSE00003605891', 'ENSE00002051192',
    'ENSE00002084733', 'ENSE00003726882',
    'ENSE00001146308', 'ENSE00002667911',
    'ENSE00003752869', 'ENSE00003739898',
    'ENSE00003753508', 'ENSE00002034209',
    'ENSE00002030826', 'ENSE00001596491',
    'ENSE00002037735', 'ENSE00003736616',
    'ENSE00002672443', 'ENSE00002226620',
    'ENSE00003715195', 'ENSE00003750794',
    'ENSE00003745267', 'ENSE00003746220',
    'ENSE00003656695', 'ENSE00003669712',
    'ENSE00002051873', 'ENSE00002048269',
    'ENSE00002670535', 'ENSE00002677565',
    'ENSE00003532881', 'ENSE00003520683',
    'ENSE00002076714', 'ENSE00002062958',
    'ENSE00002073243', 'ENSE00003670707',
    'ENSE00002065802', 'ENSE00002362269'
]
def test_exon_ids_of_gene_id():
    """
    test_exon_ids_of_gene_id: Ensure that gene_id ENSG00000141510 (name=TP53),
    has all the same exon IDs found on the Ensembl website.
    """
    exon_ids = ensembl.exon_ids_of_gene_id('ENSG00000141510')
    assert len(exon_ids) == len(TP53_EXON_IDS_RELEASE_77), \
        "Wrong number of exons, expected %d but got %d (n_distinct=%d)" % (
            len(TP53_EXON_IDS_RELEASE_77),
            len(exon_ids),
            len(set(exon_ids)))
    # Membership against a set is O(1) per lookup; the previous check
    # scanned the expected list once per returned ID (O(n^2)).
    expected_ids = set(TP53_EXON_IDS_RELEASE_77)
    assert all(exon_id in expected_ids for exon_id in exon_ids)
def test_exon_ids_of_gene_name():
    """
    test_exon_ids_of_gene_name: Ensure that TP53 has the same exon IDs found
    on the Ensembl website.
    """
    exon_ids = ensembl.exon_ids_of_gene_name("TP53")
    assert len(exon_ids) == len(TP53_EXON_IDS_RELEASE_77), \
        "Wrong number of exons, expected %d but got %d (n_distinct=%d)" % (
            len(TP53_EXON_IDS_RELEASE_77),
            len(exon_ids),
            len(set(exon_ids)))
    # Set lookup avoids the O(n^2) list-membership scan.
    expected_ids = set(TP53_EXON_IDS_RELEASE_77)
    assert all(exon_id in expected_ids for exon_id in exon_ids)
# Exon IDs of transcript TP53-026 (ENST00000610623) in Ensembl
# release 77, copied from the Ensembl website.
TP53_TRANSCRIPT_26_EXON_IDS_RELEASE_77 = [
    'ENSE00002064269',
    'ENSE00003723991',
    'ENSE00003712342',
    'ENSE00003725258',
    'ENSE00003740946',
    'ENSE00003750554',
    'ENSE00003634848',
    'ENSE00003492844'
]
def test_exon_ids_of_transcript_name():
    """
    test_exon_ids_of_transcript_name : Look up exon IDs of transcript TP53-026
    by name and ensure that the exon IDs match what we find on Ensembl's website
    for release 77
    """
    exon_ids = ensembl.exon_ids_of_transcript_name("TP53-026")
    assert len(exon_ids) == len(TP53_TRANSCRIPT_26_EXON_IDS_RELEASE_77), \
        "Expected %d exons, got %d" % (
            len(TP53_TRANSCRIPT_26_EXON_IDS_RELEASE_77),
            len(exon_ids))
    # Set lookup avoids the O(n^2) list-membership scan.
    expected_ids = set(TP53_TRANSCRIPT_26_EXON_IDS_RELEASE_77)
    assert all(
        exon_id in expected_ids
        for exon_id in exon_ids)
# NOTE(review): this function lacks the "test_" prefix, so pytest/nose
# will never collect or run it; confirm whether it should be renamed
# test_exon_ids_of_transcript_id.
def exon_ids_of_transcript_id():
    """
    exon_ids_of_transcript_id : Look up exon IDs of transcript
    ENST00000610623 (name: TP53-026) by its ID and make sure they match
    what we find on the Ensembl website.
    """
    exon_ids = ensembl.exon_ids_of_transcript_id("ENST00000610623")
    assert len(exon_ids) == len(TP53_TRANSCRIPT_26_EXON_IDS_RELEASE_77), \
        "Expected %d exons, got %d" % (
            len(TP53_TRANSCRIPT_26_EXON_IDS_RELEASE_77),
            len(exon_ids))
    assert all(
        exon_id in TP53_TRANSCRIPT_26_EXON_IDS_RELEASE_77
        for exon_id in exon_ids)
| hammerlab/pyensembl | test/test_exon_id.py | Python | apache-2.0 | 3,958 |
"""Weather component that handles meteorological data for your location."""
from __future__ import annotations
from abc import abstractmethod
from collections.abc import Mapping
from datetime import datetime
from typing import Any, cast
from pyclimacell.const import (
CURRENT,
DAILY,
FORECASTS,
HOURLY,
NOWCAST,
PrecipitationType,
WeatherCode,
)
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_PRECIPITATION_PROBABILITY,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_FORECAST_WIND_BEARING,
ATTR_FORECAST_WIND_SPEED,
WeatherEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_API_VERSION,
CONF_NAME,
LENGTH_INCHES,
LENGTH_MILES,
PRESSURE_INHG,
SPEED_KILOMETERS_PER_HOUR,
SPEED_MILES_PER_HOUR,
TEMP_FAHRENHEIT,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.sun import is_up
from homeassistant.util import dt as dt_util
from homeassistant.util.speed import convert as speed_convert
from . import ClimaCellDataUpdateCoordinator, ClimaCellEntity
from .const import (
ATTR_CLOUD_COVER,
ATTR_PRECIPITATION_TYPE,
ATTR_WIND_GUST,
CC_ATTR_CLOUD_COVER,
CC_ATTR_CONDITION,
CC_ATTR_HUMIDITY,
CC_ATTR_OZONE,
CC_ATTR_PRECIPITATION,
CC_ATTR_PRECIPITATION_PROBABILITY,
CC_ATTR_PRECIPITATION_TYPE,
CC_ATTR_PRESSURE,
CC_ATTR_TEMPERATURE,
CC_ATTR_TEMPERATURE_HIGH,
CC_ATTR_TEMPERATURE_LOW,
CC_ATTR_TIMESTAMP,
CC_ATTR_VISIBILITY,
CC_ATTR_WIND_DIRECTION,
CC_ATTR_WIND_GUST,
CC_ATTR_WIND_SPEED,
CC_V3_ATTR_CLOUD_COVER,
CC_V3_ATTR_CONDITION,
CC_V3_ATTR_HUMIDITY,
CC_V3_ATTR_OZONE,
CC_V3_ATTR_PRECIPITATION,
CC_V3_ATTR_PRECIPITATION_DAILY,
CC_V3_ATTR_PRECIPITATION_PROBABILITY,
CC_V3_ATTR_PRECIPITATION_TYPE,
CC_V3_ATTR_PRESSURE,
CC_V3_ATTR_TEMPERATURE,
CC_V3_ATTR_TEMPERATURE_HIGH,
CC_V3_ATTR_TEMPERATURE_LOW,
CC_V3_ATTR_TIMESTAMP,
CC_V3_ATTR_VISIBILITY,
CC_V3_ATTR_WIND_DIRECTION,
CC_V3_ATTR_WIND_GUST,
CC_V3_ATTR_WIND_SPEED,
CLEAR_CONDITIONS,
CONDITIONS,
CONDITIONS_V3,
CONF_TIMESTEP,
DEFAULT_FORECAST_TYPE,
DOMAIN,
MAX_FORECASTS,
)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up a config entry."""
    coordinator = hass.data[DOMAIN][config_entry.entry_id]
    api_version = config_entry.data[CONF_API_VERSION]
    # Pick the entity implementation matching the configured API version.
    if api_version == 3:
        entity_class = ClimaCellV3WeatherEntity
    else:
        entity_class = ClimaCellWeatherEntity
    # One entity per supported forecast granularity.
    entities = []
    for forecast_type in (DAILY, HOURLY, NOWCAST):
        entities.append(
            entity_class(config_entry, coordinator, api_version, forecast_type)
        )
    async_add_entities(entities)
class BaseClimaCellWeatherEntity(ClimaCellEntity, WeatherEntity):
    """Base ClimaCell weather entity.

    Shared behavior for the v3 and v4 API entities; subclasses supply the
    abstract raw-data accessors and condition translation.
    """
    # ClimaCell reports imperial units; Home Assistant converts for metric
    # installations based on these declared native units.
    _attr_temperature_unit = TEMP_FAHRENHEIT
    _attr_pressure_unit = PRESSURE_INHG
    _attr_wind_speed_unit = SPEED_MILES_PER_HOUR
    _attr_visibility_unit = LENGTH_MILES
    _attr_precipitation_unit = LENGTH_INCHES
    def __init__(
        self,
        config_entry: ConfigEntry,
        coordinator: ClimaCellDataUpdateCoordinator,
        api_version: int,
        forecast_type: str,
    ) -> None:
        """Initialize ClimaCell Weather Entity."""
        super().__init__(config_entry, coordinator, api_version)
        self.forecast_type = forecast_type
        # Only the default forecast type is enabled out of the box; the
        # others can be enabled from the entity registry.
        self._attr_entity_registry_enabled_default = (
            forecast_type == DEFAULT_FORECAST_TYPE
        )
        self._attr_name = f"{config_entry.data[CONF_NAME]} - {forecast_type.title()}"
        self._attr_unique_id = f"{config_entry.unique_id}_{forecast_type}"
    @staticmethod
    @abstractmethod
    def _translate_condition(
        condition: str | int | None, sun_is_up: bool = True
    ) -> str | None:
        """Translate ClimaCell condition into an HA condition."""
    def _forecast_dict(
        self,
        forecast_dt: datetime,
        use_datetime: bool,
        condition: int | str,
        precipitation: float | None,
        precipitation_probability: float | None,
        temp: float | None,
        temp_low: float | None,
        wind_direction: float | None,
        wind_speed: float | None,
    ) -> dict[str, Any]:
        """Return formatted Forecast dict from ClimaCell forecast data."""
        if use_datetime:
            # Time-resolved forecasts: choose the day/night variant of the
            # condition depending on whether the sun is up at that time.
            translated_condition = self._translate_condition(
                condition, is_up(self.hass, forecast_dt)
            )
        else:
            # Daily forecasts always report the daytime condition.
            translated_condition = self._translate_condition(condition, True)
        data = {
            ATTR_FORECAST_TIME: forecast_dt.isoformat(),
            ATTR_FORECAST_CONDITION: translated_condition,
            ATTR_FORECAST_PRECIPITATION: precipitation,
            ATTR_FORECAST_PRECIPITATION_PROBABILITY: precipitation_probability,
            ATTR_FORECAST_TEMP: temp,
            ATTR_FORECAST_TEMP_LOW: temp_low,
            ATTR_FORECAST_WIND_BEARING: wind_direction,
            ATTR_FORECAST_WIND_SPEED: wind_speed,
        }
        # Drop unknown values so HA does not render empty attributes.
        return {k: v for k, v in data.items() if v is not None}
    @property
    def extra_state_attributes(self) -> Mapping[str, Any] | None:
        """Return additional state attributes."""
        wind_gust = self.wind_gust
        # Native data is mph; convert gusts to km/h for metric installs.
        if wind_gust and self.hass.config.units.is_metric:
            wind_gust = round(
                speed_convert(
                    self.wind_gust, SPEED_MILES_PER_HOUR, SPEED_KILOMETERS_PER_HOUR
                ),
                4,
            )
        cloud_cover = self.cloud_cover
        return {
            ATTR_CLOUD_COVER: cloud_cover,
            ATTR_WIND_GUST: wind_gust,
            ATTR_PRECIPITATION_TYPE: self.precipitation_type,
        }
    @property
    @abstractmethod
    def cloud_cover(self):
        """Return cloud cover."""
    @property
    @abstractmethod
    def wind_gust(self):
        """Return wind gust speed."""
    @property
    @abstractmethod
    def precipitation_type(self):
        """Return precipitation type."""
    @property
    @abstractmethod
    def pressure(self):
        """Return the raw pressure."""
    @property
    @abstractmethod
    def wind_speed(self):
        """Return the raw wind speed."""
    @property
    @abstractmethod
    def visibility(self):
        """Return the raw visibility."""
class ClimaCellWeatherEntity(BaseClimaCellWeatherEntity):
    """Entity that talks to ClimaCell v4 API to retrieve weather data."""
    @staticmethod
    def _translate_condition(
        condition: int | str | None, sun_is_up: bool = True
    ) -> str | None:
        """Translate ClimaCell condition into an HA condition."""
        if condition is None:
            return None
        # We won't guard here, instead we will fail hard
        condition = WeatherCode(condition)
        if condition in (WeatherCode.CLEAR, WeatherCode.MOSTLY_CLEAR):
            # Clear skies map to different HA conditions for day vs night.
            if sun_is_up:
                return CLEAR_CONDITIONS["day"]
            return CLEAR_CONDITIONS["night"]
        return CONDITIONS[condition]
    @property
    def temperature(self):
        """Return the platform temperature."""
        return self._get_current_property(CC_ATTR_TEMPERATURE)
    @property
    def pressure(self):
        """Return the raw pressure."""
        return self._get_current_property(CC_ATTR_PRESSURE)
    @property
    def humidity(self):
        """Return the humidity."""
        return self._get_current_property(CC_ATTR_HUMIDITY)
    @property
    def wind_gust(self):
        """Return the wind gust speed."""
        return self._get_current_property(CC_ATTR_WIND_GUST)
    @property
    def cloud_cover(self):
        """Return the cloud cover."""
        return self._get_current_property(CC_ATTR_CLOUD_COVER)
    @property
    def precipitation_type(self):
        """Return precipitation type."""
        precipitation_type = self._get_current_property(CC_ATTR_PRECIPITATION_TYPE)
        if precipitation_type is None:
            return None
        # Map the numeric API code to a human-readable name (e.g. "rain").
        return PrecipitationType(precipitation_type).name.lower()
    @property
    def wind_speed(self):
        """Return the raw wind speed."""
        return self._get_current_property(CC_ATTR_WIND_SPEED)
    @property
    def wind_bearing(self):
        """Return the wind bearing."""
        return self._get_current_property(CC_ATTR_WIND_DIRECTION)
    @property
    def ozone(self):
        """Return the O3 (ozone) level."""
        return self._get_current_property(CC_ATTR_OZONE)
    @property
    def condition(self):
        """Return the condition."""
        return self._translate_condition(
            self._get_current_property(CC_ATTR_CONDITION),
            is_up(self.hass),
        )
    @property
    def visibility(self):
        """Return the raw visibility."""
        return self._get_current_property(CC_ATTR_VISIBILITY)
    @property
    def forecast(self):
        """Return the forecast."""
        # Check if forecasts are available
        raw_forecasts = self.coordinator.data.get(FORECASTS, {}).get(self.forecast_type)
        if not raw_forecasts:
            return None
        forecasts = []
        max_forecasts = MAX_FORECASTS[self.forecast_type]
        forecast_count = 0
        # Set default values (in cases where keys don't exist), None will be
        # returned. Override properties per forecast type as needed
        for forecast in raw_forecasts:
            forecast_dt = dt_util.parse_datetime(forecast[CC_ATTR_TIMESTAMP])
            # Throw out past data
            if forecast_dt.date() < dt_util.utcnow().date():
                continue
            values = forecast["values"]
            use_datetime = True
            condition = values.get(CC_ATTR_CONDITION)
            precipitation = values.get(CC_ATTR_PRECIPITATION)
            precipitation_probability = values.get(CC_ATTR_PRECIPITATION_PROBABILITY)
            temp = values.get(CC_ATTR_TEMPERATURE_HIGH)
            temp_low = None
            wind_direction = values.get(CC_ATTR_WIND_DIRECTION)
            wind_speed = values.get(CC_ATTR_WIND_SPEED)
            if self.forecast_type == DAILY:
                use_datetime = False
                temp_low = values.get(CC_ATTR_TEMPERATURE_LOW)
                # Daily precipitation arrives as an hourly rate; scale it to
                # a whole-day accumulation.
                if precipitation:
                    precipitation = precipitation * 24
            elif self.forecast_type == NOWCAST:
                # Precipitation is forecasted in CONF_TIMESTEP increments but in a
                # per hour rate, so value needs to be converted to an amount.
                if precipitation:
                    precipitation = (
                        precipitation / 60 * self._config_entry.options[CONF_TIMESTEP]
                    )
            forecasts.append(
                self._forecast_dict(
                    forecast_dt,
                    use_datetime,
                    condition,
                    precipitation,
                    precipitation_probability,
                    temp,
                    temp_low,
                    wind_direction,
                    wind_speed,
                )
            )
            forecast_count += 1
            # Cap the list at the per-type maximum.
            if forecast_count == max_forecasts:
                break
        return forecasts
class ClimaCellV3WeatherEntity(BaseClimaCellWeatherEntity):
    """Entity that talks to ClimaCell v3 API to retrieve weather data."""
    @staticmethod
    def _translate_condition(
        condition: int | str | None, sun_is_up: bool = True
    ) -> str | None:
        """Translate ClimaCell condition into an HA condition."""
        if not condition:
            return None
        # v3 reports conditions as strings.
        condition = cast(str, condition)
        if "clear" in condition.lower():
            # Clear skies map to different HA conditions for day vs night.
            if sun_is_up:
                return CLEAR_CONDITIONS["day"]
            return CLEAR_CONDITIONS["night"]
        return CONDITIONS_V3[condition]
    @property
    def temperature(self):
        """Return the platform temperature."""
        return self._get_cc_value(
            self.coordinator.data[CURRENT], CC_V3_ATTR_TEMPERATURE
        )
    @property
    def pressure(self):
        """Return the raw pressure."""
        return self._get_cc_value(self.coordinator.data[CURRENT], CC_V3_ATTR_PRESSURE)
    @property
    def humidity(self):
        """Return the humidity."""
        return self._get_cc_value(self.coordinator.data[CURRENT], CC_V3_ATTR_HUMIDITY)
    @property
    def wind_gust(self):
        """Return the wind gust speed."""
        return self._get_cc_value(self.coordinator.data[CURRENT], CC_V3_ATTR_WIND_GUST)
    @property
    def cloud_cover(self):
        """Return the cloud cover."""
        return self._get_cc_value(
            self.coordinator.data[CURRENT], CC_V3_ATTR_CLOUD_COVER
        )
    @property
    def precipitation_type(self):
        """Return precipitation type."""
        return self._get_cc_value(
            self.coordinator.data[CURRENT], CC_V3_ATTR_PRECIPITATION_TYPE
        )
    @property
    def wind_speed(self):
        """Return the raw wind speed."""
        return self._get_cc_value(self.coordinator.data[CURRENT], CC_V3_ATTR_WIND_SPEED)
    @property
    def wind_bearing(self):
        """Return the wind bearing."""
        return self._get_cc_value(
            self.coordinator.data[CURRENT], CC_V3_ATTR_WIND_DIRECTION
        )
    @property
    def ozone(self):
        """Return the O3 (ozone) level."""
        return self._get_cc_value(self.coordinator.data[CURRENT], CC_V3_ATTR_OZONE)
    @property
    def condition(self):
        """Return the condition."""
        return self._translate_condition(
            self._get_cc_value(self.coordinator.data[CURRENT], CC_V3_ATTR_CONDITION),
            is_up(self.hass),
        )
    @property
    def visibility(self):
        """Return the raw visibility."""
        return self._get_cc_value(self.coordinator.data[CURRENT], CC_V3_ATTR_VISIBILITY)
    @property
    def forecast(self):
        """Return the forecast."""
        # Check if forecasts are available
        raw_forecasts = self.coordinator.data.get(FORECASTS, {}).get(self.forecast_type)
        if not raw_forecasts:
            return None
        forecasts = []
        # Set default values (in cases where keys don't exist), None will be
        # returned. Override properties per forecast type as needed
        for forecast in raw_forecasts:
            forecast_dt = dt_util.parse_datetime(
                self._get_cc_value(forecast, CC_V3_ATTR_TIMESTAMP)
            )
            use_datetime = True
            condition = self._get_cc_value(forecast, CC_V3_ATTR_CONDITION)
            precipitation = self._get_cc_value(forecast, CC_V3_ATTR_PRECIPITATION)
            precipitation_probability = self._get_cc_value(
                forecast, CC_V3_ATTR_PRECIPITATION_PROBABILITY
            )
            temp = self._get_cc_value(forecast, CC_V3_ATTR_TEMPERATURE)
            temp_low = None
            wind_direction = self._get_cc_value(forecast, CC_V3_ATTR_WIND_DIRECTION)
            wind_speed = self._get_cc_value(forecast, CC_V3_ATTR_WIND_SPEED)
            if self.forecast_type == DAILY:
                use_datetime = False
                forecast_dt = dt_util.start_of_local_day(forecast_dt)
                precipitation = self._get_cc_value(
                    forecast, CC_V3_ATTR_PRECIPITATION_DAILY
                )
                # v3 daily temperatures arrive as a list of dicts keyed by
                # "min"/"max"; pull the high/low out of that list, keeping
                # the hourly value as a fallback.
                temp = next(
                    (
                        self._get_cc_value(item, CC_V3_ATTR_TEMPERATURE_HIGH)
                        for item in forecast[CC_V3_ATTR_TEMPERATURE]
                        if "max" in item
                    ),
                    temp,
                )
                temp_low = next(
                    (
                        self._get_cc_value(item, CC_V3_ATTR_TEMPERATURE_LOW)
                        for item in forecast[CC_V3_ATTR_TEMPERATURE]
                        if "min" in item
                    ),
                    temp_low,
                )
            elif self.forecast_type == NOWCAST and precipitation:
                # Precipitation is forecasted in CONF_TIMESTEP increments but in a
                # per hour rate, so value needs to be converted to an amount.
                precipitation = (
                    precipitation / 60 * self._config_entry.options[CONF_TIMESTEP]
                )
            forecasts.append(
                self._forecast_dict(
                    forecast_dt,
                    use_datetime,
                    condition,
                    precipitation,
                    precipitation_probability,
                    temp,
                    temp_low,
                    wind_direction,
                    wind_speed,
                )
            )
        return forecasts
| mezz64/home-assistant | homeassistant/components/climacell/weather.py | Python | apache-2.0 | 17,080 |
from unittest import mock
from test import support
from test.test_httpservers import NoLogRequestHandler
from unittest import TestCase
from wsgiref.util import setup_testing_defaults
from wsgiref.headers import Headers
from wsgiref.handlers import BaseHandler, BaseCGIHandler, SimpleHandler
from wsgiref import util
from wsgiref.validate import validator
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler
from wsgiref.simple_server import make_server
from http.client import HTTPConnection
from io import StringIO, BytesIO, BufferedReader
from socketserver import BaseServer
from platform import python_implementation
import os
import re
import signal
import sys
import unittest
class MockServer(WSGIServer):
    """WSGIServer variant that never opens a real socket.

    Skips the TCPServer socket machinery by delegating straight to
    BaseServer.__init__ and faking the bind step.
    """
    def __init__(self, server_address, RequestHandlerClass):
        # Bypass TCPServer.__init__ so no socket is ever created.
        BaseServer.__init__(self, server_address, RequestHandlerClass)
        self.server_bind()
    def server_bind(self):
        # Pretend the requested address was bound verbatim.
        self.server_name, self.server_port = self.server_address
        self.setup_environ()
class MockHandler(WSGIRequestHandler):
    """Request handler wired to in-memory streams instead of a socket."""
    def setup(self):
        # self.request is an (rfile, wfile) pair rather than a socket.
        self.connection = self.request
        self.rfile = self.connection[0]
        self.wfile = self.connection[1]
    def finish(self):
        # Nothing to flush or close for in-memory streams.
        pass
def hello_app(environ, start_response):
    """Minimal WSGI app: always answer 200 with a fixed plaintext body."""
    headers = [
        ('Content-Type', 'text/plain'),
        ('Date', 'Mon, 05 Jun 2006 18:49:54 GMT'),
    ]
    start_response("200 OK", headers)
    return [b"Hello, world!"]
def header_app(environ, start_response):
    """WSGI app that echoes selected request metadata, ';'-joined."""
    start_response("200 OK", [
        ('Content-Type', 'text/plain'),
        ('Date', 'Mon, 05 Jun 2006 18:49:54 GMT')
    ])
    parts = (
        environ['HTTP_X_TEST_HEADER'],
        environ['QUERY_STRING'],
        environ['PATH_INFO'],
    )
    return [';'.join(parts).encode('iso-8859-1')]
def run_amock(app=hello_app, data=b"GET / HTTP/1.0\n\n"):
    """Feed *data* to *app* through the mock server.

    Returns a (response_bytes, stderr_text) pair; stderr is captured for
    the duration of the request.
    """
    server = make_server("", 80, app, MockServer, MockHandler)
    request_stream = BufferedReader(BytesIO(data))
    response_stream = BytesIO()
    saved_stderr = sys.stderr
    sys.stderr = captured = StringIO()
    try:
        server.finish_request(
            (request_stream, response_stream), ("127.0.0.1", 8888))
    finally:
        # Always restore stderr, even if the request handler blows up.
        sys.stderr = saved_stderr
    return response_stream.getvalue(), captured.getvalue()
def compare_generic_iter(make_it, match):
    """Utility to compare a generic 2.1/2.2+ iterator with an iterable.

    Exercises the object returned by *make_it* twice — once through the
    __getitem__ protocol and once through the iter()/next() protocol —
    checking both traversals against the expected *match* sequence.
    'make_it' must be a function returning a fresh iterator each call.
    """
    # Phase 1: sequential __getitem__ access must yield exactly `match`,
    # then raise IndexError at the first out-of-range index.
    seq = make_it()
    for index, expected in enumerate(match):
        if not seq[index] == expected:
            raise AssertionError
    try:
        seq[len(match)]
    except IndexError:
        pass
    else:
        raise AssertionError("Too many items from __getitem__", seq)
    # Phase 2: the iterator protocol must behave identically.  iter() must
    # return the object itself, and exhaustion must raise StopIteration.
    it = make_it()
    if not iter(it) is it:
        raise AssertionError
    for expected in match:
        if not next(it) == expected:
            raise AssertionError
    try:
        next(it)
    except StopIteration:
        pass
    else:
        raise AssertionError("Too many items from .__next__()", it)
class IntegrationTests(TestCase):
    """End-to-end tests that run WSGI apps through the in-memory server."""
    def check_hello(self, out, has_length=True):
        # Build the exact byte response hello_app should produce; the
        # Content-Length header is optional because validator() wrapping
        # hides len() from the handler.
        pyver = (python_implementation() + "/" +
            sys.version.split()[0])
        self.assertEqual(out,
            ("HTTP/1.0 200 OK\r\n"
            "Server: WSGIServer/0.2 " + pyver +"\r\n"
            "Content-Type: text/plain\r\n"
            "Date: Mon, 05 Jun 2006 18:49:54 GMT\r\n" +
            (has_length and "Content-Length: 13\r\n" or "") +
            "\r\n"
            "Hello, world!").encode("iso-8859-1")
            )
    def test_plain_hello(self):
        out, err = run_amock()
        self.check_hello(out)
    def test_environ(self):
        # Duplicate headers must be comma-joined and %-escapes in the URL
        # decoded before reaching the app's environ.
        request = (
            b"GET /p%61th/?query=test HTTP/1.0\n"
            b"X-Test-Header: Python test \n"
            b"X-Test-Header: Python test 2\n"
            b"Content-Length: 0\n\n"
        )
        out, err = run_amock(header_app, request)
        self.assertEqual(
            out.splitlines()[-1],
            b"Python test,Python test 2;query=test;/path/"
        )
    def test_request_length(self):
        # An oversized request line must be rejected with 414.
        out, err = run_amock(data=b"GET " + (b"x" * 65537) + b" HTTP/1.0\n\n")
        self.assertEqual(out.splitlines()[0],
                         b"HTTP/1.0 414 Request-URI Too Long")
    def test_validated_hello(self):
        out, err = run_amock(validator(hello_app))
        # the middleware doesn't support len(), so content-length isn't there
        self.check_hello(out, has_length=False)
    def test_simple_validation_error(self):
        # Headers passed as a tuple (not a list) must trip the validator.
        def bad_app(environ,start_response):
            start_response("200 OK", ('Content-Type','text/plain'))
            return ["Hello, world!"]
        out, err = run_amock(validator(bad_app))
        self.assertTrue(out.endswith(
            b"A server error occurred. Please contact the administrator."
        ))
        self.assertEqual(
            err.splitlines()[-2],
            "AssertionError: Headers (('Content-Type', 'text/plain')) must"
            " be of type list: <class 'tuple'>"
        )
    def test_status_validation_errors(self):
        # Each malformed status string maps to a specific validator message.
        def create_bad_app(status):
            def bad_app(environ, start_response):
                start_response(status, [("Content-Type", "text/plain; charset=utf-8")])
                return [b"Hello, world!"]
            return bad_app
        tests = [
            ('200', 'AssertionError: Status must be at least 4 characters'),
            ('20X OK', 'AssertionError: Status message must begin w/3-digit code'),
            ('200OK', 'AssertionError: Status message must have a space after code'),
        ]
        for status, exc_message in tests:
            with self.subTest(status=status):
                out, err = run_amock(create_bad_app(status))
                self.assertTrue(out.endswith(
                    b"A server error occurred. Please contact the administrator."
                ))
                self.assertEqual(err.splitlines()[-2], exc_message)
    def test_wsgi_input(self):
        # Reading wsgi.input after start_response violates the validator.
        def bad_app(e,s):
            e["wsgi.input"].read()
            s("200 OK", [("Content-Type", "text/plain; charset=utf-8")])
            return [b"data"]
        out, err = run_amock(validator(bad_app))
        self.assertTrue(out.endswith(
            b"A server error occurred. Please contact the administrator."
        ))
        self.assertEqual(
            err.splitlines()[-2], "AssertionError"
        )
    def test_bytes_validation(self):
        # A fully bytes-clean app must pass validation untouched.
        def app(e, s):
            s("200 OK", [
                ("Content-Type", "text/plain; charset=utf-8"),
                ("Date", "Wed, 24 Dec 2008 13:29:32 GMT"),
                ])
            return [b"data"]
        out, err = run_amock(validator(app))
        self.assertTrue(err.endswith('"GET / HTTP/1.0" 200 4\n'))
        ver = sys.version.split()[0].encode('ascii')
        py = python_implementation().encode('ascii')
        pyver = py + b"/" + ver
        self.assertEqual(
                b"HTTP/1.0 200 OK\r\n"
                b"Server: WSGIServer/0.2 "+ pyver + b"\r\n"
                b"Content-Type: text/plain; charset=utf-8\r\n"
                b"Date: Wed, 24 Dec 2008 13:29:32 GMT\r\n"
                b"\r\n"
                b"data",
                out)
    def test_cp1252_url(self):
        # Non-ASCII bytes in the request path must survive the round trip.
        def app(e, s):
            s("200 OK", [
                ("Content-Type", "text/plain"),
                ("Date", "Wed, 24 Dec 2008 13:29:32 GMT"),
                ])
            # PEP3333 says environ variables are decoded as latin1.
            # Encode as latin1 to get original bytes
            return [e["PATH_INFO"].encode("latin1")]
        out, err = run_amock(
            validator(app), data=b"GET /\x80%80 HTTP/1.0")
        self.assertEqual(
            [
                b"HTTP/1.0 200 OK",
                mock.ANY,
                b"Content-Type: text/plain",
                b"Date: Wed, 24 Dec 2008 13:29:32 GMT",
                b"",
                b"/\x80\x80",
            ],
            out.splitlines())
    def test_interrupted_write(self):
        # BaseHandler._write() and _flush() have to write all data, even if
        # it takes multiple send() calls. Test this by interrupting a send()
        # call with a Unix signal.
        threading = support.import_module("threading")
        pthread_kill = support.get_attribute(signal, "pthread_kill")
        def app(environ, start_response):
            start_response("200 OK", [])
            return [bytes(support.SOCK_MAX_SIZE)]
        class WsgiHandler(NoLogRequestHandler, WSGIRequestHandler):
            pass
        server = make_server(support.HOST, 0, app, handler_class=WsgiHandler)
        self.addCleanup(server.server_close)
        interrupted = threading.Event()
        def signal_handler(signum, frame):
            interrupted.set()
        original = signal.signal(signal.SIGUSR1, signal_handler)
        self.addCleanup(signal.signal, signal.SIGUSR1, original)
        received = None
        main_thread = threading.get_ident()
        def run_client():
            http = HTTPConnection(*server.server_address)
            http.request("GET", "/")
            with http.getresponse() as response:
                response.read(100)
                # The main thread should now be blocking in a send() system
                # call. But in theory, it could get interrupted by other
                # signals, and then retried. So keep sending the signal in a
                # loop, in case an earlier signal happens to be delivered at
                # an inconvenient moment.
                while True:
                    pthread_kill(main_thread, signal.SIGUSR1)
                    if interrupted.wait(timeout=float(1)):
                        break
                nonlocal received
                received = len(response.read())
            http.close()
        background = threading.Thread(target=run_client)
        background.start()
        server.handle_request()
        background.join()
        # Everything except the already-consumed 100 bytes must arrive.
        self.assertEqual(received, support.SOCK_MAX_SIZE - 100)
class UtilityTests(TestCase):
    """Tests for the helper functions in wsgiref.util."""
    def checkShift(self,sn_in,pi_in,part,sn_out,pi_out):
        # Verify shift_path_info() moves `part` from PATH_INFO to
        # SCRIPT_NAME, leaving the expected values behind.
        env = {'SCRIPT_NAME':sn_in,'PATH_INFO':pi_in}
        util.setup_testing_defaults(env)
        self.assertEqual(util.shift_path_info(env),part)
        self.assertEqual(env['PATH_INFO'],pi_out)
        self.assertEqual(env['SCRIPT_NAME'],sn_out)
        return env
    def checkDefault(self, key, value, alt=None):
        # Check defaulting when empty
        env = {}
        util.setup_testing_defaults(env)
        if isinstance(value, StringIO):
            self.assertIsInstance(env[key], StringIO)
        elif isinstance(value,BytesIO):
            self.assertIsInstance(env[key],BytesIO)
        else:
            self.assertEqual(env[key], value)
        # Check existing value
        env = {key:alt}
        util.setup_testing_defaults(env)
        self.assertIs(env[key], alt)
    def checkCrossDefault(self,key,value,**kw):
        # One environ key's default may be derived from another's value.
        util.setup_testing_defaults(kw)
        self.assertEqual(kw[key],value)
    def checkAppURI(self,uri,**kw):
        util.setup_testing_defaults(kw)
        self.assertEqual(util.application_uri(kw),uri)
    def checkReqURI(self,uri,query=1,**kw):
        util.setup_testing_defaults(kw)
        self.assertEqual(util.request_uri(kw,query),uri)
    def checkFW(self,text,size,match):
        # FileWrapper must iterate in `size` chunks and close its file
        # only when explicitly closed.
        def make_it(text=text,size=size):
            return util.FileWrapper(StringIO(text),size)
        compare_generic_iter(make_it,match)
        it = make_it()
        self.assertFalse(it.filelike.closed)
        for item in it:
            pass
        self.assertFalse(it.filelike.closed)
        it.close()
        self.assertTrue(it.filelike.closed)
    def testSimpleShifts(self):
        self.checkShift('','/', '', '/', '')
        self.checkShift('','/x', 'x', '/x', '')
        self.checkShift('/','', None, '/', '')
        self.checkShift('/a','/x/y', 'x', '/a/x', '/y')
        self.checkShift('/a','/x/', 'x', '/a/x', '/')
    def testNormalizedShifts(self):
        # '.' and empty segments collapse; '..' pops a SCRIPT_NAME segment.
        self.checkShift('/a/b', '/../y', '..', '/a', '/y')
        self.checkShift('', '/../y', '..', '', '/y')
        self.checkShift('/a/b', '//y', 'y', '/a/b/y', '')
        self.checkShift('/a/b', '//y/', 'y', '/a/b/y', '/')
        self.checkShift('/a/b', '/./y', 'y', '/a/b/y', '')
        self.checkShift('/a/b', '/./y/', 'y', '/a/b/y', '/')
        self.checkShift('/a/b', '///./..//y/.//', '..', '/a', '/y/')
        self.checkShift('/a/b', '///', '', '/a/b/', '')
        self.checkShift('/a/b', '/.//', '', '/a/b/', '')
        self.checkShift('/a/b', '/x//', 'x', '/a/b/x', '/')
        self.checkShift('/a/b', '/.', None, '/a/b', '')
    def testDefaults(self):
        for key, value in [
            ('SERVER_NAME','127.0.0.1'),
            ('SERVER_PORT', '80'),
            ('SERVER_PROTOCOL','HTTP/1.0'),
            ('HTTP_HOST','127.0.0.1'),
            ('REQUEST_METHOD','GET'),
            ('SCRIPT_NAME',''),
            ('PATH_INFO','/'),
            ('wsgi.version', (1,0)),
            ('wsgi.run_once', 0),
            ('wsgi.multithread', 0),
            ('wsgi.multiprocess', 0),
            ('wsgi.input', BytesIO()),
            ('wsgi.errors', StringIO()),
            ('wsgi.url_scheme','http'),
        ]:
            self.checkDefault(key,value)
    def testCrossDefaults(self):
        self.checkCrossDefault('HTTP_HOST',"foo.bar",SERVER_NAME="foo.bar")
        self.checkCrossDefault('wsgi.url_scheme',"https",HTTPS="on")
        self.checkCrossDefault('wsgi.url_scheme',"https",HTTPS="1")
        self.checkCrossDefault('wsgi.url_scheme',"https",HTTPS="yes")
        self.checkCrossDefault('wsgi.url_scheme',"http",HTTPS="foo")
        self.checkCrossDefault('SERVER_PORT',"80",HTTPS="foo")
        self.checkCrossDefault('SERVER_PORT',"443",HTTPS="on")
    def testGuessScheme(self):
        self.assertEqual(util.guess_scheme({}), "http")
        self.assertEqual(util.guess_scheme({'HTTPS':"foo"}), "http")
        self.assertEqual(util.guess_scheme({'HTTPS':"on"}), "https")
        self.assertEqual(util.guess_scheme({'HTTPS':"yes"}), "https")
        self.assertEqual(util.guess_scheme({'HTTPS':"1"}), "https")
    def testAppURIs(self):
        self.checkAppURI("http://127.0.0.1/")
        self.checkAppURI("http://127.0.0.1/spam", SCRIPT_NAME="/spam")
        self.checkAppURI("http://127.0.0.1/sp%E4m", SCRIPT_NAME="/sp\xe4m")
        self.checkAppURI("http://spam.example.com:2071/",
            HTTP_HOST="spam.example.com:2071", SERVER_PORT="2071")
        self.checkAppURI("http://spam.example.com/",
            SERVER_NAME="spam.example.com")
        self.checkAppURI("http://127.0.0.1/",
            HTTP_HOST="127.0.0.1", SERVER_NAME="spam.example.com")
        self.checkAppURI("https://127.0.0.1/", HTTPS="on")
        self.checkAppURI("http://127.0.0.1:8000/", SERVER_PORT="8000",
            HTTP_HOST=None)
    def testReqURIs(self):
        self.checkReqURI("http://127.0.0.1/")
        self.checkReqURI("http://127.0.0.1/spam", SCRIPT_NAME="/spam")
        self.checkReqURI("http://127.0.0.1/sp%E4m", SCRIPT_NAME="/sp\xe4m")
        self.checkReqURI("http://127.0.0.1/spammity/spam",
            SCRIPT_NAME="/spammity", PATH_INFO="/spam")
        self.checkReqURI("http://127.0.0.1/spammity/sp%E4m",
            SCRIPT_NAME="/spammity", PATH_INFO="/sp\xe4m")
        self.checkReqURI("http://127.0.0.1/spammity/spam;ham",
            SCRIPT_NAME="/spammity", PATH_INFO="/spam;ham")
        self.checkReqURI("http://127.0.0.1/spammity/spam;cookie=1234,5678",
            SCRIPT_NAME="/spammity", PATH_INFO="/spam;cookie=1234,5678")
        self.checkReqURI("http://127.0.0.1/spammity/spam?say=ni",
            SCRIPT_NAME="/spammity", PATH_INFO="/spam",QUERY_STRING="say=ni")
        self.checkReqURI("http://127.0.0.1/spammity/spam?s%E4y=ni",
            SCRIPT_NAME="/spammity", PATH_INFO="/spam",QUERY_STRING="s%E4y=ni")
        self.checkReqURI("http://127.0.0.1/spammity/spam", 0,
            SCRIPT_NAME="/spammity", PATH_INFO="/spam",QUERY_STRING="say=ni")
    def testFileWrapper(self):
        self.checkFW("xyz"*50, 120, ["xyz"*40,"xyz"*10])
    def testHopByHop(self):
        # All RFC 2616 hop-by-hop headers, in any capitalization.
        for hop in (
            "Connection Keep-Alive Proxy-Authenticate Proxy-Authorization "
            "TE Trailers Transfer-Encoding Upgrade"
        ).split():
            for alt in hop, hop.title(), hop.upper(), hop.lower():
                self.assertTrue(util.is_hop_by_hop(alt))
        # Not comprehensive, just a few random header names
        for hop in (
            "Accept Cache-Control Date Pragma Trailer Via Warning"
        ).split():
            for alt in hop, hop.title(), hop.upper(), hop.lower():
                self.assertFalse(util.is_hop_by_hop(alt))
class HeaderTests(TestCase):
    """Tests for the case-insensitive mapping in wsgiref.headers.Headers."""
    def testMappingInterface(self):
        test = [('x','y')]
        self.assertEqual(len(Headers()), 0)
        self.assertEqual(len(Headers([])),0)
        self.assertEqual(len(Headers(test[:])),1)
        self.assertEqual(Headers(test[:]).keys(), ['x'])
        self.assertEqual(Headers(test[:]).values(), ['y'])
        self.assertEqual(Headers(test[:]).items(), test)
        self.assertIsNot(Headers(test).items(), test)  # must be copy!
        h = Headers()
        del h['foo']   # should not raise an error
        h['Foo'] = 'bar'
        # Lookups must be case-insensitive across all accessors.
        for m in h.__contains__, h.get, h.get_all, h.__getitem__:
            self.assertTrue(m('foo'))
            self.assertTrue(m('Foo'))
            self.assertTrue(m('FOO'))
            self.assertFalse(m('bar'))
        self.assertEqual(h['foo'],'bar')
        h['foo'] = 'baz'
        self.assertEqual(h['FOO'],'baz')
        self.assertEqual(h.get_all('foo'),['baz'])
        self.assertEqual(h.get("foo","whee"), "baz")
        self.assertEqual(h.get("zoo","whee"), "whee")
        self.assertEqual(h.setdefault("foo","whee"), "baz")
        self.assertEqual(h.setdefault("zoo","whee"), "whee")
        self.assertEqual(h["foo"],"baz")
        self.assertEqual(h["zoo"],"whee")
    def testRequireList(self):
        # The constructor only accepts a list of (name, value) pairs.
        self.assertRaises(TypeError, Headers, "foo")
    def testExtras(self):
        # add_header() keyword params become ;-separated header parameters;
        # a None value yields a bare parameter name.
        h = Headers()
        self.assertEqual(str(h),'\r\n')
        h.add_header('foo','bar',baz="spam")
        self.assertEqual(h['foo'], 'bar; baz="spam"')
        self.assertEqual(str(h),'foo: bar; baz="spam"\r\n\r\n')
        h.add_header('Foo','bar',cheese=None)
        self.assertEqual(h.get_all('foo'),
            ['bar; baz="spam"', 'bar; cheese'])
        self.assertEqual(str(h),
            'foo: bar; baz="spam"\r\n'
            'Foo: bar; cheese\r\n'
            '\r\n'
        )
class ErrorHandler(BaseCGIHandler):
    """Simple handler subclass for testing BaseHandler."""
    # Snapshot the OS environment at class-creation time: BaseHandler's
    # import-time snapshot can be stale if other tests mutate os.environ,
    # which would trip up HandlerTests.testEnviron().
    os_environ = dict(os.environ.items())
    def __init__(self, **kw):
        setup_testing_defaults(kw)
        stdin = BytesIO()
        stdout = BytesIO()
        stderr = StringIO()
        BaseCGIHandler.__init__(
            self, stdin, stdout, stderr, kw,
            multithread=True, multiprocess=True,
        )
class TestHandler(ErrorHandler):
    """Simple handler subclass for testing BaseHandler, w/error passthru"""
    def handle_error(self):
        # Re-raise the active exception instead of rendering an error page,
        # so failing tests show the real traceback.
        raise # for testing, we want to see what's happening
class HandlerTests(TestCase):
def checkEnvironAttrs(self, handler):
env = handler.environ
for attr in [
'version','multithread','multiprocess','run_once','file_wrapper'
]:
if attr=='file_wrapper' and handler.wsgi_file_wrapper is None:
continue
self.assertEqual(getattr(handler,'wsgi_'+attr),env['wsgi.'+attr])
def checkOSEnviron(self,handler):
empty = {}; setup_testing_defaults(empty)
env = handler.environ
from os import environ
for k,v in environ.items():
if k not in empty:
self.assertEqual(env[k],v)
for k,v in empty.items():
self.assertIn(k, env)
def testEnviron(self):
h = TestHandler(X="Y")
h.setup_environ()
self.checkEnvironAttrs(h)
self.checkOSEnviron(h)
self.assertEqual(h.environ["X"],"Y")
def testCGIEnviron(self):
h = BaseCGIHandler(None,None,None,{})
h.setup_environ()
for key in 'wsgi.url_scheme', 'wsgi.input', 'wsgi.errors':
self.assertIn(key, h.environ)
def testScheme(self):
h=TestHandler(HTTPS="on"); h.setup_environ()
self.assertEqual(h.environ['wsgi.url_scheme'],'https')
h=TestHandler(); h.setup_environ()
self.assertEqual(h.environ['wsgi.url_scheme'],'http')
def testAbstractMethods(self):
h = BaseHandler()
for name in [
'_flush','get_stdin','get_stderr','add_cgi_vars'
]:
self.assertRaises(NotImplementedError, getattr(h,name))
self.assertRaises(NotImplementedError, h._write, "test")
def testContentLength(self):
# Demo one reason iteration is better than write()... ;)
def trivial_app1(e,s):
s('200 OK',[])
return [e['wsgi.url_scheme'].encode('iso-8859-1')]
def trivial_app2(e,s):
s('200 OK',[])(e['wsgi.url_scheme'].encode('iso-8859-1'))
return []
def trivial_app3(e,s):
s('200 OK',[])
return ['\u0442\u0435\u0441\u0442'.encode("utf-8")]
def trivial_app4(e,s):
# Simulate a response to a HEAD request
s('200 OK',[('Content-Length', '12345')])
return []
h = TestHandler()
h.run(trivial_app1)
self.assertEqual(h.stdout.getvalue(),
("Status: 200 OK\r\n"
"Content-Length: 4\r\n"
"\r\n"
"http").encode("iso-8859-1"))
h = TestHandler()
h.run(trivial_app2)
self.assertEqual(h.stdout.getvalue(),
("Status: 200 OK\r\n"
"\r\n"
"http").encode("iso-8859-1"))
h = TestHandler()
h.run(trivial_app3)
self.assertEqual(h.stdout.getvalue(),
b'Status: 200 OK\r\n'
b'Content-Length: 8\r\n'
b'\r\n'
b'\xd1\x82\xd0\xb5\xd1\x81\xd1\x82')
h = TestHandler()
h.run(trivial_app4)
self.assertEqual(h.stdout.getvalue(),
b'Status: 200 OK\r\n'
b'Content-Length: 12345\r\n'
b'\r\n')
def testBasicErrorOutput(self):
def non_error_app(e,s):
s('200 OK',[])
return []
def error_app(e,s):
raise AssertionError("This should be caught by handler")
h = ErrorHandler()
h.run(non_error_app)
self.assertEqual(h.stdout.getvalue(),
("Status: 200 OK\r\n"
"Content-Length: 0\r\n"
"\r\n").encode("iso-8859-1"))
self.assertEqual(h.stderr.getvalue(),"")
h = ErrorHandler()
h.run(error_app)
self.assertEqual(h.stdout.getvalue(),
("Status: %s\r\n"
"Content-Type: text/plain\r\n"
"Content-Length: %d\r\n"
"\r\n" % (h.error_status,len(h.error_body))).encode('iso-8859-1')
+ h.error_body)
self.assertIn("AssertionError", h.stderr.getvalue())
def testErrorAfterOutput(self):
    """Once body bytes have been written, a later exception can no longer
    be turned into an error page; the partial output stands as-is."""
    MSG = b"Some output has been sent"
    def error_app(e,s):
        # start_response returns the write() callable; call it immediately.
        s("200 OK",[])(MSG)
        raise AssertionError("This should be caught by handler")
    h = ErrorHandler()
    h.run(error_app)
    self.assertEqual(h.stdout.getvalue(),
        ("Status: 200 OK\r\n"
        "\r\n".encode("iso-8859-1")+MSG))
    # The traceback still goes to the error stream.
    self.assertIn("AssertionError", h.stderr.getvalue())
def testHeaderFormats(self):
    """Exercise header emission across origin-server vs gateway mode,
    HTTP versions, server protocols and Server: header settings."""
    def non_error_app(e,s):
        s('200 OK',[])
        return []
    # Regex for origin-server responses (status line + Date + optional
    # Server header).
    stdpat = (
        r"HTTP/%s 200 OK\r\n"
        r"Date: \w{3}, [ 0123]\d \w{3} \d{4} \d\d:\d\d:\d\d GMT\r\n"
        r"%s" r"Content-Length: 0\r\n" r"\r\n"
    )
    # Exact bytes expected in non-origin (CGI-style "Status:") mode.
    shortpat = (
        "Status: 200 OK\r\n" "Content-Length: 0\r\n" "\r\n"
    ).encode("iso-8859-1")
    for ssw in "FooBar/1.0", None:
        # Pre-ternary and/or idiom: "" when ssw is None.
        sw = ssw and "Server: %s\r\n" % ssw or ""
        for version in "1.0", "1.1":
            for proto in "HTTP/0.9", "HTTP/1.0", "HTTP/1.1":
                h = TestHandler(SERVER_PROTOCOL=proto)
                h.origin_server = False
                h.http_version = version
                h.server_software = ssw
                h.run(non_error_app)
                # Gateway mode always uses the short CGI-style form.
                self.assertEqual(shortpat,h.stdout.getvalue())
                h = TestHandler(SERVER_PROTOCOL=proto)
                h.origin_server = True
                h.http_version = version
                h.server_software = ssw
                h.run(non_error_app)
                if proto=="HTTP/0.9":
                    # HTTP/0.9 has no headers at all.
                    self.assertEqual(h.stdout.getvalue(),b"")
                else:
                    self.assertTrue(
                        re.match((stdpat%(version,sw)).encode("iso-8859-1"),
                            h.stdout.getvalue()),
                        ((stdpat%(version,sw)).encode("iso-8859-1"),
                            h.stdout.getvalue())
                    )
def testBytesData(self):
    """A bytes body passes through verbatim with a computed Content-Length."""
    def app(environ, start_response):
        start_response("200 OK", [
            ("Content-Type", "text/plain; charset=utf-8"),
        ])
        return [b"data"]
    handler = TestHandler()
    handler.run(app)
    expected = (b"Status: 200 OK\r\n"
                b"Content-Type: text/plain; charset=utf-8\r\n"
                b"Content-Length: 4\r\n"
                b"\r\n"
                b"data")
    self.assertEqual(expected, handler.stdout.getvalue())
def testCloseOnError(self):
    """The iterable's close() must be invoked even when iterating it raises
    (PEP 3333 requires close() regardless of how iteration ends)."""
    side_effects = {'close_called': False}
    MSG = b"Some output has been sent"
    def error_app(e,s):
        s("200 OK",[])(MSG)
        class CrashyIterable(object):
            def __iter__(self):
                # Yields once, then blows up mid-iteration.
                while True:
                    yield b'blah'
                    raise AssertionError("This should be caught by handler")
            def close(self):
                side_effects['close_called'] = True
        return CrashyIterable()
    h = ErrorHandler()
    h.run(error_app)
    self.assertEqual(side_effects['close_called'], True)
def testPartialWrite(self):
    """A stdout whose write() consumes only part of the buffer must still
    result in the complete response, with a DeprecationWarning emitted
    (partial-write behavior of the output stream is deprecated)."""
    written = bytearray()
    class PartialWriter:
        def write(self, b):
            # Deliberately accept at most 7 bytes per call.
            partial = b[:7]
            written.extend(partial)
            return len(partial)
        def flush(self):
            pass
    environ = {"SERVER_PROTOCOL": "HTTP/1.0"}
    h = SimpleHandler(BytesIO(), PartialWriter(), sys.stderr, environ)
    msg = "should not do partial writes"
    with self.assertWarnsRegex(DeprecationWarning, msg):
        h.run(hello_app)
    # Despite the 7-byte writes, every byte must eventually be written.
    self.assertEqual(b"HTTP/1.0 200 OK\r\n"
        b"Content-Type: text/plain\r\n"
        b"Date: Mon, 05 Jun 2006 18:49:54 GMT\r\n"
        b"Content-Length: 13\r\n"
        b"\r\n"
        b"Hello, world!",
        written)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/test/test_wsgiref.py | Python | apache-2.0 | 27,734 |
"""
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
"""
import pytest
import salt.modules.modjk as modjk
from tests.support.mock import patch
@pytest.fixture
def configure_loader_modules():
    """Map the modjk module to an empty salt loader configuration."""
    return {modjk: {}}
def test_version():
    """
    Test for return the modjk version
    """
    fake_status = {"worker.jk_version": "mod_jk/1.2.37"}
    with patch.object(modjk, "_do_http", return_value=fake_status):
        # The "mod_jk/" prefix is stripped from the reported version.
        assert modjk.version() == "1.2.37"
def test_get_running():
    """
    Test for get the current running config (not from disk)
    """
    with patch.object(modjk, "_do_http", return_value={}):
        result = modjk.get_running()
    assert result == {}
def test_dump_config():
    """
    Test for dump the original configuration that was loaded from disk
    """
    with patch.object(modjk, "_do_http", return_value={}):
        result = modjk.dump_config()
    assert result == {}
def test_list_configured_members():
    """
    Test for return a list of member workers from the configuration files
    """
    # No balance_workers entry -> empty member list.
    with patch.object(modjk, "_do_http", return_value={}):
        assert modjk.list_configured_members("loadbalancer1") == []
    balance_config = {"worker.loadbalancer1.balance_workers": "SALT"}
    with patch.object(modjk, "_do_http", return_value=balance_config):
        assert modjk.list_configured_members("loadbalancer1") == ["SALT"]
def test_workers():
    """
    Test for return a list of member workers and their status
    """
    # worker.list names two workers but no per-worker status keys are
    # present, so presumably no status can be assembled -- result is {}.
    with patch.object(modjk, "_do_http", return_value={"worker.list": "Salt1,Salt2"}):
        assert modjk.workers() == {}
def test_recover_all():
    """
    Test for set the all the workers in lbn to recover and
    activate them if they are not
    """
    # No balance_workers configured -> nothing to recover.
    with patch.object(modjk, "_do_http", return_value={}):
        assert modjk.recover_all("loadbalancer1") == {}
    with patch.object(
        modjk,
        "_do_http",
        return_value={"worker.loadbalancer1.balance_workers": "SALT"},
    ):
        # Each configured member's status is gathered into the result map.
        with patch.object(
            modjk,
            "worker_status",
            return_value={"activation": "ACT", "state": "OK"},
        ):
            assert modjk.recover_all("loadbalancer1") == {
                "SALT": {"activation": "ACT", "state": "OK"}
            }
def test_reset_stats():
    """
    Test for reset all runtime statistics for the load balancer
    """
    ok_reply = {"worker.result.type": "OK"}
    with patch.object(modjk, "_do_http", return_value=ok_reply):
        assert modjk.reset_stats("loadbalancer1")
def test_lb_edit():
    """
    Test for edit the loadbalancer settings
    """
    ok_reply = {"worker.result.type": "OK"}
    settings = {"vlr": 1, "vlt": 60}
    with patch.object(modjk, "_do_http", return_value=ok_reply):
        assert modjk.lb_edit("loadbalancer1", settings)
def test_bulk_stop():
    """
    Test for stop all the given workers in the specific load balancer
    """
    ok_reply = {"worker.result.type": "OK"}
    nodes = ["node1", "node2", "node3"]
    with patch.object(modjk, "_do_http", return_value=ok_reply):
        assert modjk.bulk_stop(nodes, "loadbalancer1")
def test_bulk_activate():
    """
    Test for activate all the given workers in the specific load balancer
    """
    ok_reply = {"worker.result.type": "OK"}
    nodes = ["node1", "node2", "node3"]
    with patch.object(modjk, "_do_http", return_value=ok_reply):
        assert modjk.bulk_activate(nodes, "loadbalancer1")
def test_bulk_disable():
    """
    Test for disable all the given workers in the specific load balancer
    """
    ok_reply = {"worker.result.type": "OK"}
    nodes = ["node1", "node2", "node3"]
    with patch.object(modjk, "_do_http", return_value=ok_reply):
        assert modjk.bulk_disable(nodes, "loadbalancer1")
def test_bulk_recover():
    """
    Test for recover all the given workers in the specific load balancer
    """
    ok_reply = {"worker.result.type": "OK"}
    nodes = ["node1", "node2", "node3"]
    with patch.object(modjk, "_do_http", return_value=ok_reply):
        assert modjk.bulk_recover(nodes, "loadbalancer1")
def test_worker_status():
    """
    Test for return the state of the worker
    """
    with patch.object(
        modjk,
        "_do_http",
        return_value={"worker.node1.activation": "ACT", "worker.node1.state": "OK"},
    ):
        assert modjk.worker_status("node1") == {"activation": "ACT", "state": "OK"}
    # Unknown worker -> falsy result.
    with patch.object(modjk, "_do_http", return_value={}):
        assert not modjk.worker_status("node1")
def test_worker_recover():
    """
    Test for set the worker to recover this module will fail
    if it is in OK state
    """
    with patch.object(modjk, "_do_http", return_value={}):
        result = modjk.worker_recover("node1", "loadbalancer1")
    assert result == {}
def test_worker_disable():
    """
    Test for set the worker to disable state in the lbn load balancer
    """
    ok_reply = {"worker.result.type": "OK"}
    with patch.object(modjk, "_do_http", return_value=ok_reply):
        assert modjk.worker_disable("node1", "loadbalancer1")
def test_worker_activate():
    """
    Test for set the worker to activate state in the lbn load balancer
    """
    ok_reply = {"worker.result.type": "OK"}
    with patch.object(modjk, "_do_http", return_value=ok_reply):
        assert modjk.worker_activate("node1", "loadbalancer1")
def test_worker_stop():
    """
    Test for set the worker to stopped state in the lbn load balancer
    """
    ok_reply = {"worker.result.type": "OK"}
    with patch.object(modjk, "_do_http", return_value=ok_reply):
        assert modjk.worker_stop("node1", "loadbalancer1")
def test_worker_edit():
    """
    Test for edit the worker settings
    """
    ok_reply = {"worker.result.type": "OK"}
    settings = {"vwf": 500, "vwd": 60}
    with patch.object(modjk, "_do_http", return_value=ok_reply):
        assert modjk.worker_edit("node1", "loadbalancer1", settings)
| saltstack/salt | tests/pytests/unit/modules/test_modjk.py | Python | apache-2.0 | 5,541 |
import tornado
import tornado.gen
from req import Service
from req import ApiRequestHandler
class Index(ApiRequestHandler):
    """Handler serving the contest index page."""

    @tornado.gen.coroutine
    def get(self):
        # render() is called with no template name -- presumably the
        # ApiRequestHandler base resolves a default template; confirm there.
        self.render()
| Tocknicsu/nctuoj_contest | backend/handler/index.py | Python | apache-2.0 | 195 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from datetime import datetime, timedelta
import time
import sys
import os
from apscheduler.schedulers.background import BackgroundScheduler
def tick():
    """Interval job: print the current wall-clock time."""
    now = datetime.now()
    print('Tick! The time is: %s' % now)
def alarm(time):
    # One-shot job; 'time' is the datetime captured when the job was added.
    # NOTE(review): the parameter name shadows the imported ``time`` module
    # inside this function; callers pass it positionally, so a rename would
    # work but would change the keyword interface -- left as-is.
    print('Alarm! This alarm was scheduled at %s.' % time)
def job_function():
    """Cron job: print a greeting with the current time."""
    now = datetime.now()
    print('Hello! The time is: %s' % now)
if __name__ == '__main__':
    scheduler = BackgroundScheduler()
    # Job-store URL may be supplied as the first CLI argument.
    url = sys.argv[1] if len(sys.argv) > 1 else 'sqlite:///example.sqlite'
    scheduler.add_jobstore('sqlalchemy', url=url)
    #interval example
    scheduler.add_job(tick, 'interval', seconds=5)
    #at a specific time example
    alarm_time = datetime.now() + timedelta(seconds=10)
    scheduler.add_job(alarm, 'date', run_date=alarm_time, args=[datetime.now()])
    #add cron job
    scheduler.add_job(job_function, 'cron', minute='37,39,40')
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    scheduler.print_jobs()
    jobs = scheduler.get_jobs()
    for job in jobs:
        print('job: %s trigger: %s nextrun: %s' % (job.name, job.trigger, job.next_run_time.strftime("%Y-%m-%d %H:%M:%S")))
    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary if daemonic mode is enabled but should be done if possible
        scheduler.shutdown()
# Copyright 2010-2021 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Patch to the python wrapper of ../linear_solver.h providing an algebraic API.
This is directly imported, and use exclusively in ./linear_solver.swig. See that
file.
For examples leveraging the code defined here, see ./pywraplp_test.py and
../../../python/linear_programming.py.
"""
import collections
import numbers
# The classes below allow linear expressions to be expressed naturally with the
# usual arithmetic operators +-*/ and with constant numbers, which makes the
# python API very intuitive. See the top-level comment for examples.
inf = float('inf')
class _FakeMPVariableRepresentingTheConstantOffset(object):
    """A dummy class for a singleton instance used to represent the constant.

    To represent linear expressions, we store a dictionary
    MPVariable->coefficient. To represent the constant offset of the expression,
    we use this class as a substitute: its coefficient will be the offset. To
    properly be evaluated, its solution_value() needs to be 1.
    """

    def solution_value(self):  # pylint: disable=invalid-name
        # Always 1 so that offset * solution_value() == offset.
        return 1

    def __repr__(self):
        return 'OFFSET_KEY'
OFFSET_KEY = _FakeMPVariableRepresentingTheConstantOffset()
def CastToLinExp(v):
    """Wrap plain numbers in a Constant; pass linear expressions through."""
    return Constant(v) if isinstance(v, numbers.Number) else v
class LinearExpr(object):
    """Holds linear expressions.

    A linear expression is essentially an offset (floating-point value), and a
    dictionary mapping MPVariable objects to their coefficient (which is also a
    floating-point value).
    """

    # Names of the operator dunders defined below.
    OVERRIDDEN_OPERATOR_METHODS = [
        '__%s__' % opname
        for opname in ['add', 'radd', 'sub', 'rsub', 'mul', 'rmul', 'div',
                       'truediv', 'neg', 'eq', 'ge', 'le', 'gt', 'lt', 'ne']
    ]

    def solution_value(self):  # pylint: disable=invalid-name
        """Value of this linear expr, using the solution_value of its vars."""
        coeffs = self.GetCoeffs()
        # OFFSET_KEY's solution_value() is 1, so the offset is included too.
        return sum(var.solution_value() * coeff for var, coeff in coeffs.items())

    def AddSelfToCoeffMapOrStack(self, coeffs, multiplier, stack):
        """Private function used by GetCoeffs() to delegate processing.

        Implementation must either update coeffs or push to the stack a
        sub-expression and the accumulated multiplier that applies to it.

        Args:
          coeffs: A dictionary of variables' coefficients. It is a defaultdict that
              initializes the new values to 0 by default.
          multiplier: The current accumulated multiplier to apply to this
              expression.
          stack: A list to append to if the current expression is composed of
              sub-expressions. The elements of the stack are pair tuples
              (multiplier, linear_expression).
        """
        raise NotImplementedError

    def GetCoeffs(self):
        """Flatten the expression tree into a {variable: coefficient} map.

        Uses an explicit work stack rather than recursion, so arbitrarily
        deep expressions do not hit the recursion limit.
        """
        coeffs = collections.defaultdict(float)
        stack = [(1.0, self)]
        while stack:
            current_multiplier, current_expression = stack.pop()
            current_expression.AddSelfToCoeffMapOrStack(coeffs, current_multiplier,
                                                        stack)
        return coeffs

    # --- arithmetic operators build new (lazy) expression nodes ---
    def __add__(self, expr):
        return Sum(self, expr)

    def __radd__(self, cst):
        return Sum(self, cst)

    def __sub__(self, expr):
        return Sum(self, -expr)

    def __rsub__(self, cst):
        return Sum(-self, cst)

    def __mul__(self, cst):
        return ProductCst(self, cst)

    def __rmul__(self, cst):
        return ProductCst(self, cst)

    def __div__(self, cst):
        # Python 2 division; mirrors __truediv__.
        return ProductCst(self, 1.0 / cst)

    def __truediv__(self, cst):
        return ProductCst(self, 1.0 / cst)

    def __neg__(self):
        return ProductCst(self, -1)

    # --- comparison operators build LinearConstraint objects ---
    def __eq__(self, arg):
        if isinstance(arg, numbers.Number):
            return LinearConstraint(self, arg, arg)
        else:
            return LinearConstraint(self - arg, 0.0, 0.0)

    def __ge__(self, arg):
        if isinstance(arg, numbers.Number):
            return LinearConstraint(self, arg, inf)
        else:
            return LinearConstraint(self - arg, 0.0, inf)

    def __le__(self, arg):
        if isinstance(arg, numbers.Number):
            return LinearConstraint(self, -inf, arg)
        else:
            return LinearConstraint(self - arg, -inf, 0.0)

    # Strict inequalities and != have no LP meaning: reject them loudly.
    def __lt__(self, arg):
        raise ValueError(
            'Operators "<" and ">" not supported with the linear solver')

    def __gt__(self, arg):
        raise ValueError(
            'Operators "<" and ">" not supported with the linear solver')

    def __ne__(self, arg):
        raise ValueError('Operator "!=" not supported with the linear solver')
class VariableExpr(LinearExpr):
    """Represents a LinearExpr containing only a single variable."""

    def __init__(self, mpvar):
        self.__var = mpvar

    def AddSelfToCoeffMapOrStack(self, coeffs, multiplier, stack):
        # A bare variable contributes the accumulated multiplier directly.
        coeffs[self.__var] += multiplier
class ProductCst(LinearExpr):
    """Represents the product of a LinearExpr by a constant.

    The expression operand is cast with CastToLinExp (so plain numbers are
    accepted); the multiplier itself must be a real number.
    """

    def __init__(self, expr, coef):
        self.__expr = CastToLinExp(expr)
        if isinstance(coef, numbers.Number):
            self.__coef = coef
        else:
            # Fix: the original raised a bare TypeError with no message.
            raise TypeError('ProductCst coefficient must be a number, got %r'
                            % (coef,))

    def __str__(self):
        if self.__coef == -1:
            # Render "-expr" instead of "(-1 * expr)".
            return '-' + str(self.__expr)
        else:
            return '(' + str(self.__coef) + ' * ' + str(self.__expr) + ')'

    def AddSelfToCoeffMapOrStack(self, coeffs, multiplier, stack):
        current_multiplier = multiplier * self.__coef
        # A zero multiplier contributes nothing: prune the sub-expression.
        if current_multiplier:
            stack.append((current_multiplier, self.__expr))
class Constant(LinearExpr):
    """A constant term; accumulates under the OFFSET_KEY pseudo-variable."""

    def __init__(self, val):
        self.__val = val

    def __str__(self):
        return str(self.__val)

    def AddSelfToCoeffMapOrStack(self, coeffs, multiplier, stack):
        coeffs[OFFSET_KEY] += self.__val * multiplier
class SumArray(LinearExpr):
    """Represents the sum of a list of LinearExpr."""

    def __init__(self, array):
        # Numbers in the list are promoted to Constant nodes.
        self.__array = [CastToLinExp(elem) for elem in array]

    def __str__(self):
        return '({})'.format(' + '.join(map(str, self.__array)))

    def AddSelfToCoeffMapOrStack(self, coeffs, multiplier, stack):
        # Append elements in reversed order so that the first popped from the stack
        # in the next iteration of the evaluation loop will be the first item of the
        # array. This keeps the end result of the floating point computation
        # predictable from user perspective.
        for arg in reversed(self.__array):
            stack.append((multiplier, arg))
def Sum(*args):
    """Sum of the given expressions and/or numeric constants."""
    return SumArray(args)

# Backward-compatible alias.
SumCst = Sum  # pylint: disable=invalid-name
class LinearConstraint(object):
    """Represents a linear constraint: LowerBound <= LinearExpr <= UpperBound."""

    def __init__(self, expr, lb, ub):
        self.__expr = expr
        self.__lb = lb
        self.__ub = ub

    def __str__(self):
        if self.__lb > -inf and self.__ub < inf:
            if self.__lb == self.__ub:
                # Equality constraint.
                return str(self.__expr) + ' == ' + str(self.__lb)
            else:
                return (str(self.__lb) + ' <= ' + str(self.__expr) +
                        ' <= ' + str(self.__ub))
        elif self.__lb > -inf:
            return str(self.__expr) + ' >= ' + str(self.__lb)
        elif self.__ub < inf:
            return str(self.__expr) + ' <= ' + str(self.__ub)
        else:
            return 'Trivial inequality (always true)'

    def Extract(self, solver, name=''):
        """Performs the actual creation of the constraint object."""
        coeffs = self.__expr.GetCoeffs()
        # The expression's constant offset is folded into the bounds rather
        # than kept in the row itself.
        constant = coeffs.pop(OFFSET_KEY, 0.0)
        lb = -solver.infinity()
        ub = solver.infinity()
        if self.__lb > -inf:
            lb = self.__lb - constant
        if self.__ub < inf:
            ub = self.__ub - constant
        constraint = solver.RowConstraint(lb, ub, name)
        for v, c, in coeffs.items():
            constraint.SetCoefficient(v, float(c))
        return constraint
| google/or-tools | ortools/linear_solver/linear_solver_natural_api.py | Python | apache-2.0 | 8,096 |
import functools
import re
import pytest
from autoray import do, lazy, to_numpy, infer_backend, get_dtype_name, astype
from numpy.testing import assert_allclose
from .test_autoray import BACKENDS, gen_rand
def test_manual_construct():
    """A LazyArray built by hand from nested args/kwargs must discover its
    dependencies, print a faithful source, and evaluate correctly."""
    def foo(a, b, c):
        a1, a2 = a
        b1 = b['1']
        c1, c2 = c['sub']
        return do('sum', do('stack', (a1, a2, b1, c1, c2)), axis=0)
    x = do('random.uniform', size=(5, 7), like='numpy')
    x0 = lazy.array(x[0, :])
    x1 = lazy.array(x[1, :])
    x2 = lazy.array(x[2, :])
    x3 = lazy.array(x[3, :])
    x4 = lazy.array(x[4, :])
    y = lazy.LazyArray(
        backend=infer_backend(x),
        fn=foo,
        args=((x0, x1), {'1': x2}),
        kwargs=dict(c={'sub': (x3, x4)}),
        shape=(7,),
        dtype='float64',
    )
    # Dependencies must be found even inside nested tuples and dicts.
    assert y.deps == (x0, x1, x2, x3, x4)
    assert re.match(
        r'x\d+ = foo\d+\(\(x\d+, x\d+,\), '
        r'{1: x\d+}, c: {sub: \(x\d+, x\d+,\)}\)',
        y.get_source()
    )
    assert_allclose(y.compute(), x.sum(0))
def modified_gram_schmidt(X):
    """Row-wise modified Gram-Schmidt orthonormalization of ``X``, written
    entirely with backend-agnostic ``do`` calls so it can be traced lazily."""
    Q = []
    for j in range(0, X.shape[0]):
        q = X[j, :]
        for i in range(0, j):
            # Remove the projection of q onto each previously built row.
            rij = do("tensordot", do("conj", Q[i]), q, axes=1)
            q = q - rij * Q[i]
        rjj = do("linalg.norm", q, 2)
        Q.append(q / rjj)
    return do("stack", tuple(Q), axis=0)
def wrap_strict_check(larray):
    """Wrap one node's compute function so that, when executed, the produced
    data is asserted to match the node's declared shape/dtype/backend."""
    fn_orig = larray._fn
    @functools.wraps(fn_orig)
    def checked(*args, **kwargs):
        data = fn_orig(*args, **kwargs)
        assert tuple(data.shape) == larray.shape
        assert get_dtype_name(data) == larray.dtype
        assert infer_backend(data) == larray.backend
        return data
    return checked
def make_strict(larray):
    """Instrument every node of the lazy graph so that computing it verifies
    each intermediate's shape, dtype and backend.

    Bug fix: the original assigned ``larray._fn = wrap_strict_check(larray)``
    inside the loop, re-wrapping only the *root* node once per node in the
    graph (N nested identical checks) and leaving every other node
    unchecked. Each iterated ``node`` must be wrapped instead.
    """
    for node in larray:
        node._fn = wrap_strict_check(node)
@pytest.mark.parametrize("backend", BACKENDS)
def test_lazy_mgs(backend):
    """Trace modified Gram-Schmidt lazily: check repr, graph size, memory
    accounting, and that the computed result matches eager evaluation."""
    if backend == "sparse":
        pytest.xfail("Sparse doesn't support 'linalg.norm' yet...")
    x = gen_rand((5, 5), backend)
    lx = lazy.array(x)
    ly = modified_gram_schmidt(lx)
    make_strict(ly)
    assert str(ly) == (
        f"<LazyArray(fn=stack, shape=(5, 5), "
        f"dtype=float64, backend='{backend}')>"
    )
    assert isinstance(ly, lazy.LazyArray)
    # Sanity relations between the memory-accounting helpers.
    hmax = ly.history_max_size()
    hpeak = ly.history_peak_size()
    htot = ly.history_total_size()
    assert hmax == 25
    assert 25 < hpeak < htot
    assert len(tuple(ly)) == 57
    assert len({node.fn_name for node in ly}) == 9
    assert_allclose(to_numpy(ly.compute()), to_numpy(modified_gram_schmidt(x)))
    # With intermediate sharing, duplicate sub-expressions merge: fewer nodes.
    with lazy.shared_intermediates():
        ly = modified_gram_schmidt(lx)
        make_strict(ly)
    assert len(tuple(ly)) == 51
    assert len({node.fn_name for node in ly}) == 9
    assert_allclose(to_numpy(ly.compute()), to_numpy(modified_gram_schmidt(x)))
def test_partial_evaluation():
    """compute_constants() folds away the subgraph that depends only on
    constant inputs, shrinking the traced graph."""
    la = lazy.array(gen_rand((10, 10), "numpy"))
    lb = lazy.array(gen_rand((10, 10), "numpy"))
    lc = lazy.array(gen_rand((10, 10), "numpy"))
    ld = lazy.array(gen_rand((10, 10), "numpy"))
    lab = do("tanh", la @ lb)
    lcd = lc @ ld
    ls = lab + lcd
    # NOTE(review): ``ld`` is rebound here, discarding the input array bound
    # above (it is later passed in ``variables=[lc, ld]``) -- presumably
    # intentional, but worth confirming.
    ld = do("abs", lab / lcd)
    le = do("einsum", "ab,ba->a", ls, ld)
    lf = do("sum", le)
    make_strict(lf)
    assert len(tuple(lf)) == 12
    lf.compute_constants(variables=[lc, ld])  # constants = [la, lb]
    assert len(tuple(lf)) == 9
    # tanh depended only on constants, so it has been evaluated away.
    assert "tanh" not in {node.fn_name for node in lf}
    lf.compute()
def test_plot():
    """Smoke-test the graph plotting helpers on a non-interactive backend."""
    import matplotlib
    # "Template" is a no-op backend: no display or window is required.
    matplotlib.use("Template")
    la = lazy.array(gen_rand((10, 10), "numpy"))
    lb = lazy.array(gen_rand((10, 10), "numpy"))
    lc = lazy.array(gen_rand((10, 10), "numpy"))
    ld = lazy.array(gen_rand((10, 10), "numpy"))
    lab = do("tanh", la @ lb)
    lcd = lc @ ld
    ls = lab + lcd
    ld = do("abs", lab / lcd)
    le = do("einsum", "ab,ba->a", ls, ld)
    lf = do("sum", le)
    # Only checking that these calls run without error.
    lf.plot()
    lf.plot(variables=[lc, ld])
    lf.plot_history_size_footprint()
def test_share_intermediates():
    """Inside shared_intermediates(), identical sub-expressions collapse into
    a single node while the computed result stays the same."""
    la = lazy.array(gen_rand((10, 10), "numpy"))
    lb = lazy.array(gen_rand((10, 10), "numpy"))
    l1 = do("tanh", la @ lb)
    l2 = do("tanh", la @ lb)
    ly = l1 + l2
    # Without sharing, the duplicate tanh(la @ lb) appears twice: 7 nodes.
    assert len(tuple(ly)) == 7
    y1 = ly.compute()
    with lazy.shared_intermediates():
        l1 = do("tanh", la @ lb)
        l2 = do("tanh", la @ lb)
        ly = l1 + l2
        # With sharing, the duplicates are merged: 5 nodes.
        assert len(tuple(ly)) == 5
    y2 = ly.compute()
    assert_allclose(y1, y2)
@pytest.mark.parametrize("backend", BACKENDS)
def test_transpose_chain(backend):
    """Two composed transposes fuse into one node that depends directly on
    the input (here the permutations are mutually inverse)."""
    lx = lazy.array(gen_rand((2, 3, 4, 5, 6), backend))
    l1 = do("transpose", lx, (1, 0, 3, 2, 4))
    l2 = do("transpose", l1, (1, 0, 3, 2, 4))
    # The intermediate transpose was fused away: l2 points straight at lx.
    assert l2.args[0] is lx
    assert l2.deps == (lx,)
    assert len(tuple(l1)) == 2
    assert len(tuple(l2)) == 2
    assert_allclose(
        to_numpy(lx.compute()), to_numpy(l2.compute()),
    )
@pytest.mark.parametrize("backend", BACKENDS)
def test_reshape_chain(backend):
    """Chained reshapes fuse into a single node depending on the input."""
    lx = lazy.array(gen_rand((2, 3, 4, 5, 6), backend))
    l1 = do("reshape", lx, (6, 4, 30))
    l2 = do("reshape", l1, (-1,))
    assert len(tuple(l1)) == 2
    assert len(tuple(l2)) == 2
    # The intermediate reshape was fused away: l2 points straight at lx.
    assert l2.args[0] is lx
    assert l2.deps == (lx,)
    assert_allclose(
        to_numpy(lx.compute()).flatten(), to_numpy(l2.compute()),
    )
@pytest.mark.parametrize("backend", BACKENDS)
@pytest.mark.parametrize("dtype", ["float64", "complex128"])
def test_svd(backend, dtype):
    """Lazy SVD: factor shapes are correct and U @ diag(s) @ VH rebuilds x."""
    if backend == "sparse":
        pytest.xfail("Sparse doesn't support 'linalg.svd' yet...")
    x = lazy.array(gen_rand((4, 5), backend, dtype))
    U, s, VH = do("linalg.svd", x)
    assert U.shape == (4, 4)
    assert s.shape == (4,)
    assert VH.shape == (4, 5)
    # Singular values come back real; cast so the product dtype matches x.
    s = astype(s, dtype)
    ly = U @ (do("reshape", s, (-1, 1)) * VH)
    make_strict(ly)
    assert_allclose(
        to_numpy(x.compute()), to_numpy(ly.compute()),
    )
@pytest.mark.parametrize("backend", BACKENDS)
def test_qr(backend):
    """Lazy QR: factor shapes are correct and Q @ R rebuilds x."""
    if backend == "sparse":
        pytest.xfail("Sparse doesn't support 'linalg.qr' yet...")
    x = lazy.array(gen_rand((4, 5), backend))
    Q, R = do("linalg.qr", x)
    assert Q.shape == (4, 4)
    assert R.shape == (4, 5)
    ly = Q @ R
    make_strict(ly)
    assert_allclose(
        to_numpy(x.compute()), to_numpy(ly.compute()),
    )
@pytest.mark.parametrize("backend", BACKENDS)
@pytest.mark.parametrize("dtype", ["float64", "complex128"])
def test_eig_inv(backend, dtype):
    """Lazy eigendecomposition: rebuild x as ev @ diag(el) @ inv(ev)."""
    if backend in ("cupy", "dask", "torch", "mars", "sparse"):
        pytest.xfail(f"{backend} doesn't support 'linalg.eig' yet...")
    # N.B. the prob that a real gaussian matrix has all real eigenvalues is
    # ``2**(-d * (d - 1) / 4)`` - see Edelman 1997 - so need ``d >> 5``
    d = 20
    x = lazy.array(gen_rand((d, d), backend, dtype))
    el, ev = do("linalg.eig", x)
    assert el.shape == (d,)
    assert ev.shape == (d, d)
    ly = ev @ (do("reshape", el, (-1, 1)) * do("linalg.inv", ev))
    make_strict(ly)
    assert_allclose(
        to_numpy(x.compute()), to_numpy(ly.compute()),
    )
@pytest.mark.parametrize("backend", BACKENDS)
@pytest.mark.parametrize("dtype", ["float64", "complex128"])
def test_eigh(backend, dtype):
    """Lazy Hermitian eigendecomposition: rebuild x as ev @ diag(el) @ ev.H."""
    if backend in ("dask", "mars", "sparse",):
        pytest.xfail(f"{backend} doesn't support 'linalg.eig' yet...")
    x = lazy.array(gen_rand((5, 5), backend, dtype))
    # Symmetrize so x is genuinely Hermitian.
    x = x + x.H
    el, ev = do("linalg.eigh", x)
    assert get_dtype_name(ev) == dtype
    assert el.shape == (5,)
    assert ev.shape == (5, 5)
    ly = ev @ (do("reshape", el, (-1, 1)) * ev.H)
    make_strict(ly)
    assert_allclose(
        to_numpy(x.compute()), to_numpy(ly.compute()),
    )
@pytest.mark.parametrize("backend", BACKENDS)
@pytest.mark.parametrize("dtype", ["float64", "complex128"])
def test_cholesky(backend, dtype):
    """Lazy Cholesky factorization: rebuild x as C @ C.H."""
    if backend in ("sparse",):
        pytest.xfail(f"{backend} doesn't support 'linalg.cholesky' yet...")
    x = lazy.array(gen_rand((5, 5), backend, dtype))
    # x @ x.H is Hermitian positive definite (a.s.), as cholesky requires.
    x = x @ x.H
    C = do("linalg.cholesky", x)
    assert C.shape == (5, 5)
    ly = C @ C.H
    make_strict(ly)
    assert_allclose(
        to_numpy(x.compute()), to_numpy(ly.compute()),
    )
@pytest.mark.parametrize("backend", BACKENDS)
@pytest.mark.parametrize("dtype", ["float64", "complex128"])
def test_solve(backend, dtype):
    """Lazy linear solve: A @ x must reproduce the right-hand side y."""
    if backend in ("sparse",):
        pytest.xfail(f"{backend} doesn't support 'linalg.solve' yet...")
    A = lazy.array(gen_rand((5, 5), backend, dtype))
    y = lazy.array(gen_rand((5,), backend, dtype))
    x = do("linalg.solve", A, y)
    assert x.shape == (5,)
    # tensorflow e.g. doesn't allow ``A @ x`` for vector x ...
    ly = do("tensordot", A, x, axes=1)
    make_strict(ly)
    assert_allclose(
        to_numpy(y.compute()), to_numpy(ly.compute()),
    )
def test_dunder_magic():
    """Each arithmetic dunder -- left, right and in-place variants -- on a
    LazyArray must mirror the eager numpy result."""
    # multiplication: rmul, mul, imul
    a = do('random.uniform', size=(), like='numpy')
    b = lazy.array(a)
    x, y, z = do('random.uniform', size=(3), like='numpy')
    a = x * a
    b = x * b
    a = a * y
    b = b * y
    a *= z
    b *= z
    assert_allclose(a, b.compute())
    # addition: radd, add, iadd
    a = do('random.uniform', size=(), like='numpy')
    b = lazy.array(a)
    x, y, z = do('random.uniform', size=(3), like='numpy')
    a = x + a
    b = x + b
    a = a + y
    b = b + y
    a += z
    b += z
    assert_allclose(a, b.compute())
    # subtraction: rsub, sub, isub
    a = do('random.uniform', size=(), like='numpy')
    b = lazy.array(a)
    x, y, z = do('random.uniform', size=(3), like='numpy')
    a = x - a
    b = x - b
    a = a - y
    b = b - y
    a -= z
    b -= z
    assert_allclose(a, b.compute())
    # true division: rtruediv, truediv, itruediv
    a = do('random.uniform', size=(), like='numpy')
    b = lazy.array(a)
    x, y, z = do('random.uniform', size=(3), like='numpy')
    a = x / a
    b = x / b
    a = a / y
    b = b / y
    a /= z
    b /= z
    assert_allclose(a, b.compute())
    # floor division: rfloordiv, floordiv, ifloordiv
    a = do('random.uniform', size=(), like='numpy')
    b = lazy.array(a)
    x, y, z = do('random.uniform', size=(3), like='numpy')
    a = x // a
    b = x // b
    a = a // y
    b = b // y
    a //= z
    b //= z
    assert_allclose(a, b.compute())
    # power: rpow, pow, ipow
    a = do('random.uniform', size=(), like='numpy')
    b = lazy.array(a)
    x, y, z = do('random.uniform', size=(3), like='numpy')
    a = x ** a
    b = x ** b
    a = a ** y
    b = b ** y
    a **= z
    b **= z
    assert_allclose(a, b.compute())
    # matrix multiplication: rmatmul, matmul, imatmul (matrices required)
    a = do('random.uniform', size=(3, 3), like='numpy')
    b = lazy.array(a)
    x, y, z = do('random.uniform', size=(3, 3, 3), like='numpy')
    a = x @ a
    b = x @ b
    a = a @ y
    b = b @ y
    a = a @ z
    b @= z
    assert_allclose(a, b.compute())
def test_indexing():
    """Lazy __getitem__ with int, Ellipsis, slice and negative keys must
    match eager numpy indexing."""
    a = do('random.uniform', size=(2, 3, 4, 5), like='numpy')
    b = lazy.array(a)
    for key in [
        0,
        (1, ..., -1),
        (0, 1, slice(None), -2)
    ]:
        assert_allclose(a[key], b[key].compute())
def test_einsum():
    """Lazy einsum over three operands matches the eager contraction."""
    a = do('random.uniform', size=(2, 3, 4, 5), like='numpy')
    b = do('random.uniform', size=(4, 5), like='numpy')
    c = do('random.uniform', size=(6, 2, 3), like='numpy')
    eq = 'abcd,cd,fab->fd'
    x1 = do('einsum', eq, a, b, c)
    la, lb, lc = map(lazy.array, (a, b, c))
    x2 = do('einsum', eq, la, lb, lc)
    assert_allclose(x1, x2.compute())
def test_tensordot():
    """Lazy tensordot with explicit axis pairs matches the eager result."""
    a = do('random.uniform', size=(7, 3, 4, 5), like='numpy')
    b = do('random.uniform', size=(5, 6, 3, 2), like='numpy')
    x1 = do('tensordot', a, b, axes=[(1, 3), (2, 0)])
    la, lb = map(lazy.array, (a, b))
    x2 = do('tensordot', la, lb, axes=[(1, 3), (2, 0)])
    assert_allclose(x1, x2.compute())
def test_use_variable_to_trace_function():
    """Trace a computation over placeholder Variables, then call the
    extracted function on concrete arrays."""
    a = lazy.Variable(shape=(2, 3), backend='numpy')
    b = lazy.Variable(shape=(3, 4), backend='numpy')
    c = do('tanh', a @ b)
    f = c.get_function([a, b])
    x = do('random.uniform', size=(2, 3), like='numpy')
    y = do('random.uniform', size=(3, 4), like='numpy')
    z = f([x, y])
    assert z.shape == (2, 4)
| jcmgray/autoray | tests/test_lazy.py | Python | apache-2.0 | 11,880 |
"""
Demo of the histogram (hist) function used to plot a cumulative distribution.
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import mlab
def plot_loss_over_time(file_name):
    """Plot the running fraction of packets received over time.

    Reads a trace file where each line is "<send_time> <recv_time>"
    (nanoseconds), orders the send/receive events chronologically, and
    plots, at each event time, the cumulative ratio received/sent.

    :param file_name: path to the trace file.
    """
    with open(file_name, 'r') as f:
        lines = f.readlines()
    events = order_events(lines)
    packets_sent = 0
    packets_received = 0
    time_stamps = []
    packet_percent = []
    # 'kind' instead of the original 'type', which shadowed the builtin.
    for time, kind in events:
        if kind == 's':
            packets_sent += 1
        else:
            packets_received += 1
        time_stamps.append(time)
        # packets_sent >= 1 here, assuming every receive time is >= its
        # send time (so a send event always sorts first) -- TODO confirm.
        packet_percent.append(float(packets_received) / packets_sent)
    plt.plot(time_stamps, packet_percent)
    plt.show()
def order_events(lines):
    """Parse trace lines into a time-ordered list of (timestamp, kind) pairs.

    Each input line holds two integers: a packet's send time and receive
    time. Events are tagged 's' (sent) or 'r' (received) and returned
    sorted by timestamp.

    :param lines: iterable of "<send_time> <recv_time>" strings.
    :return: chronologically sorted list of (int_time, 's'|'r') tuples.
    """
    events = []
    for line in lines:
        # split() tolerates repeated whitespace and trailing newlines,
        # unlike the original split(" ").
        fields = line.split()
        send_time = int(fields[0])
        recv_time = int(fields[1])
        events.append((send_time, 's'))
        events.append((recv_time, 'r'))
    events.sort()
    return events
# NOTE(review): runs at import time; consider an ``if __name__ == "__main__"``
# guard so importing this module does not trigger plotting.
plot_loss_over_time("tcp_160_10.out")
# 1. time on x-axis, percent packets received on y-axis
# 2. loss percentage on x-axis, 90 percentile latency on y-axis
# time packet sent, time packet received in nanoseconds
| andres-erbsen/rrtcp | results/scripts/plotting.py | Python | apache-2.0 | 1,242 |
#!/usr/bin/env python
# coding: utf-8
import email.utils
import logging
import os
import smtplib
import threading
from email.mime.text import MIMEText
from email.MIMEMultipart import MIMEMultipart
logger = logging.getLogger("maillog")
class MailBase(threading.Thread):
    """Send a mail on a background thread over SMTP.

    Note: written for Python 2 (uses the ``email.MIMEMultipart`` import path).
    """

    # Default SMTP port used when basic_info omits "PORT".
    mailServerPort = 25

    def __init__(self, subject, content, basic_info, attachment=""):
        """
        Threaded mail handling class.
        @Params target: file or string
        basicInfo= {
            "TOLIST": ["heyu@ucweb.com"],
            "SERVER": "mail.ucweb.com",
            "PORT": 25, #25 if missing
            "USERNAME": "test@ucweb.com",
            "PASSWORD": ""
        }
        (attachment)
        :param subject: mail subject line
        :param content: literal mail body (FileMail loads it from a file;
            file names are at most 50 characters)
        :param basic_info: mail server configuration, see above
        :param attachment: optional path of a file to attach
        """
        threading.Thread.__init__(self)
        self._set_basic_info(basic_info)
        self.subject = subject
        self.content = content
        self.attachment = attachment

    def _set_basic_info(self, basic_info):
        """Validate ``basic_info`` and copy it into ``self.BASICS``.

        Raises BadEmailSettings when it is not a dict, has the wrong number
        of keys (after defaulting "PORT"), or is missing a required key.

        :type basic_info: dict
        """
        self.BASICS = {}
        basic = ["TOLIST", "SERVER", "USERNAME", "PASSWORD", "PORT"]
        if isinstance(basic_info, dict):
            if "PORT" not in basic_info.keys():
                basic_info["PORT"] = self.mailServerPort
            if len(basic_info.keys()) != len(basic):
                logger.error("params nums not correct~")
                raise BadEmailSettings("basic_info param error")
            # NOTE(review): the loop variable shadows the ``basic`` list it
            # iterates over; it works, but a rename would aid readability.
            for basic in basic:
                if basic in basic_info.keys():
                    self.BASICS[basic] = basic_info[basic]
                else:
                    logger.error("mail settings has no %s", basic)
                    raise BadEmailSettings()
        else:
            logger.error("basic_info should be a dict")
            raise BadEmailSettings("basic_info not a dict")

    def _send_mail(self, subject, content, attachment):
        # Subject arrives as a UTF-8 encoded byte string (Python 2);
        # decode it for the message headers.
        subject = subject.decode("utf-8")
        self._do_send_mail(self.BASICS["TOLIST"], subject, content, attachment)

    def run(self):
        """Thread entry point: send unless subject or content is empty."""
        if not self.subject or not self.content:
            return
        self._send_mail(self.subject, self.content, self.attachment)

    def _do_send_mail(self, to, subject, content, attachment):
        """Build the MIME message and deliver it via SMTP."""
        msg = MIMEMultipart('related')
        msg['To'] = ', '.join(to)
        msg['From'] = email.utils.formataddr((self.BASICS["USERNAME"], self.BASICS["USERNAME"]))
        msg['Subject'] = subject
        # msgText = MIMEText(content.encode("utf-8"), "html")
        msgtext = MIMEText(content, "html")
        msgtext.set_charset('utf-8')
        msg.attach(msgtext)
        if attachment:
            # The attachment is base64-encoded and marked for download.
            att = MIMEText(open(attachment, 'rb').read(), 'base64', 'utf-8')
            att["Content-Type"] = 'application/octet-stream'
            att["Content-Disposition"] = 'attachment;filename="%s"' % attachment
            msg.attach(att)
        server = smtplib.SMTP(self.BASICS["SERVER"], self.BASICS["PORT"])
        server.set_debuglevel(False)  # SMTP protocol chatter disabled
        server.login(self.BASICS["USERNAME"], self.BASICS["PASSWORD"])
        try:
            server.sendmail(self.BASICS["USERNAME"], to, msg.as_string())
        finally:
            server.quit()
class FileMail(MailBase):
    """Mail whose body is loaded from a file.

    ``mail_file`` is treated as a path only when it is at most 50 characters
    long and names an existing file; otherwise the body is left empty.
    Newlines are converted to ``<br/>`` for the HTML mail body.
    """

    def __init__(self, subject, mail_file, basic_info, attachment=""):
        if len(mail_file) <= 50 and os.path.isfile(mail_file):
            # Context manager guarantees the handle is closed even if
            # read() raises (the original used explicit open()/close()).
            with open(mail_file) as fd:
                content = fd.read()
            content = "<br/>".join(content.split("\n"))
        else:
            content = ""
        super(FileMail, self).__init__(subject, content, basic_info, attachment)
class BadEmailSettings(Exception):
    """Raised when the mail configuration (basic_info) is missing or malformed."""
    pass
| gannicus-yu/pyutils | myutils/mailbase.py | Python | apache-2.0 | 4,008 |
import re
import os
import logging
from collections import defaultdict
from insights.config.static import get_config
from insights.config import AnalysisTarget, META_FILE_LIST, CommandSpec
logger = logging.getLogger(__name__)
logger.setLevel(logging.FATAL)
class SpecMapper(object):
    """
    This class wraps a tarfile-like object with spec mapping of names.

    ``symbolic_files`` maps each symbolic spec name to the list of archive
    paths that matched its spec.  Updated to run on both Python 2 and 3:
    ``filter()`` results are materialized into lists before truthiness
    tests (a Python 3 filter object is always truthy, which previously made
    the early ``break`` in add_files() fire even with zero matches), and
    ``dict.items()`` replaces the Python-2-only ``iteritems()``.
    """
    def __init__(self, tf_object, data_spec_config=None):
        self.tf = tf_object
        self.all_names = [f for f in self.tf.getnames() if self._name_filter(f)]
        # Common prefix of all member names is treated as the archive root.
        self.root = os.path.commonprefix(self.all_names)
        logger.debug("SpecMapper.root: %s", self.root)
        self.data_spec_config = data_spec_config if data_spec_config else get_config()
        self.symbolic_files = defaultdict(list)
        self.analysis_target = self._determine_analysis_target()
        self.create_symbolic_file_list()
    def _name_filter(self, name):
        """Ignore directories and nested tarballs."""
        return not (self.tf.isdir(name) or name.endswith(".tar.gz"))
    def _get_first_matching(self, pattern):
        """Return the first archive name matching *pattern* at the archive
        root, or None if nothing matches."""
        regex = re.compile(self.root + "?" + pattern + "$")
        for name in self.all_names:
            if regex.match(name):
                return name
        return None
    def _determine_analysis_target(self):
        """Resolve the analysis-target meta file, if the archive has one."""
        path = self._get_first_matching(META_FILE_LIST["analysis_target"])
        if path:
            section = self.get_content(path, symbolic=False)[0].strip()
            return AnalysisTarget.get(section)
        return None
    def _extend_symbolic_files(self, symbolic_name, matches):
        """Record *matches* (archive paths) under *symbolic_name*."""
        if matches:
            self.symbolic_files[symbolic_name].extend(matches)
    def filter_commands(self, files):
        """Yield only paths that look like captured command output."""
        for f in files:
            if "sos_commands" in f or "insights_commands" in f or "commands/" in f:
                yield f
    def add_files(self, file_map):
        """Match every spec in *file_map* against the archive contents and
        populate ``symbolic_files``."""
        logger.debug("ROOT: %s", self.root)
        unrooted_map = {
            f.split(self.root)[1]: f
            for f in self.all_names
            if f != self.root
        }
        unrooted_files = set(unrooted_map)
        commands = set(self.filter_commands(unrooted_files))
        non_commands = unrooted_files - commands
        if logger.level == logging.DEBUG:
            logger.debug("\n".join(uf for uf in sorted(unrooted_files)))
        for symbolic_name, spec_group in file_map.items():
            for spec in spec_group.get_all_specs():  # Usually just one item in paths
                is_command = isinstance(spec, CommandSpec)
                # foreman-debug archives contain flat structures of commands
                # that can be confused with other command outputs easily so
                # we'll add a ^ to the beginning of the pattern if it is not an
                # insights archive
                if '/' in spec.get_path() or self.analysis_target is not None:
                    prefix = ''
                else:
                    prefix = '^'
                r = spec.get_regex(prefix=prefix, analysis_target=self.analysis_target)
                if is_command or "_commands/" in r.pattern:
                    filter_set = commands
                else:
                    filter_set = non_commands
                logger.debug("Pattern: %s", r.pattern)
                # list(...) is essential on Python 3: a bare filter object is
                # truthy even when it yields nothing.
                matches = list(filter(r.search, filter_set))
                if matches:
                    matches = [unrooted_map[m] for m in matches]
                    # In order to prevent matching *dumb* symlinks in some
                    # archive formats, we are going to filter out symlinks when
                    # calculating matches for CommandSpecs
                    if is_command:
                        matches = [n for n in matches if not self.tf.issym(n)]
                    # filter out directories that match
                    matches = [m for m in matches if not self.tf.isdir(m)]
                    if not matches:
                        continue
                    # In order to prevent accidental duplication when matching
                    # files, we only allow the first matched file to be added
                    # to the working set for non-pattern file specs.
                    if not spec.is_multi_output() and len(matches) > 1:
                        logger.debug("Non multi-output file had multiple matches: %s", matches)
                        self._extend_symbolic_files(symbolic_name, [matches[0]])
                    else:
                        self._extend_symbolic_files(symbolic_name, matches)
                    break  # only add the first matching pattern
    def _add_meta_files(self):
        """Map each meta file present in the archive to its symbolic name."""
        for symbolic_name, suffix in META_FILE_LIST.items():
            archive_path = self._get_first_matching(suffix)
            if archive_path:
                self._extend_symbolic_files(symbolic_name, [archive_path])
    def create_symbolic_file_list(self):
        """Build the symbolic-name -> archive-path mapping for this archive."""
        self.add_files(self.data_spec_config.get_spec_lists())
        if not self.analysis_target:
            self.add_files(self.data_spec_config.get_meta_specs())
        else:
            self._add_meta_files()
    def get_content(self, path, split=True, symbolic=True, default=""):
        """Returns file content from path, where path is the full pathname inside
        the archive"""
        if symbolic:
            path = self.symbolic_files.get(path, [""])[0]
        content = self.tf.extractfile(path) if path in self.all_names else default
        return list(content.splitlines()) if split else content
    def exists(self, path, symbolic=True):
        """True if *path* (symbolic or literal) is present in the archive."""
        return path in self.symbolic_files if symbolic else path in self.all_names
| PaulWay/insights-core | insights/core/specs.py | Python | apache-2.0 | 5,582 |
from django.urls import path, re_path
from django.conf import settings
from django.contrib.auth import views as auth_views
# Authentication and password-management routes.
# NOTE: re_path() does not anchor patterns automatically.  The original
# unanchored 'login/' and 'logout/' strings were treated as regexes and
# would match any URL merely containing those substrings; they are now
# anchored like every other route in this list.
urlpatterns = [
    re_path(
        r'^login/$',
        auth_views.LoginView.as_view(template_name='dj_auth/login.html'),
        name='login'),
    re_path(
        r'^logout/$',
        auth_views.LogoutView.as_view(),
        name='logout'),
    re_path(
        r'^password_reset/$',
        auth_views.PasswordResetView.as_view(
            html_email_template_name='dj_auth/password_reset_email.html',
            template_name='dj_auth/password_reset_form.html'),
        name='password_reset'),
    re_path(
        r'^password_reset/done/$',
        auth_views.PasswordResetDoneView.as_view(template_name='dj_auth/password_reset_done.html'),
        name='password_reset_done'),
    re_path(
        r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        auth_views.PasswordResetConfirmView.as_view(template_name='dj_auth/password_reset_confirm.html'),
        name='password_reset_confirm'),
    re_path(
        r'^reset/done/$',
        auth_views.PasswordResetCompleteView.as_view(template_name='dj_auth/password_reset_complete.html'),
        name='password_reset_complete'),
    re_path(
        r'^password_change/$',
        auth_views.PasswordChangeView.as_view(template_name='dj_auth/password_change_form.html'),
        name='password_change'),
    re_path(
        r'^password_change/done/$',
        auth_views.PasswordChangeDoneView.as_view(template_name='dj_auth/password_change_done.html'),
        name='password_change_done'),
]
| mava-ar/sgk | src/dj_auth/urls.py | Python | apache-2.0 | 1,618 |
import os
import shutil
import csv
class Restorer:
    """Restores files recorded in a tab-separated backup manifest.

    Each manifest line maps a backup copy (column 0) to its original
    location (column 1); entries are replayed in reverse (newest-first)
    order.
    """

    def __init__(self, backupDir):
        """:param backupDir: directory holding the manifest; a trailing '/' is ensured."""
        self.backupDir = backupDir
        if not self.backupDir.endswith('/'):
            self.backupDir += '/'

    def Run(self, filenamesListFname, doDelete=False):
        """Replay the manifest *filenamesListFname*; a missing manifest is a no-op.

        :param doDelete: when True, remove each backup copy after restoring
            it and finally remove the manifest itself.
        """
        manifest_path = self.backupDir + filenamesListFname
        if not os.path.exists(manifest_path):
            return
        # Fix: open in text mode -- the csv module operates on strings under
        # Python 3 and raises when handed a binary-mode file.
        with open(manifest_path, 'r') as fnamesList:
            for line in reversed(list(csv.reader(fnamesList, delimiter='\t'))):
                shutil.copyfile(line[0], line[1])
                if doDelete:
                    os.remove(line[0])
        if doDelete:
            os.remove(manifest_path)
| mozafari/vprofiler | src/Restorer/Restorer.py | Python | apache-2.0 | 744 |
from io import StringIO
import csv
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.core.files.base import ContentFile
from django.test import TestCase
from django.urls import reverse
from django.utils.timezone import now
from register.forms import CSV_FIELDS, BlacklistedNumberEditForm, WhitelistedNumberEditForm
from register.models import Blacklist, Whitelist, RegistrationCenter, Registration
from register.tests.base import LibyaTest
from register.tests.factories import WhitelistFactory, BlacklistFactory, \
RegistrationCenterFactory, RegistrationFactory
from register.tests.test_center_csv import CenterFileTestMixin
from libya_elections.phone_numbers import get_random_phone_number, format_phone_number
from libya_elections.tests.utils import ResponseCheckerMixin
from libya_site.tests.factories import UserFactory, DEFAULT_USER_PASSWORD
from polling_reports.models import StaffPhone
from polling_reports.tests.factories import StaffPhoneFactory
from staff.tests.base import StaffUserMixin
class ImportBlackWhitelistViewMixin(StaffUserMixin, ResponseCheckerMixin):
    """Base class for TestImportBlacklistView and TestImportWhitelistView.
    This doesn't inherit from TestCase, so it isn't executed by itself.
    """
    def setUp(self):
        super(ImportBlackWhitelistViewMixin, self).setUp()
        # Concrete subclasses must assign these before calling setUp():
        # self.url = None
        # self.model = None
        # self.factory = None
    def test_staff_can_see_form(self):
        """Staff users get the upload form: a file field, no password field."""
        rsp = self.client.get(self.url, follow=False)
        form = rsp.context['form']
        self.assertNotIn('password', form.fields)
        self.assertIn('import_file', form.fields)
    def test_nonstaff_cant_see_form(self):
        """Non-staff users are forbidden from the upload view."""
        self.client.logout()
        self.nonstaff_user = UserFactory(username='joe', password='puppy')
        self.client.login(username='joe', password='puppy')
        self.assertForbidden(self.client.get(self.url))
    def test_valid_form(self):
        """Numbers separated by any line-ending style are all imported."""
        # with all combinations of line endings (\r\n, \n, \r)
        numbers = [get_random_phone_number() for i in range(4)]
        punctuated_numbers = [format_phone_number(number)
                              for number in numbers]
        file_content = ("""%s\r\n%s\n \n%s\r%s""" % (
            punctuated_numbers[0],
            punctuated_numbers[1],
            punctuated_numbers[2],
            punctuated_numbers[3],
        )).encode()
        blackwhitelist_file = ContentFile(file_content, name='bw.txt')
        data = {'import_file': blackwhitelist_file}
        rsp = self.client.post(self.url, data=data)
        # Assert that we redirect
        self.assertEqual(302, rsp.status_code)
        # Every uploaded number must land in the list, unpunctuated.
        bwlist = self.model.objects.values_list('phone_number', flat=True)
        for number in numbers:
            self.assertIn(number, bwlist)
        self.assertEqual(len(bwlist), 4)
    def test_import_number_twice_works(self):
        "Importing a number that is already in list shouldn't cause an error"
        number = get_random_phone_number()
        self.factory(phone_number=number)
        file_content = number.encode()
        blackwhitelist_file = ContentFile(file_content, name='bw.txt')
        data = {'import_file': blackwhitelist_file}
        rsp = self.client.post(self.url, data=data)
        # Assert that we redirect
        self.assertEqual(302, rsp.status_code)
        # Still exactly one entry -- no duplicate was created.
        bwlist = self.model.objects.values_list('phone_number', flat=True)
        self.assertEqual(len(bwlist), 1)
        self.assertIn(number, bwlist)
    def test_import_number_cant_start_with_2180(self):
        "Ensures that the number doesn't start with 2180"
        number = '218091234123'
        file_content = number.encode()
        blackwhitelist_file = ContentFile(file_content, name='bw.txt')
        data = {'import_file': blackwhitelist_file}
        rsp = self.client.post(self.url, data=data, follow=True)
        self.assertEqual(200, rsp.status_code)
        # Nothing imported, and the page names the offending line number.
        bwlist = self.model.objects.values_list('phone_number', flat=True)
        self.assertEqual(len(bwlist), 0)
        self.assertContains(rsp, 'Numbers on these lines not imported because '
                                 'they are not valid phone numbers: 1.')
class TestImportBlacklistView(ImportBlackWhitelistViewMixin, LibyaTest):
    """Exercise uploading a list of blacklisted numbers"""
    def setUp(self):
        # Configure the mixin for the Blacklist model before its setUp runs.
        self.url = reverse('blacklisted-numbers-upload')
        self.model = Blacklist
        self.factory = BlacklistFactory
        self.permissions = ('add_blacklist', 'browse_blacklist')
        super(TestImportBlacklistView, self).setUp()
class TestImportWhitelistView(ImportBlackWhitelistViewMixin, LibyaTest):
    """Exercise uploading a list of whitelisted numbers"""
    def setUp(self):
        # Configure the mixin for the Whitelist model before its setUp runs.
        self.url = reverse('whitelisted-numbers-upload')
        self.model = Whitelist
        self.factory = WhitelistFactory
        self.permissions = ('add_whitelist', 'browse_whitelist')
        super(TestImportWhitelistView, self).setUp()
class BlackWhitelistEditFormMixin(StaffUserMixin, ResponseCheckerMixin):
    """Base class for TestBlacklistChangeForm and TestWhitelistChangeForm.
    This doesn't inherit from TestCase, so it isn't executed by itself.
    """
    def setUp(self):
        super(BlackWhitelistEditFormMixin, self).setUp()
        # Concrete subclasses must assign these before calling setUp():
        # self.factory = None
        # self.form = None
    def test_cleans_phone_number(self):
        """Punctuation is stripped from the phone number during cleaning."""
        number = get_random_phone_number()
        punctuated_number = format_phone_number(number)
        form = self.form(data={'phone_number': punctuated_number})
        self.assertTrue(form.is_valid(), form.errors)
        self.assertEqual(form.cleaned_data['phone_number'], number)
    def test_add_dupe_shows_form_error(self):
        """Submitting an already-listed number yields a duplicate-value error."""
        number = get_random_phone_number()
        self.factory(phone_number=number)
        form = self.form(data={'phone_number': number})
        self.assertFalse(form.is_valid())
        self.assertIn('Duplicate value for phone number', list(form.errors.values())[0])
    def test_phone_number_cant_start_with_2180(self):
        "Ensures the local prefix '0' isn't accidentally included in the phone number"
        number = '218091234124'
        form = self.form(data={'phone_number': number})
        self.assertFalse(form.is_valid())
        self.assertIn('Please enter a valid phone number', list(form.errors.values())[0][0])
class TestBlacklistChangeForm(BlackWhitelistEditFormMixin, TestCase):
    """Exercise editing of blacklisted numbers via the edit form."""
    def setUp(self):
        super(TestBlacklistChangeForm, self).setUp()
        # Bind the mixin's hooks to the blacklist form and factory.
        self.form = BlacklistedNumberEditForm
        self.factory = BlacklistFactory
class TestWhitelistChangeForm(BlackWhitelistEditFormMixin, TestCase):
    """Exercise editing of whitelisted numbers via the edit form."""
    def setUp(self):
        super(TestWhitelistChangeForm, self).setUp()
        # Bind the mixin's hooks to the whitelist form and factory.
        self.form = WhitelistedNumberEditForm
        self.factory = WhitelistFactory
class BlacklistDownload(StaffUserMixin, ResponseCheckerMixin, TestCase):
    """Staff with read permission can download the blacklist as a file."""
    permissions = ['read_blacklist']
    model = Blacklist

    def test_download_blacklist_file(self):
        entry = BlacklistFactory()
        response = self.client.get(reverse('blacklisted-numbers-download'))
        self.assertOK(response)
        # The downloaded body must include the blacklisted number.
        self.assertIn(entry.phone_number, response.content.decode())
class WhitelistDownload(StaffUserMixin, ResponseCheckerMixin, TestCase):
    """Staff with read permission can download the whitelist as a file."""
    permissions = ['read_whitelist']
    model = Whitelist

    def test_download_whitelist_file(self):
        entry = WhitelistFactory()
        response = self.client.get(reverse('whitelisted-numbers-download'))
        self.assertOK(response)
        # The downloaded body must include the whitelisted number.
        self.assertIn(entry.phone_number, response.content.decode())
class DeleteBlacklist(StaffUserMixin, ResponseCheckerMixin, TestCase):
    """Exercise bulk deletion of all blacklisted numbers."""
    permissions = ['delete_blacklist', 'browse_blacklist']
    model = Blacklist

    def setUp(self):
        super(DeleteBlacklist, self).setUp()
        BlacklistFactory.create_batch(size=3)
        self.url = reverse('blacklisted-numbers-delete')

    def test_get_deleted_page(self):
        # The confirmation page reports how many entries will be removed.
        response = self.client.get(self.url)
        self.assertOK(response)
        self.assertIn('Are you sure you want to delete all 3', response.content.decode())

    def test_post_deleted_page(self):
        # Confirming empties the blacklist and redirects to the browse page.
        response = self.client.post(self.url, data={'ok': True})
        self.assertRedirects(response, reverse('browse_blacklistednumbers'))
        self.assertEqual(Blacklist.objects.count(), 0)
class DeleteWhitelist(StaffUserMixin, ResponseCheckerMixin, TestCase):
    """Exercise bulk deletion of all whitelisted numbers."""
    permissions = ['delete_whitelist', 'browse_whitelist']
    model = Whitelist

    def setUp(self):
        super(DeleteWhitelist, self).setUp()
        self.url = reverse('whitelisted-numbers-delete')
        WhitelistFactory.create_batch(size=3)

    def test_get_deleted_page(self):
        """The confirmation page reports how many entries will be removed."""
        rsp = self.client.get(self.url)
        self.assertOK(rsp)
        self.assertIn('Are you sure you want to delete all 3', rsp.content.decode())

    def test_post_deleted_page(self):
        """Confirming the deletion empties the whitelist."""
        rsp = self.client.post(self.url, data={'ok': True})
        self.assertRedirects(rsp, reverse('browse_whitelistednumbers'))
        # Bug fix: this previously asserted on Blacklist (copy/paste from
        # DeleteBlacklist) and so never verified the whitelist was emptied.
        self.assertEqual(Whitelist.objects.count(), 0)
class DeleteStaffPhone(StaffUserMixin, ResponseCheckerMixin, TestCase):
    """Exercise bulk deletion of all registered staff phones."""
    permissions = ['delete_staffphone', 'browse_staffphone']
    model = StaffPhone

    def setUp(self):
        super(DeleteStaffPhone, self).setUp()
        StaffPhoneFactory.create_batch(size=3)
        self.url = reverse('staffphones-delete')

    def test_get_deleted_page(self):
        # The confirmation page reports how many entries will be removed.
        response = self.client.get(self.url)
        self.assertOK(response)
        self.assertIn('Are you sure you want to delete all 3', response.content.decode())

    def test_post_deleted_page(self):
        # Confirming removes all staff phones and redirects to the browse page.
        response = self.client.post(self.url, data={'ok': True})
        self.assertRedirects(response, reverse('browse_staffphones'))
        self.assertEqual(StaffPhone.objects.count(), 0)
class TestDeleteAllCopyCenters(StaffUserMixin, ResponseCheckerMixin, TestCase):
    """Exercise the view that deletes every copy registration center at once."""
    def setUp(self):
        super(TestDeleteAllCopyCenters, self).setUp()
        self.url = reverse('delete-all-copy-centers')
    def add_permission(self, codename):
        """add permission with codename"""
        permission = Permission.objects.get(codename=codename)
        self.user.user_permissions.add(permission)
    def test_permissions(self):
        """ensure permission required to access delete page"""
        # no permission, no delete-o
        self.assertForbidden(self.client.get(self.url))
        self.assertForbidden(self.client.post(self.url, data={'ok': True}))
        # Once you have permission, all is well.
        self.add_permission('delete_registrationcenter')
        # Also add browse so the redirect works
        self.add_permission('browse_registrationcenter')
        self.assertOK(self.client.get(self.url))
        response = self.client.post(self.url, data={'ok': True})
        self.assertRedirects(response, reverse('browse_registrationcenters'))
        # not logged in ==> redirect
        self.client.logout()
        self.assertRedirectsToLogin(self.client.get(self.url))
    def test_confirmation_page_shows_center_to_be_deleted(self):
        """Ensure user sees what's about to be deleted before it happens"""
        self.add_permission('delete_registrationcenter')
        self.add_permission('browse_registrationcenter')
        # Create some copy centers
        original = RegistrationCenterFactory()
        copies = [RegistrationCenterFactory(copy_of=original) for i in range(3)]
        self.assertEqual(RegistrationCenter.objects.all().count(), 4)
        response = self.client.get(self.url)
        self.assertOK(response)
        self.assertIn('copy_centers', response.context)
        # Compare by id: the context holds center objects, not ids.
        context_copy_centers = sorted([center.id for center in response.context['copy_centers']])
        copies = sorted([center.id for center in copies])
        self.assertEqual(context_copy_centers, copies)
    def test_delete_actually_deletes(self):
        """Ensure delete works as advertised"""
        original = RegistrationCenterFactory()
        RegistrationCenterFactory(copy_of=original)
        self.assertEqual(RegistrationCenter.objects.all().count(), 2)
        self.add_permission('delete_registrationcenter')
        # Also add browse so the redirect works
        self.add_permission('browse_registrationcenter')
        response = self.client.post(self.url, data={'ok': True})
        self.assertRedirects(response, reverse('browse_registrationcenters'))
        # Only the copy is removed; the original center must survive.
        centers = RegistrationCenter.objects.all()
        self.assertEqual(len(centers), 1)
        self.assertEqual(centers[0].id, original.id)
class TestRegistrationRead(StaffUserMixin, ResponseCheckerMixin, TestCase):
    """Test the read-registration view"""
    permissions = ['read_registration']
    model = Registration

    def test_no_server_error_if_citizen_is_missing(self):
        """A missing citizen can cause a DoesNotExist error. Be sure to catch it."""
        # create a registration whose citizen is flagged as missing
        registration = RegistrationFactory(citizen__missing=now())
        response = self.client.get(
            reverse('read_registration', kwargs={'pk': registration.pk}))
        self.assertContains(response, registration.registration_center.center_id)
class TestRegistrationCenterDeleteLogic(StaffUserMixin, ResponseCheckerMixin, TestCase):
    """Ensure that centers with copies can't be deleted"""
    permissions = ['delete_registrationcenter', 'read_registrationcenter',
                   'change_registrationcenter', ]
    model = RegistrationCenter
    def setUp(self):
        super(TestRegistrationCenterDeleteLogic, self).setUp()
        # self.original has a copy, so it must not be deletable;
        # self.copy and self.ordinary have no copies and are deletable.
        self.original = RegistrationCenterFactory()
        self.copy = RegistrationCenterFactory(copy_of=self.original)
        self.ordinary = RegistrationCenterFactory()
    def test_read_and_edit_views_offer_delete_appropriately(self):
        """Ensure the Delete button is available in the read and edit views when appropriate"""
        for center, should_offer_delete in ((self.original, False), (self.copy, True),
                                            (self.ordinary, True),):
            for url_name in ('read_registrationcenter', 'edit_registrationcenter'):
                url = reverse(url_name, kwargs={'pk': center.id})
                response = self.client.get(url)
                # The delete link's presence in the page marks the button.
                delete_url = reverse('delete_registrationcenter', kwargs={'pk': center.id})
                if should_offer_delete:
                    self.assertContains(response, delete_url)
                else:
                    self.assertNotContains(response, delete_url)
    def test_delete_view_available_appropriately(self):
        """Ensure the Delete view can be accessed when appropriate"""
        for center, should_offer_delete in ((self.original, False), (self.copy, True),
                                            (self.ordinary, True),):
            delete_url = reverse('delete_registrationcenter', kwargs={'pk': center.id})
            response = self.client.get(delete_url)
            if should_offer_delete:
                self.assertOK(response)
            else:
                self.assertForbidden(response)
class CenterDownload(CenterFileTestMixin, StaffUserMixin, TestCase):
    """Exercise the registration-center CSV download view."""
    permissions = ['read_registrationcenter']
    model = RegistrationCenter
    def setUp(self):
        super(CenterDownload, self).setUp()
        self.download_csv_url = reverse('download-centers-csv')
    def test_download_link_is_on_ecc_form(self):
        """The CSV-upload page links to the CSV download URL."""
        url = reverse('upload-centers-csv')
        # Need 'add registrationcenter' to get to the upload page
        content_type = ContentType.objects.get_for_model(self.model)
        self.user.user_permissions.add(Permission.objects.get(content_type=content_type,
                                                              codename='add_registrationcenter'))
        rsp = self.client.get(url)
        self.assertEqual(200, rsp.status_code)
        self.assertContains(rsp, self.download_csv_url)
    def test_download_csv_file(self):
        """The downloaded CSV round-trips DB contents, rendering NULLs as ''."""
        # upload the test CSV to get some data in the DB
        self.upload_csv()
        # Add one with null values
        rc_with_nones = RegistrationCenterFactory(name="Center with no center_lat or center_lon",
                                                  center_lat=None,
                                                  center_lon=None)
        self.assertEqual(rc_with_nones.center_lat, None)
        self.assertEqual(rc_with_nones.center_lon, None)
        # download the CSV file
        rsp = self.client.get(self.download_csv_url)
        self.assertEqual(200, rsp.status_code)
        reader = csv.reader(StringIO(rsp.content.decode()))
        for i, field in enumerate(next(reader)):
            # check the header row
            self.assertEqual(field, CSV_FIELDS[i])
        for row in reader:
            # check each row against the DB values
            self.assertNotIn('None', str(row))
            center_id = row[0]
            center = RegistrationCenter.objects.get(center_id=center_id)
            for i, field in enumerate(CSV_FIELDS):
                # center_type is special because it is an integer in the DB, but a string in the CSV
                if field == 'center_type':
                    db_field_as_str = center.get_center_type_display()
                else:
                    db_field_as_str = str(getattr(center, field))
                # the CSV renders a NULL column as the empty string
                if db_field_as_str == 'None':
                    db_field_as_str = ''
                self.assertEqual(row[i], db_field_as_str)
class RegistrationSearchTest(TestCase):
    """Exercise the registration browse view's search by national ID or phone."""
    @classmethod
    def setUpTestData(cls):
        cls.staff_user = UserFactory()
        cls.staff_user.is_staff = True
        cls.staff_user.save()
        # give this user permission to browse
        ct = ContentType.objects.get_for_model(Registration)
        perm_codename = 'browse_registration'
        perm = Permission.objects.get(content_type=ct, codename=perm_codename)
        cls.staff_user.user_permissions.add(perm)
        # create 2 registrations, one that we expect to find and one that we expect not to find
        cls.nid_we_should_find = 200000000001
        cls.phone_we_should_find = '218900000002'
        cls.nid_we_should_not_find = 200000000003
        cls.phone_we_should_not_find = '218000000004'
        cls.nonexistent_nid = 200000000005
        cls.nonexistent_phone = '218900000006'
        cls.present_reg = RegistrationFactory(
            archive_time=None,
            citizen__national_id=cls.nid_we_should_find,
            sms__from_number=cls.phone_we_should_find)
        cls.absent_reg = RegistrationFactory(
            archive_time=None,
            citizen__national_id=cls.nid_we_should_not_find,
            sms__from_number=cls.phone_we_should_not_find)
    def setUp(self):
        # Every test browses as the logged-in staff user.
        assert self.client.login(username=self.staff_user.username, password=DEFAULT_USER_PASSWORD)
        self.browse_url = reverse('browse_registrations')
    def test_search_finds_national_id(self):
        """Searching by national ID returns only the matching registration."""
        rsp = self.client.get(self.browse_url, data={'q': self.nid_we_should_find})
        self.assertIn(self.present_reg, rsp.context['object_list'])
        self.assertNotIn(self.absent_reg, rsp.context['object_list'])
    def test_search_finds_phone_number(self):
        """Searching by phone number returns only the matching registration."""
        rsp = self.client.get(self.browse_url, data={'q': self.phone_we_should_find})
        self.assertIn(self.present_reg, rsp.context['object_list'])
        self.assertNotIn(self.absent_reg, rsp.context['object_list'])
    def test_search_strips_whitespace_national_id(self):
        """Leading/trailing whitespace around a national ID is ignored."""
        rsp = self.client.get(self.browse_url, data={'q': ' %s ' % self.nid_we_should_find})
        self.assertIn(self.present_reg, rsp.context['object_list'])
        self.assertNotIn(self.absent_reg, rsp.context['object_list'])
    def test_search_strips_whitespace_phone_number(self):
        """Leading/trailing whitespace around a phone number is ignored."""
        rsp = self.client.get(self.browse_url, data={'q': ' %s ' % self.phone_we_should_find})
        self.assertIn(self.present_reg, rsp.context['object_list'])
        self.assertNotIn(self.absent_reg, rsp.context['object_list'])
    def test_empty_search_result(self):
        """Valid-looking but unknown NID/phone queries yield no results."""
        rsp = self.client.get(self.browse_url, data={'q': self.nonexistent_nid})
        self.assertEqual(list(rsp.context['object_list']), [])
        rsp = self.client.get(self.browse_url, data={'q': self.nonexistent_phone})
        self.assertEqual(list(rsp.context['object_list']), [])
    def test_not_a_valid_nid_or_phone(self):
        """A short numeric string is neither an NID nor a phone: no results."""
        rsp = self.client.get(self.browse_url, data={'q': '1234'})
        self.assertEqual(list(rsp.context['object_list']), [])
    def test_search_for_nondigit(self):
        """Searching by a non-digit term (here a first name) finds the registration."""
        search_term = self.present_reg.citizen.first_name
        rsp = self.client.get(self.browse_url, data={'q': search_term})
        self.assertIn(self.present_reg, rsp.context['object_list'])
        self.assertNotIn(self.absent_reg, rsp.context['object_list'])
| SmartElect/SmartElect | register/tests/test_views.py | Python | apache-2.0 | 21,018 |
from .base import FileTransport, HttpFileTransport, BlitzortungDataPath, BlitzortungDataPathGenerator
from .raw_signal import RawSignalsBlitzortungDataProvider
from .station import StationsBlitzortungDataProvider
from .strike import StrikesBlitzortungDataProvider
def strikes():
    """Resolve the strike data provider from the package injector."""
    from .. import INJECTOR as injector
    return injector.get(StrikesBlitzortungDataProvider)
def stations():
    """Resolve the station data provider from the package injector."""
    from .. import INJECTOR as injector
    return injector.get(StationsBlitzortungDataProvider)
def raw():
    """Resolve the raw-signal data provider from the package injector."""
    from .. import INJECTOR as injector
    return injector.get(RawSignalsBlitzortungDataProvider)
| wuan/bo-python | blitzortung/dataimport/__init__.py | Python | apache-2.0 | 571 |
# Microsoft Azure Linux Agent
#
# Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import glob
import os.path
import re
import signal
import sys
import azurelinuxagent.common.conf as conf
import azurelinuxagent.common.utils.fileutil as fileutil
from azurelinuxagent.common import version
from azurelinuxagent.common.exception import ProtocolError
from azurelinuxagent.common.osutil import get_osutil
from azurelinuxagent.common.persist_firewall_rules import PersistFirewallRulesHandler
from azurelinuxagent.common.protocol.util import get_protocol_util
from azurelinuxagent.ga.exthandlers import HANDLER_COMPLETE_NAME_PATTERN
def read_input(message):
    """Prompt the user with *message* and return the line they type.

    Uses ``raw_input`` on Python 2 and ``input`` on Python 3.
    """
    if sys.version_info[0] < 3:
        # raw_input is not defined in python3, and the linter will thus
        # throw an undefined-variable<E0602> error on this line.
        # Suppress it here.
        return raw_input(message)  # pylint: disable=E0602
    return input(message)
class DeprovisionAction(object):
    """A deferred callable: a function plus the positional and keyword
    arguments to apply when :meth:`invoke` is called."""

    def __init__(self, func, args=None, kwargs=None):
        self.func = func
        self.args = [] if args is None else args
        self.kwargs = {} if kwargs is None else kwargs

    def invoke(self):
        """Execute the stored function with the stored arguments."""
        self.func(*self.args, **self.kwargs)
class DeprovisionHandler(object):
    """Drives VM deprovisioning.

    Each ``del_*``/``regen_*``/``reset_*`` method appends human-readable
    warnings and deferred :class:`DeprovisionAction` items to the caller's
    lists; :meth:`run` then shows the warnings, asks for confirmation, and
    executes the actions.
    """
    def __init__(self):
        self.osutil = get_osutil()
        self.protocol_util = get_protocol_util()
        # True only while do_actions() is executing, so that SIGINT cannot
        # interrupt a deprovision that is already mutating the system.
        self.actions_running = False
        signal.signal(signal.SIGINT, self.handle_interrupt_signal)
    def del_root_password(self, warnings, actions):
        """Queue disabling of the root password."""
        warnings.append("WARNING! root password will be disabled. "
                        "You will not be able to login as root.")
        actions.append(DeprovisionAction(self.osutil.del_root_password))
    def del_user(self, warnings, actions):
        """Queue deletion of the provisioned user account from ovf-env.xml."""
        try:
            ovfenv = self.protocol_util.get_ovf_env()
        except ProtocolError:
            # Without ovf-env.xml we cannot know which account to delete.
            warnings.append("WARNING! ovf-env.xml is not found.")
            warnings.append("WARNING! Skip delete user.")
            return
        username = ovfenv.username
        warnings.append(("WARNING! {0} account and entire home directory "
                         "will be deleted.").format(username))
        actions.append(DeprovisionAction(self.osutil.del_account,
                                         [username]))
    def regen_ssh_host_key(self, warnings, actions):
        """Queue removal of all SSH host keys (regenerated on next boot)."""
        warnings.append("WARNING! All SSH host key pairs will be deleted.")
        actions.append(DeprovisionAction(fileutil.rm_files,
                                         [conf.get_ssh_key_glob()]))
    def stop_agent_service(self, warnings, actions):
        """Queue stopping of the waagent service."""
        warnings.append("WARNING! The waagent service will be stopped.")
        actions.append(DeprovisionAction(self.osutil.stop_agent_service))
    def del_dirs(self, warnings, actions):  # pylint: disable=W0613
        """Queue removal of the agent lib and extension log directories."""
        dirs = [conf.get_lib_dir(), conf.get_ext_log_dir()]
        actions.append(DeprovisionAction(fileutil.rm_dirs, dirs))
    def del_files(self, warnings, actions):  # pylint: disable=W0613
        """Queue removal of shell history, the agent log and BSD seed/key files."""
        files = ['/root/.bash_history', conf.get_agent_log_file()]
        actions.append(DeprovisionAction(fileutil.rm_files, files))
        # For OpenBSD
        actions.append(DeprovisionAction(fileutil.rm_files,
                                         ["/etc/random.seed",
                                          "/var/db/host.random",
                                          "/etc/isakmpd/local.pub",
                                          "/etc/isakmpd/private/local.key",
                                          "/etc/iked/private/local.key",
                                          "/etc/iked/local.pub"]))
    def del_resolv(self, warnings, actions):
        """Queue removal of /etc/resolv.conf."""
        warnings.append("WARNING! /etc/resolv.conf will be deleted.")
        files_to_del = ["/etc/resolv.conf"]
        actions.append(DeprovisionAction(fileutil.rm_files, files_to_del))
    def del_dhcp_lease(self, warnings, actions):
        """Queue removal of cached DHCP leases for the common DHCP clients."""
        warnings.append("WARNING! Cached DHCP leases will be deleted.")
        dirs_to_del = ["/var/lib/dhclient", "/var/lib/dhcpcd", "/var/lib/dhcp"]
        actions.append(DeprovisionAction(fileutil.rm_dirs, dirs_to_del))
        # For FreeBSD and OpenBSD
        actions.append(DeprovisionAction(fileutil.rm_files,
                                         ["/var/db/dhclient.leases.*"]))
        # For FreeBSD, NM controlled
        actions.append(DeprovisionAction(fileutil.rm_files,
                                         ["/var/lib/NetworkManager/dhclient-*.lease"]))
    def del_ext_handler_files(self, warnings, actions):  # pylint: disable=W0613
        """Queue removal of per-extension status/config/sequence files,
        skipping agent directories themselves."""
        ext_dirs = [d for d in os.listdir(conf.get_lib_dir())
                    if os.path.isdir(os.path.join(conf.get_lib_dir(), d))
                    and re.match(HANDLER_COMPLETE_NAME_PATTERN, d) is not None
                    and not version.is_agent_path(d)]
        for ext_dir in ext_dirs:
            ext_base = os.path.join(conf.get_lib_dir(), ext_dir)
            files = glob.glob(os.path.join(ext_base, 'status', '*.status'))
            files += glob.glob(os.path.join(ext_base, 'config', '*.settings'))
            files += glob.glob(os.path.join(ext_base, 'config', 'HandlerStatus'))
            files += glob.glob(os.path.join(ext_base, 'mrseq'))
            if len(files) > 0:
                actions.append(DeprovisionAction(fileutil.rm_files, files))
    def del_lib_dir_files(self, warnings, actions):  # pylint: disable=W0613
        """Queue removal of goal-state/protocol files from the agent lib dir."""
        known_files = [
            'HostingEnvironmentConfig.xml',
            'Incarnation',
            'partition',
            'Protocol',
            'SharedConfig.xml',
            'WireServerEndpoint'
        ]
        known_files_glob = [
            'Extensions.*.xml',
            'ExtensionsConfig.*.xml',
            'GoalState.*.xml'
        ]
        lib_dir = conf.get_lib_dir()
        files = [f for f in \
                 [os.path.join(lib_dir, kf) for kf in known_files] \
                 if os.path.isfile(f)]
        for p in known_files_glob:
            files += glob.glob(os.path.join(lib_dir, p))
        if len(files) > 0:
            actions.append(DeprovisionAction(fileutil.rm_files, files))
    def reset_hostname(self, warnings, actions):  # pylint: disable=W0613
        """Queue resetting of the hostname to localhost.localdomain."""
        localhost = ["localhost.localdomain"]
        actions.append(DeprovisionAction(self.osutil.set_hostname,
                                         localhost))
        actions.append(DeprovisionAction(self.osutil.set_dhcp_hostname,
                                         localhost))
    def setup(self, deluser):
        """Assemble the full deprovision plan; returns (warnings, actions)."""
        warnings = []
        actions = []
        self.stop_agent_service(warnings, actions)
        if conf.get_regenerate_ssh_host_key():
            self.regen_ssh_host_key(warnings, actions)
        self.del_dhcp_lease(warnings, actions)
        self.reset_hostname(warnings, actions)
        if conf.get_delete_root_password():
            self.del_root_password(warnings, actions)
        self.del_dirs(warnings, actions)
        self.del_files(warnings, actions)
        self.del_resolv(warnings, actions)
        if deluser:
            self.del_user(warnings, actions)
        self.del_persist_firewall_rules(actions)
        return warnings, actions
    def setup_changed_unique_id(self):
        """Assemble the smaller clean-up plan used when the VM's unique id
        has changed; returns (warnings, actions)."""
        warnings = []
        actions = []
        self.del_dhcp_lease(warnings, actions)
        self.del_lib_dir_files(warnings, actions)
        self.del_ext_handler_files(warnings, actions)
        self.del_persist_firewall_rules(actions)
        return warnings, actions
    def run(self, force=False, deluser=False):
        """Show warnings, confirm (unless *force*), then run the full plan."""
        warnings, actions = self.setup(deluser)
        self.do_warnings(warnings)
        if self.do_confirmation(force=force):
            self.do_actions(actions)
    def run_changed_unique_id(self):
        '''
        Clean-up files and directories that may interfere when the VM unique
        identifier has changed.
        While users *should* manually deprovision a VM, the files removed by
        this routine will help keep the agent from getting confused
        (since incarnation and extension settings, among other items, will
        no longer be monotonically increasing).
        '''
        warnings, actions = self.setup_changed_unique_id()
        self.do_warnings(warnings)
        self.do_actions(actions)
    def do_actions(self, actions):
        """Execute the queued actions; SIGINT is refused while running."""
        self.actions_running = True
        for action in actions:
            action.invoke()
        self.actions_running = False
    def do_confirmation(self, force=False):
        """Return True if *force* is set or the user answers y/yes."""
        if force:
            return True
        confirm = read_input("Do you want to proceed (y/n)")
        return True if confirm.lower().startswith('y') else False
    def do_warnings(self, warnings):
        """Print each queued warning line."""
        for warning in warnings:
            print(warning)
    def handle_interrupt_signal(self, signum, frame):  # pylint: disable=W0613
        """SIGINT handler: exit only if no destructive actions are running."""
        if not self.actions_running:
            print("Deprovision is interrupted.")
            sys.exit(0)
        print ('Deprovisioning may not be interrupted.')
        return
    @staticmethod
    def del_persist_firewall_rules(actions):
        """Queue removal of the persisted-firewall service file and binary."""
        agent_network_service_path = PersistFirewallRulesHandler.get_service_file_path()
        actions.append(DeprovisionAction(fileutil.rm_files,
                                         [agent_network_service_path, os.path.join(conf.get_lib_dir(),
                                                                                   PersistFirewallRulesHandler.BINARY_FILE_NAME)]))
| Azure/WALinuxAgent | azurelinuxagent/pa/deprovision/default.py | Python | apache-2.0 | 10,146 |
#!/usr/bin/env python
import re
from ciscoconfparse import CiscoConfParse
def main():
    '''
    Report crypto maps that are not using AES, via ciscoconfparse.

    Parses 'cisco_ipsec.txt', selects "crypto map CRYPTO" parent sections
    that have no child line containing "AES", and prints each offending
    map together with its transform-set name.  (Python 2 script: uses
    print statements.)
    '''
    cisco_file = 'cisco_ipsec.txt'
    cisco_cfg = CiscoConfParse(cisco_file)
    # Parents matching the crypto-map header whose children never mention AES.
    crypto_maps = cisco_cfg.find_objects_wo_child(parentspec=r"^crypto map CRYPTO", childspec=r"AES")
    print "\n Crypto Maps not using AES:"
    for entry in crypto_maps:
        for child in entry.children:
            if 'transform' in child.text:
                # Capture the transform-set name for display.
                match = re.search(r"set transform-set (.*)$", child.text)
                encryption = match.group(1)
                print " {0} >>> {1}".format(entry.text.strip(), encryption)
    print
# Entry point when executed as a script (no effect on import).
if __name__ == "__main__":
    main()
| hbenaouich/Learning-Python | class-1/ex10_confParse.py | Python | apache-2.0 | 760 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils.data_utils import rand_name
from tempest import config
from tempest import exceptions
from tempest.test import attr
from tempest.test import skip_because
class QuotasAdminTestJSON(base.BaseComputeAdminTest):
    """Admin-side checks of the Nova quota API (JSON interface).

    Covers reading default quotas, updating quota limits, and verifying
    that resource creation fails with OverLimit once a quota is reduced
    to zero.  The XML subclass below re-runs the same cases over XML.
    """
    _interface = 'json'
    @classmethod
    def setUpClass(cls):
        super(QuotasAdminTestJSON, cls).setUpClass()
        cls.auth_url = cls.config.identity.uri
        cls.client = cls.os.quotas_client
        cls.adm_client = cls.os_adm.quotas_client
        cls.identity_admin_client = cls._get_identity_admin_client()
        cls.sg_client = cls.security_groups_client
        resp, tenants = cls.identity_admin_client.list_tenants()
        # NOTE(afazekas): these test cases should always create and use a new
        # tenant most of them should be skipped if we can't do that
        if cls.config.compute.allow_tenant_isolation:
            cls.demo_tenant_id = cls.isolated_creds.get_primary_user().get(
                'tenantId')
        else:
            cls.demo_tenant_id = [tnt['id'] for tnt in tenants if tnt['name']
                                  == cls.config.identity.tenant_name][0]
        # Names of all quota resources expected in a quota-set response.
        cls.default_quota_set = set(('injected_file_content_bytes',
                                     'metadata_items', 'injected_files',
                                     'ram', 'floating_ips',
                                     'fixed_ips', 'key_pairs',
                                     'injected_file_path_bytes',
                                     'instances', 'security_group_rules',
                                     'cores', 'security_groups'))
    @attr(type='smoke')
    def test_get_default_quotas(self):
        # Admin can get the default resource quota set for a tenant
        expected_quota_set = self.default_quota_set | set(['id'])
        resp, quota_set = self.client.get_default_quota_set(
            self.demo_tenant_id)
        self.assertEqual(200, resp.status)
        self.assertEqual(sorted(expected_quota_set),
                         sorted(quota_set.keys()))
        self.assertEqual(quota_set['id'], self.demo_tenant_id)
    @attr(type='gate')
    def test_update_all_quota_resources_for_tenant(self):
        # Admin can update all the resource quota limits for a tenant
        resp, default_quota_set = self.client.get_default_quota_set(
            self.demo_tenant_id)
        new_quota_set = {'injected_file_content_bytes': 20480,
                         'metadata_items': 256, 'injected_files': 10,
                         'ram': 10240, 'floating_ips': 20, 'fixed_ips': 10,
                         'key_pairs': 200, 'injected_file_path_bytes': 512,
                         'instances': 20, 'security_group_rules': 20,
                         'cores': 2, 'security_groups': 20}
        # Update limits for all quota resources
        resp, quota_set = self.adm_client.update_quota_set(
            self.demo_tenant_id,
            force=True,
            **new_quota_set)
        # 'id' is not a settable quota value; drop it before restoring.
        default_quota_set.pop('id')
        self.addCleanup(self.adm_client.update_quota_set,
                        self.demo_tenant_id, **default_quota_set)
        self.assertEqual(200, resp.status)
        self.assertEqual(new_quota_set, quota_set)
    # TODO(afazekas): merge these test cases
    @attr(type='gate')
    def test_get_updated_quotas(self):
        # Verify that GET shows the updated quota set
        tenant_name = rand_name('cpu_quota_tenant_')
        tenant_desc = tenant_name + '-desc'
        identity_client = self.os_adm.identity_client
        _, tenant = identity_client.create_tenant(name=tenant_name,
                                                  description=tenant_desc)
        tenant_id = tenant['id']
        self.addCleanup(identity_client.delete_tenant,
                        tenant_id)
        self.adm_client.update_quota_set(tenant_id,
                                         ram='5120')
        resp, quota_set = self.adm_client.get_quota_set(tenant_id)
        self.assertEqual(200, resp.status)
        self.assertEqual(quota_set['ram'], 5120)
    # TODO(afazekas): Add dedicated tenant to the skiped quota tests
    # it can be moved into the setUpClass as well
    @attr(type='gate')
    def test_create_server_when_cpu_quota_is_full(self):
        # Disallow server creation when tenant's vcpu quota is full
        resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
        default_vcpu_quota = quota_set['cores']
        vcpu_quota = 0  # Set the quota to zero to conserve resources
        resp, quota_set = self.adm_client.update_quota_set(self.demo_tenant_id,
                                                           force=True,
                                                           cores=vcpu_quota)
        self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
                        cores=default_vcpu_quota)
        self.assertRaises(exceptions.OverLimit, self.create_server)
    @attr(type='gate')
    def test_create_server_when_memory_quota_is_full(self):
        # Disallow server creation when tenant's memory quota is full
        resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
        default_mem_quota = quota_set['ram']
        mem_quota = 0  # Set the quota to zero to conserve resources
        self.adm_client.update_quota_set(self.demo_tenant_id,
                                         force=True,
                                         ram=mem_quota)
        self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
                        ram=default_mem_quota)
        self.assertRaises(exceptions.OverLimit, self.create_server)
    @attr(type='gate')
    def test_update_quota_normal_user(self):
        # A non-admin client may not modify quota limits.
        self.assertRaises(exceptions.Unauthorized,
                          self.client.update_quota_set,
                          self.demo_tenant_id,
                          ram=0)
    @attr(type=['negative', 'gate'])
    def test_create_server_when_instances_quota_is_full(self):
        # Once instances quota limit is reached, disallow server creation
        resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
        default_instances_quota = quota_set['instances']
        instances_quota = 0  # Set quota to zero to disallow server creation
        self.adm_client.update_quota_set(self.demo_tenant_id,
                                         force=True,
                                         instances=instances_quota)
        self.addCleanup(self.adm_client.update_quota_set, self.demo_tenant_id,
                        instances=default_instances_quota)
        self.assertRaises(exceptions.OverLimit, self.create_server)
    @skip_because(bug="1186354",
                  condition=config.TempestConfig().service_available.neutron)
    @attr(type=['negative', 'gate'])
    def test_security_groups_exceed_limit(self):
        # Negative test: Creation Security Groups over limit should FAIL
        resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
        default_sg_quota = quota_set['security_groups']
        sg_quota = 0  # Set the quota to zero to conserve resources
        resp, quota_set =\
            self.adm_client.update_quota_set(self.demo_tenant_id,
                                             force=True,
                                             security_groups=sg_quota)
        self.addCleanup(self.adm_client.update_quota_set,
                        self.demo_tenant_id,
                        security_groups=default_sg_quota)
        # Check we cannot create anymore
        self.assertRaises(exceptions.OverLimit,
                          self.sg_client.create_security_group,
                          "sg-overlimit", "sg-desc")
    @skip_because(bug="1186354",
                  condition=config.TempestConfig().service_available.neutron)
    @attr(type=['negative', 'gate'])
    def test_security_groups_rules_exceed_limit(self):
        # Negative test: Creation of Security Group Rules should FAIL
        # when we reach limit maxSecurityGroupRules
        resp, quota_set = self.client.get_quota_set(self.demo_tenant_id)
        default_sg_rules_quota = quota_set['security_group_rules']
        sg_rules_quota = 0  # Set the quota to zero to conserve resources
        resp, quota_set =\
            self.adm_client.update_quota_set(
                self.demo_tenant_id,
                force=True,
                security_group_rules=sg_rules_quota)
        self.addCleanup(self.adm_client.update_quota_set,
                        self.demo_tenant_id,
                        security_group_rules=default_sg_rules_quota)
        s_name = rand_name('securitygroup-')
        s_description = rand_name('description-')
        resp, securitygroup =\
            self.sg_client.create_security_group(s_name, s_description)
        self.addCleanup(self.sg_client.delete_security_group,
                        securitygroup['id'])
        secgroup_id = securitygroup['id']
        ip_protocol = 'tcp'
        # Check we cannot create SG rule anymore
        self.assertRaises(exceptions.OverLimit,
                          self.sg_client.create_security_group_rule,
                          secgroup_id, ip_protocol, 1025, 1025)
class QuotasAdminTestXML(QuotasAdminTestJSON):
    # Re-run every JSON quota test case through the XML interface.
    _interface = 'xml'
| itskewpie/tempest | tempest/api/compute/admin/test_quotas.py | Python | apache-2.0 | 10,071 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_device_dns
short_description: Manage BIG-IP device DNS settings
description:
- Manage BIG-IP device DNS settings
version_added: "2.2"
options:
cache:
description:
- Specifies whether the system caches DNS lookups or performs the
operation each time a lookup is needed. Please note that this applies
only to Access Policy Manager features, such as ACLs, web application
rewrites, and authentication.
default: disable
choices:
- enabled
- disabled
name_servers:
description:
- A list of name servers that the system uses to validate DNS lookups
forwarders:
deprecated: Deprecated in 2.4. Use the GUI or edit named.conf.
description:
- A list of BIND servers that the system can use to perform DNS lookups
search:
description:
- A list of domains that the system searches for local domain lookups,
to resolve local host names.
ip_version:
description:
- Specifies whether the DNS specifies IP addresses using IPv4 or IPv6.
choices:
- 4
- 6
state:
description:
- The state of the variable on the system. When C(present), guarantees
that an existing variable is set to C(value).
default: present
choices:
- absent
- present
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Set the DNS settings on the BIG-IP
bigip_device_dns:
name_servers:
- 208.67.222.222
- 208.67.220.220
search:
- localdomain
- lab.local
password: secret
server: lb.mydomain.com
user: admin
validate_certs: no
delegate_to: localhost
'''
RETURN = r'''
cache:
description: The new value of the DNS caching
returned: changed
type: string
sample: enabled
name_servers:
description: List of name servers that were set
returned: changed
type: list
sample: ['192.0.2.10', '172.17.12.10']
search:
description: List of search domains that were set
returned: changed
type: list
sample: ['192.0.2.10', '172.17.12.10']
ip_version:
description: IP version that was set that DNS will specify IP addresses in
returned: changed
type: int
sample: 4
warnings:
description: The list of warnings (if any) generated by module based on arguments
returned: always
type: list
sample: ['...', '...']
'''
from ansible.module_utils.f5_utils import AnsibleF5Client
from ansible.module_utils.f5_utils import AnsibleF5Parameters
from ansible.module_utils.f5_utils import HAS_F5SDK
from ansible.module_utils.f5_utils import F5ModuleError
try:
from ansible.module_utils.f5_utils import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
    """Maps between Ansible module parameters and the BIG-IP REST API.

    api_map translates API attribute names to module parameter names;
    api_attributes are the keys sent to the device; updatables/returnables
    drive change detection and result reporting; absentables are the
    list-valued options that support state=absent removal.
    """
    api_map = {
        'dhclient.mgmt': 'dhcp',
        'dns.cache': 'cache',
        'nameServers': 'name_servers',
        'include': 'ip_version'
    }
    api_attributes = [
        'nameServers', 'search', 'include'
    ]
    updatables = [
        'cache', 'name_servers', 'search', 'ip_version'
    ]
    returnables = [
        'cache', 'name_servers', 'search', 'ip_version'
    ]
    absentables = [
        'name_servers', 'search'
    ]
    def to_return(self):
        """Collect the returnable values, filtered of empty parameters."""
        result = {}
        for returnable in self.returnables:
            result[returnable] = getattr(self, returnable)
        result = self._filter_params(result)
        return result
    def api_params(self):
        """Build the payload sent to the device, translating each API
        attribute through api_map when a mapping exists."""
        result = {}
        for api_attribute in self.api_attributes:
            if self.api_map is not None and api_attribute in self.api_map:
                result[api_attribute] = getattr(self, self.api_map[api_attribute])
            else:
                result[api_attribute] = getattr(self, api_attribute)
        result = self._filter_params(result)
        return result
    @property
    def search(self):
        # Coerce each search domain to str; None means "not specified".
        result = []
        if self._values['search'] is None:
            return None
        for server in self._values['search']:
            result.append(str(server))
        return result
    @property
    def name_servers(self):
        # Coerce each name server to str; None means "not specified".
        result = []
        if self._values['name_servers'] is None:
            return None
        for server in self._values['name_servers']:
            result.append(str(server))
        return result
    @property
    def cache(self):
        # NOTE(review): any value other than enabled/enable — including an
        # unset (None) cache option — maps to 'disable' here; confirm that
        # is the intended default behavior.
        if str(self._values['cache']) in ['enabled', 'enable']:
            return 'enable'
        else:
            return 'disable'
    @property
    def dhcp(self):
        # True only when the raw value is one of the enable spellings.
        valid = ['enable', 'enabled']
        return True if self._values['dhcp'] in valid else False
    @property
    def forwarders(self):
        # Forwarders are read-only for this module; any supplied value
        # is rejected with an explicit error.
        if self._values['forwarders'] is None:
            return None
        else:
            raise F5ModuleError(
                "The modifying of forwarders is not supported."
            )
    @property
    def ip_version(self):
        # The device expresses IPv6 preference as "options inet6" in the
        # 'include' attribute; IPv4 (or empty) maps to the empty string.
        if self._values['ip_version'] in [6, '6', 'options inet6']:
            return "options inet6"
        elif self._values['ip_version'] in [4, '4', '']:
            return ""
        else:
            return None
class ModuleManager(object):
    """Drives the module: reads device DNS state, computes the diff
    against the requested parameters, and applies updates/removals."""
    def __init__(self, client):
        self.client = client
        # 'have' is populated lazily from the device; 'want' holds the
        # requested parameters; 'changes' accumulates the computed diff.
        self.have = None
        self.want = Parameters(self.client.module.params)
        self.changes = Parameters()
    def _update_changed_options(self):
        """Record every updatable whose wanted value differs from the
        device value; returns True when anything changed."""
        changed = {}
        for key in Parameters.updatables:
            if getattr(self.want, key) is not None:
                attr1 = getattr(self.want, key)
                attr2 = getattr(self.have, key)
                if attr1 != attr2:
                    changed[key] = attr1
        if changed:
            self.changes = Parameters(changed)
            return True
        return False
    def exec_module(self):
        """Dispatch on the requested state and return the Ansible result
        dict (changed flag plus the values that were modified)."""
        changed = False
        result = dict()
        state = self.want.state
        try:
            if state == "present":
                changed = self.update()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))
        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        return result
    def read_current_from_device(self):
        """Read the DNS settings and the dns.cache db variable from the
        device; 'include' defaults to 4 (IPv4) when absent."""
        want_keys = ['dns.cache']
        result = dict()
        dbs = self.client.api.tm.sys.dbs.get_collection()
        for db in dbs:
            if db.name in want_keys:
                result[db.name] = db.value
        dns = self.client.api.tm.sys.dns.load()
        attrs = dns.attrs
        if 'include' not in attrs:
            attrs['include'] = 4
        result.update(attrs)
        return Parameters(result)
    def update(self):
        """Apply wanted settings; honors check mode, returns changed flag."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.client.check_mode:
            return True
        self.update_on_device()
        return True
    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False
    def update_on_device(self):
        """Push cache and DNS settings to the device in one transaction.

        NOTE(review): BigIpTxContext is not among this file's visible
        imports — confirm it is provided (e.g. by module_utils), otherwise
        this raises NameError at runtime.
        """
        params = self.want.api_params()
        tx = self.client.api.tm.transactions.transaction
        with BigIpTxContext(tx) as api:
            cache = api.tm.sys.dbs.db.load(name='dns.cache')
            dns = api.tm.sys.dns.load()
            # Empty values can be supplied, but you cannot supply the
            # None value, so we check for that specifically
            if self.want.cache is not None:
                cache.update(value=self.want.cache)
            if params:
                dns.update(**params)
    def _absent_changed_options(self):
        """Compute, for each absentable list option, the device values
        that remain after removing the wanted entries."""
        changed = {}
        for key in Parameters.absentables:
            if getattr(self.want, key) is not None:
                set_want = set(getattr(self.want, key))
                set_have = set(getattr(self.have, key))
                set_new = set_have - set_want
                if set_new != set_have:
                    changed[key] = list(set_new)
        if changed:
            self.changes = Parameters(changed)
            return True
        return False
    def should_absent(self):
        result = self._absent_changed_options()
        if result:
            return True
        return False
    def absent(self):
        """Remove wanted entries from the device lists; honors check mode."""
        self.have = self.read_current_from_device()
        if not self.should_absent():
            return False
        if self.client.check_mode:
            return True
        self.absent_on_device()
        return True
    def absent_on_device(self):
        # See NOTE(review) in update_on_device about BigIpTxContext.
        params = self.changes.api_params()
        tx = self.client.api.tm.transactions.transaction
        with BigIpTxContext(tx) as api:
            dns = api.tm.sys.dns.load()
            dns.update(**params)
class ArgumentSpec(object):
    """Declares the Ansible argument spec for the bigip_device_dns module.

    At least one of the DNS-related options must be supplied
    (required_one_of); 'forwarders' is accepted but rejected at runtime
    by the Parameters.forwarders property.
    """
    def __init__(self):
        self.supports_check_mode = True
        self.argument_spec = dict(
            cache=dict(
                required=False,
                choices=['disabled', 'enabled', 'disable', 'enable'],
                default=None
            ),
            name_servers=dict(
                required=False,
                default=None,
                type='list'
            ),
            forwarders=dict(
                required=False,
                default=None,
                type='list'
            ),
            search=dict(
                required=False,
                default=None,
                type='list'
            ),
            ip_version=dict(
                required=False,
                default=None,
                choices=[4, 6],
                type='int'
            ),
            state=dict(
                required=False,
                default='present',
                choices=['absent', 'present']
            )
        )
        self.required_one_of = [
            ['name_servers', 'search', 'forwarders', 'ip_version', 'cache']
        ]
        self.f5_product_name = 'bigip'
def main():
    """Module entry point: build the F5 client, run the manager, and
    report the result (or failure) back to Ansible as JSON."""
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")
    spec = ArgumentSpec()
    client_kwargs = dict(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        f5_product_name=spec.f5_product_name,
        required_one_of=spec.required_one_of
    )
    client = AnsibleF5Client(**client_kwargs)
    try:
        results = ModuleManager(client).exec_module()
        client.module.exit_json(**results)
    except F5ModuleError as err:
        client.module.fail_json(msg=str(err))
# Entry point when executed by Ansible as a module script.
if __name__ == '__main__':
    main()
| mcgonagle/ansible_f5 | library/bigip_device_dns.py | Python | apache-2.0 | 11,184 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import web
from web import form as webform
import httpconfig
class Form(object):
    """Builds the web.py form used to submit path data.

    The form consists of a single hidden textarea; an HttpConfig is also
    created from the request's DOCUMENT_ROOT for use by the caller.
    """
    def __init__(self, names=None):
        # 'names' previously defaulted to a shared mutable list ([]),
        # which is aliased across calls; default to None instead.
        self._form = self.createForm(names if names is not None else [])
        self.httpConfig = httpconfig.HttpConfig(web.ctx.env["DOCUMENT_ROOT"])
    @property
    def form(self):
        """The underlying web.py form instance."""
        return self._form
    def createForm(self, names=None):
        """Create the form with a hidden textarea for path data.

        'names' is accepted for interface compatibility but is not used
        by the current implementation.
        """
        # Text area for sending path data
        pathDataArea = webform.Textarea("", rows=30, cols=90, value="",
                                        id="pathData", hidden=True)
        form = webform.Form(pathDataArea)
        return form
| inbloom/legacy-projects | lri-middleware/path_builder/form.py | Python | apache-2.0 | 1,107 |
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from opentelemetry import _metrics as metrics_api
from opentelemetry import trace as trace_api
from opentelemetry.util._once import Once
# pylint: disable=protected-access
def reset_trace_globals() -> None:
    """WARNING: only use this for tests.

    Restores the trace API globals — the set-once guard, the tracer
    provider, and the proxy provider — to their unconfigured state.
    """
    trace_api._TRACER_PROVIDER_SET_ONCE = Once()
    trace_api._TRACER_PROVIDER = None
    trace_api._PROXY_TRACER_PROVIDER = trace_api.ProxyTracerProvider()
# pylint: disable=protected-access
def reset_metrics_globals() -> None:
    """WARNING: only use this for tests.

    Restores the metrics API globals — the set-once guard, the meter
    provider, and the proxy provider — to their unconfigured state.
    """
    metrics_api._METER_PROVIDER_SET_ONCE = Once()  # type: ignore[attr-defined]
    metrics_api._METER_PROVIDER = None  # type: ignore[attr-defined]
    metrics_api._PROXY_METER_PROVIDER = metrics_api._ProxyMeterProvider()  # type: ignore[attr-defined]
class TraceGlobalsTest(unittest.TestCase):
    """Resets trace API globals in setUp/tearDown
    Use as a base class or mixin for your test that modifies trace API globals.
    """
    def setUp(self) -> None:
        super().setUp()
        # Start each test from an unconfigured trace API.
        reset_trace_globals()
    def tearDown(self) -> None:
        super().tearDown()
        # Leave no configured provider behind for later tests.
        reset_trace_globals()
class MetricsGlobalsTest(unittest.TestCase):
    """Resets metrics API globals in setUp/tearDown
    Use as a base class or mixin for your test that modifies metrics API globals.
    """
    def setUp(self) -> None:
        super().setUp()
        # Start each test from an unconfigured metrics API.
        reset_metrics_globals()
    def tearDown(self) -> None:
        super().tearDown()
        # Leave no configured provider behind for later tests.
        reset_metrics_globals()
| open-telemetry/opentelemetry-python | tests/opentelemetry-test-utils/src/opentelemetry/test/globals_test.py | Python | apache-2.0 | 2,110 |
#Interphase - Copyright (C) 2009 James Garnon <http://gatc.ca/>
#Released under the MIT License <http://opensource.org/licenses/MIT>
from __future__ import division
import os
from env import engine
__docformat__ = 'restructuredtext'
class Text(object):
"""
Receives text to display on surface.
Arguments include the target surface for text rendering, font_type is a list of alternate font names, and font_size is the font size.
"""
_font = {}
_cache = {}
def __init__(self, surface, font_type=None, font_size=None):
self.screen = surface
x, y = self.screen.get_size()
self.dimension = {'x':x, 'y':y}
self.message = None
self.messages = []
if font_size:
self.font_size = int(font_size)
else:
self.font_size = 10
if isinstance(font_type, str):
font_type = [font_type]
if not Text._font:
engine.font.init()
font = None
if font_type:
font_type = ','.join(font_type)
if font_type.startswith('file:'):
font = font_type[5:].strip()
if not os.path.exists(font):
print('Font not found: %s' % font)
font = None
else:
font = engine.font.match_font(font_type)
if not font:
font_type = 'verdana, tahoma, bitstreamverasans, freesans, arial'
font = engine.font.match_font(font_type)
if not font:
font = engine.font.get_default_font()
font_type = font
Text._font['default'] = font
Text._font['defaults'] = font_type
Text._font[font] = { self.font_size:engine.font.Font(font,self.font_size) }
font_type = None
if font_type:
font_type = ','.join(font_type)
if font_type != Text._font['defaults']:
if font_type.startswith('file:'):
font_type = font_type[5:].strip()
if not os.path.exists(font_type):
print('Font not found: %s' % font_type)
font_type = None
else:
font_type = engine.font.match_font(font_type)
if font_type:
if font_type not in Text._font:
Text._font[font_type] = { self.font_size:engine.font.Font(font_type,self.font_size) }
else:
font_type = Text._font['default']
else:
font_type = Text._font['default']
else:
font_type = Text._font['default']
if self.font_size not in Text._font[font_type]:
Text._font[font_type][self.font_size] = engine.font.Font(font_type,self.font_size)
self.font_type = font_type
self.font = Text._font[self.font_type]
self.x = 0
self.y = 0
self.center = False
self.font_color = (255,0,0)
self.font_bgcolor = (0,0,0)
self.split_text = False
self.linesize = self.font[self.font_size].get_linesize()
self.margin = {'t':0, 'r':0, 'b':0, 'l':0}
self.multiline = False
self.cache = None
self.cache_key = None
def __call__(self, surface='default'):
"""Writes text to surface."""
if surface == 'default':
self.surface = self.screen
else:
self.surface = surface
self.update()
return self.surface
def render(self, surface='default'):
"""Writes text to surface."""
if surface == 'default':
self.surface = self.screen
else:
self.surface = surface
self.update()
return self.surface
def add(self,*message_append):
"""Add to text."""
for item in message_append:
self.message = str(item)
self.messages.append(self.message)
def set_position(self, position, center=False):
"""Set position to write text."""
x, y = position
if x < self.dimension['x'] and y < self.dimension['y']:
self.x = x
self.y = y
if center:
self.center = True
return True
else:
return False
def set_text_alignment(self, setting):
"""Set text alignment. Setting is 'center' or 'left'."""
if setting == 'center':
self.center = True
elif setting == 'left':
self.center = False
def set_margin(self, margin):
"""Set text margin."""
try:
self.margin['t'], self.margin['r'], self.margin['b'], self.margin['l'] = margin
except TypeError:
self.margin['t'] = self.margin['r'] = self.margin['b'] = self.margin['l'] = margin
def set_multiline(self, multiline=True):
"""Set multiline text."""
self.multiline = multiline
def set_font(self, font_type, default=False):
"""Set font of text."""
if isinstance(font_type, str):
font_type = [font_type]
font_type = ','.join(font_type)
if font_type == 'default':
font_type = Text._font['default']
self.font = Text._font[font_type]
self.font_type = font_type
elif font_type != Text._font['defaults']:
if font_type.startswith('file:'):
font = font_type[5:].strip()
if not os.path.exists(font):
print('Font not found: %s' % font)
font = None
else:
font = engine.font.match_font(font_type)
if font:
if font not in Text._font:
Text._font[font] = { self.font_size:engine.font.Font(font,self.font_size) }
self.font = Text._font[font]
self.font_type = font
if default:
Text._font['default'] = font
Text._font['defaults'] = font_type
self.linesize = self.font[self.font_size].get_linesize()
self.cache = None
def get_font(self, font_info='font'):
"""Get current font."""
if font_info == 'font':
return self.font_type
elif font_info == 'default':
return Text._font['default']
elif font_info == 'system':
return engine.font.get_fonts()
def get_font_size(self):
"""Get current font size."""
return self.font_size
def set_font_size(self, size):
"""Set font size of text."""
self.font_size = size
if size not in Text._font[self.font_type]:
Text._font[self.font_type][self.font_size] = engine.font.Font(self.font_type,self.font_size)
self.font = Text._font[self.font_type]
self.linesize = self.font[self.font_size].get_linesize()
self.cache = None
def set_font_color(self, color):
"""Set font color of text."""
self.font_color = color
self.cache = None
def set_font_bgcolor(self, color=None):
"""Set font background color."""
self.font_bgcolor = color
self.cache = None
def set_split_text(self, split_text=True):
"""Set whether text split to new line at space."""
self.split_text = split_text
def check_size(self, text):
"""Get size required for given text."""
width, height = self.font[self.font_size].size(text)
return width, height
def check_sizes(self, texts):
"""Get size required for a list of texts."""
text_size = {}
for text in texts:
text_size[text] = self.check_size(text)
return text_size
def surface_size(self, *dim):
"""Surface size needed to fit text. Return estimated width for col and height for row, adjusted for margins."""
try:
col, row = dim[0], dim[1]
except IndexError:
col, row = dim[0]
sizes = [self.check_size(char)[0] for char in 'abcdefghijklmnopqrstuvwxyz ']
charsize = sum(sizes)//len(sizes)
width = (col*charsize) + (self.margin['l']+self.margin['r'])
height = ((row*self.linesize)-2) + (self.margin['t']+self.margin['b'])
return width, height
def word_wrap(self, text, width):
"""Format text lines to fit in surface width, adjusted for margins."""
text_width = width - (self.margin['l']+self.margin['r'])
if isinstance(text, list):
textlines = text
else:
textlines = [line for line in text.splitlines()]
txtlines = []
line_num = 0
space_size = self.check_size(' ')[0]
while True:
try:
line = textlines[line_num]
except IndexError:
break
if self.check_size(line)[0] > text_width:
words = line.split(' ')
txt_line = []
size_sum = 0
word_num = 0
for word in words:
word_size = self.check_size(word)[0]
if word_size > text_width:
ln = self.split_long_text(word, text_width)
if txt_line:
txtlines.append(' '.join(txt_line))
txt_line = []
size_sum = 0
word_num = 0
txtlines.extend(ln)
continue
size_sum += word_size
if size_sum + word_num*space_size <= text_width:
txt_line.append(word)
word_num += 1
else:
txtlines.append(' '.join(txt_line))
txt_line = []
txt_line.append(word)
size_sum = word_size
word_num = 1
if txt_line:
txtlines.append(' '.join(txt_line))
else:
txtlines.append(line)
line_num += 1
return txtlines
def split_long_text(self, text, width):
"""Split long text uninterrupted by spaces to fit in surface width."""
char_size = self.check_sizes(set(text))
ln = []
chars = []
size_sum = 0
for char in text:
size_sum += char_size[char][0]
if size_sum <= width:
chars.append(char)
else:
ln.append(''.join(chars))
chars = []
chars.append(char)
size_sum = char_size[char][0]
if chars:
ln.append(''.join(chars))
return ln
def has_text(self):
"""Check whether contains text."""
if self.messages:
return True
else:
return False
def clear_text(self):
"""Clear text."""
self.message = None
self.messages = []
def _cache_chr(self, ch):
if self.font_bgcolor:
text_surface = self.font[self.font_size].render(ch, True, self.font_color, self.font_bgcolor)
else:
text_surface = self.font[self.font_size].render(ch, True, self.font_color)
try:
self.cache[ch] = {'image':text_surface, 'width':text_surface.get_width()}
except TypeError:
self.cache_key = self.font_type + str(self.font_size) + str(self.font_color) + str(self.font_bgcolor)
if self.cache_key not in self._cache:
self._cache[self.cache_key] = {}
self.cache = self._cache[self.cache_key]
self.cache[ch] = {'image':text_surface, 'width':text_surface.get_width()}
def _get_width(self, text):
width = 0
for ch in text:
try:
width += self.cache[ch]['width']
except (KeyError, TypeError):
self._cache_chr(ch)
width += self.cache[ch]['width']
return width
def tprint(self):
"""Print text to surface."""
if self.messages != []:
if not self.cache:
self.cache_key = self.font_type + str(self.font_size) + str(self.font_color) + str(self.font_bgcolor)
if self.cache_key not in self._cache:
self._cache[self.cache_key] = {}
self.cache = self._cache[self.cache_key]
if not self.multiline:
text = " ".join(self.messages)
if not self.split_text or text.strip().count(' ') == 0:
if self.center:
width = self._get_width(text)
x = self.x - (width//2)
else:
x = self.x + self.margin['l']
for ch in text:
if ch not in self.cache:
self._cache_chr(ch)
self.surface.blit(self.cache[ch]['image'], (x,self.y))
x += self.cache[ch]['width']
else:
words = text.count(' ')
position_y = self.y - words*(self.linesize//2) - 1
texts = text.split(' ')
for count, text in enumerate(texts):
if self.center:
width = self._get_width(text)
x = self.x - (width//2)
y = position_y + (count*self.linesize)
else:
x = self.x
y = position_y + (count*self.linesize)
for ch in text:
if ch not in self.cache:
self._cache_chr(ch)
self.surface.blit(self.cache[ch]['image'], (x,y))
x += self.cache[ch]['width']
else:
position_y = self.y + self.margin['t']
for count, text in enumerate(self.messages):
if self.center:
width = self._get_width(text)
x = self.x - (width//2)
y = position_y + (count*self.linesize)
else:
x = self.x + self.margin['l']
y = position_y + (count*self.linesize)
for ch in text:
if ch not in self.cache:
self._cache_chr(ch)
self.surface.blit(self.cache[ch]['image'], (x,y))
x += self.cache[ch]['width']
self.message = None
self.messages = []
    def update(self):
        """Redraw the queued text by delegating to tprint."""
        self.tprint()
def load_image(filename, frames=1, path='data', zipobj=None, fileobj=None, colorkey=None, errorhandle=True, errorreport=True):
    """
    Load an image, or a strip of image frames, from file.

    Arguments:
        filename: image file name (also used as the load name hint when
            reading from a zip archive or file-like object).
        frames: number of frames in the image strip; 1 returns a single
            surface, >1 returns a list of equal-width frame surfaces.
        path: directory prepended to filename (or to a zipobj file name).
        zipobj: zip archive containing the image, given either as a file
            name (str) or as an already-open ZipFile-like object.
        fileobj: file-like object containing the image data.
        colorkey: transparent color; -1 means "use the color of pixel
            (0,0)"; None disables color keying.
        errorhandle: when True, loading errors are re-raised as-is.
        errorreport: when errorhandle is False, print the error message
            before raising IOError.

    Returns the converted surface (frames == 1), a list of surfaces
    (frames > 1), or None for any other frames value.
    """
    def convert_image(image, colorkey):
        # Convert to display format, keeping per-pixel alpha if present.
        if image.get_alpha():
            image = image.convert_alpha()
        else:
            image = image.convert()
        if colorkey is not None:
            # Fix: compare by value, not identity. 'colorkey is -1' only
            # worked because CPython caches small ints; it is not a
            # language guarantee.
            if colorkey == -1:
                colorkey = image.get_at((0,0))
            image.set_colorkey(colorkey, engine.RLEACCEL)
        return image
    if zipobj:
        import zipfile
        try:
            import cStringIO
        except ImportError:
            import StringIO as cStringIO
        if isinstance(zipobj, str):
            # zipobj is an archive file name: open it, extract the image
            # bytes into an in-memory buffer, and close the archive.
            if path:
                data_file = os.path.join(path, zipobj)
            else:
                data_file = zipobj
            dat = zipfile.ZipFile(data_file)
            fileobj = cStringIO.StringIO(dat.open(filename).read())
            dat.close()
        else:
            # zipobj is an already-open archive object.
            fileobj = cStringIO.StringIO(zipobj.open(filename).read())
        full_name = fileobj
        namehint = filename
    elif fileobj:
        full_name = fileobj
        namehint = filename
    else:
        if path:
            full_name = os.path.join(path, filename)
        else:
            full_name = filename
        namehint = ''
    try:
        if frames == 1:
            image = engine.image.load(full_name, namehint)
            image = convert_image(image, colorkey)
            return image
        elif frames > 1:
            # Slice the strip into 'frames' equal-width subsurfaces.
            images = []
            image = engine.image.load(full_name, namehint)
            width, height = image.get_size()
            width = width // frames
            for frame in range(frames):
                frame_num = width * frame
                image_frame = image.subsurface((frame_num,0,width,height)).copy()
                image_frame.set_alpha(image.get_alpha())
                image_frame = convert_image(image_frame, colorkey)
                images.append(image_frame)
            return images
    except engine.error as message:
        # Fix: 'except engine.error, message:' is Python-2-only syntax and
        # a SyntaxError on Python 3; the 'as' form works on 2.6+ and 3.
        if errorhandle:
            raise
        else:
            if errorreport:
                print(message)
            raise IOError
    return None
| nordicpower/GameListPatch | interphase/util.py | Python | apache-2.0 | 17,408 |
from math import sqrt
from datetime import datetime
from dexter.models import db, Document, Person
from sqlalchemy.sql import func
from sqlalchemy.orm import joinedload
class BaseAnalyser(object):
    """
    Base class for analysers that work over a collection of documents,
    identified either by explicit document ids or by a start/end date
    range.  Whichever of the two was not supplied is derived from the
    other at construction time.
    """
    TREND_UP = 0.5
    TREND_DOWN = -0.5
    def __init__(self, doc_ids=None, start_date=None, end_date=None):
        self.doc_ids = doc_ids
        self.start_date = start_date
        self.end_date = end_date
        # Derive whichever of (date range, doc ids) is missing.
        self._calculate_date_range()
        self._fetch_doc_ids()
        self.n_documents = len(self.doc_ids)
    def _calculate_date_range(self):
        """
        Fill in the date range from the publication dates of the given
        documents when it wasn't supplied, then compute self.days.
        """
        if not self.start_date or not self.end_date:
            if self.doc_ids is None:
                raise ValueError("Need either doc_ids, or both start_date and end_date")
            row = (db.session.query(
                       func.min(Document.published_at),
                       func.max(Document.published_at))
                   .filter(Document.id.in_(self.doc_ids))
                   .first())
            if row and row[0]:
                self.start_date = row[0].date()
                self.end_date = row[1].date()
            else:
                # No matching documents: collapse the range to "now".
                self.start_date = self.end_date = datetime.utcnow()
        # Span of the range in days, never less than one.
        self.days = max((self.end_date - self.start_date).days, 1)
    def _fetch_doc_ids(self):
        """Fill in self.doc_ids from the date range when not supplied."""
        if self.doc_ids is not None:
            return
        rows = (db.session.query(Document.id)
                .filter(Document.published_at >= self.start_date.strftime('%Y-%m-%d 00:00:00'))
                .filter(Document.published_at <= self.end_date.strftime('%Y-%m-%d 23:59:59'))
                .all())
        self.doc_ids = [row[0] for row in rows]
    def _lookup_people(self, ids):
        """Return a dict mapping person id -> Person for the given ids."""
        people = (Person.query
                  .options(joinedload(Person.affiliation))
                  .filter(Person.id.in_(ids))
                  .all())
        return {person.id: person for person in people}
def moving_weighted_avg_zscore(obs, decay=0.8):
    """
    Calculate a moving-weighted average z-score for the final value of
    +obs+, a list of observations, given +decay+, the rate at which
    older observations decay.

    Returns None for an empty list.  A single observation scores 0.0
    (it cannot deviate from itself) — previously this case fell through
    the loop and silently returned None.

    See http://stackoverflow.com/questions/787496/what-is-the-best-way-to-compute-trending-topics-or-tags
    See http://pandas.pydata.org/pandas-docs/stable/generated/pandas.ewma.html#pandas.ewma
    """
    if not obs:
        # no observations: preserve the original implicit None result
        return None

    # seed the weighted averages from the first observation
    avg = float(obs[0])
    sq_avg = float(obs[0] ** 2)

    # fold in everything between the first and last observations
    for x in obs[1:-1]:
        avg = avg * decay + (1.0 - decay) * x
        sq_avg = sq_avg * decay + (1.0 - decay) * (x ** 2)

    # score the final observation against the weighted history
    x = obs[-1]
    # clamp at zero: floating-point error can make the variance estimate
    # slightly negative, which would crash sqrt()
    std = sqrt(max(sq_avg - avg ** 2, 0.0))
    if std == 0:
        return x - avg
    return (x - avg) / std
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'EndPortEnum' : _MetaInfoEnum('EndPortEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper',
{
'echo':'echo',
'discard':'discard',
'daytime':'daytime',
'chargen':'chargen',
'ftp-data':'ftp_data',
'ftp':'ftp',
'ssh':'ssh',
'telnet':'telnet',
'smtp':'smtp',
'time':'time',
'nicname':'nicname',
'tacacs':'tacacs',
'domain':'domain',
'gopher':'gopher',
'finger':'finger',
'www':'www',
'host-name':'host_name',
'pop2':'pop2',
'pop3':'pop3',
'sun-rpc':'sun_rpc',
'ident':'ident',
'nntp':'nntp',
'bgp':'bgp',
'irc':'irc',
'pim-auto-rp':'pim_auto_rp',
'exec':'exec_',
'login':'login',
'cmd':'cmd',
'lpd':'lpd',
'uucp':'uucp',
'klogin':'klogin',
'kshell':'kshell',
'talk':'talk',
'ldp':'ldp',
}, 'Cisco-IOS-XR-infra-objmgr-oper', _yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper']),
'PortEnum' : _MetaInfoEnum('PortEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper',
{
'echo':'echo',
'discard':'discard',
'daytime':'daytime',
'chargen':'chargen',
'ftp-data':'ftp_data',
'ftp':'ftp',
'ssh':'ssh',
'telnet':'telnet',
'smtp':'smtp',
'time':'time',
'nicname':'nicname',
'tacacs':'tacacs',
'domain':'domain',
'gopher':'gopher',
'finger':'finger',
'www':'www',
'host-name':'host_name',
'pop2':'pop2',
'pop3':'pop3',
'sun-rpc':'sun_rpc',
'ident':'ident',
'nntp':'nntp',
'bgp':'bgp',
'irc':'irc',
'pim-auto-rp':'pim_auto_rp',
'exec':'exec_',
'login':'login',
'cmd':'cmd',
'lpd':'lpd',
'uucp':'uucp',
'klogin':'klogin',
'kshell':'kshell',
'talk':'talk',
'ldp':'ldp',
}, 'Cisco-IOS-XR-infra-objmgr-oper', _yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper']),
'PortOperatorEnum' : _MetaInfoEnum('PortOperatorEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper',
{
'equal':'equal',
'not-equal':'not_equal',
'greater-than':'greater_than',
'less-than':'less_than',
}, 'Cisco-IOS-XR-infra-objmgr-oper', _yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper']),
'StartPortEnum' : _MetaInfoEnum('StartPortEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper',
{
'echo':'echo',
'discard':'discard',
'daytime':'daytime',
'chargen':'chargen',
'ftp-data':'ftp_data',
'ftp':'ftp',
'ssh':'ssh',
'telnet':'telnet',
'smtp':'smtp',
'time':'time',
'nicname':'nicname',
'tacacs':'tacacs',
'domain':'domain',
'gopher':'gopher',
'finger':'finger',
'www':'www',
'host-name':'host_name',
'pop2':'pop2',
'pop3':'pop3',
'sun-rpc':'sun_rpc',
'ident':'ident',
'nntp':'nntp',
'bgp':'bgp',
'irc':'irc',
'pim-auto-rp':'pim_auto_rp',
'exec':'exec_',
'login':'login',
'cmd':'cmd',
'lpd':'lpd',
'uucp':'uucp',
'klogin':'klogin',
'kshell':'kshell',
'talk':'talk',
'ldp':'ldp',
}, 'Cisco-IOS-XR-infra-objmgr-oper', _yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper']),
'ObjectGroup.Port.Objects.Object.NestedGroups.NestedGroup' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects.Object.NestedGroups.NestedGroup',
False,
[
_MetaInfoClassMember('nested-group-name', ATTRIBUTE, 'str' , None, None,
[(1, 64)], [],
''' Nested object group
''',
'nested_group_name',
'Cisco-IOS-XR-infra-objmgr-oper', True),
_MetaInfoClassMember('nested-group-name-xr', ATTRIBUTE, 'str' , None, None,
[], [],
''' Nested group
''',
'nested_group_name_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'nested-group',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Port.Objects.Object.NestedGroups' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects.Object.NestedGroups',
False,
[
_MetaInfoClassMember('nested-group', REFERENCE_LIST, 'NestedGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Port.Objects.Object.NestedGroups.NestedGroup',
[], [],
''' nested object group
''',
'nested_group',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'nested-groups',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Port.Objects.Object.Operators.Operator' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects.Object.Operators.Operator',
False,
[
_MetaInfoClassMember('operator-type', REFERENCE_ENUM_CLASS, 'PortOperatorEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'PortOperatorEnum',
[], [],
''' operation for ports
''',
'operator_type',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('operator-type-xr', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Operator
''',
'operator_type_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('port', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Port number
''',
'port',
'Cisco-IOS-XR-infra-objmgr-oper', False, [
_MetaInfoClassMember('port', REFERENCE_ENUM_CLASS, 'PortEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'PortEnum',
[], [],
''' Port number
''',
'port',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('port', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Port number
''',
'port',
'Cisco-IOS-XR-infra-objmgr-oper', False),
]),
_MetaInfoClassMember('port-xr', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Port
''',
'port_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'operator',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Port.Objects.Object.Operators' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects.Object.Operators',
False,
[
_MetaInfoClassMember('operator', REFERENCE_LIST, 'Operator' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Port.Objects.Object.Operators.Operator',
[], [],
''' op class
''',
'operator',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'operators',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Port.Objects.Object.PortRanges.PortRange' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects.Object.PortRanges.PortRange',
False,
[
_MetaInfoClassMember('end-port', REFERENCE_UNION, 'str' , None, None,
[], [],
''' End port number
''',
'end_port',
'Cisco-IOS-XR-infra-objmgr-oper', False, [
_MetaInfoClassMember('end-port', REFERENCE_ENUM_CLASS, 'EndPortEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'EndPortEnum',
[], [],
''' End port number
''',
'end_port',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('end-port', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' End port number
''',
'end_port',
'Cisco-IOS-XR-infra-objmgr-oper', False),
]),
_MetaInfoClassMember('end-port-xr', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Port end address
''',
'end_port_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('start-port', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Start port number
''',
'start_port',
'Cisco-IOS-XR-infra-objmgr-oper', False, [
_MetaInfoClassMember('start-port', REFERENCE_ENUM_CLASS, 'StartPortEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'StartPortEnum',
[], [],
''' Start port number
''',
'start_port',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('start-port', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Start port number
''',
'start_port',
'Cisco-IOS-XR-infra-objmgr-oper', False),
]),
_MetaInfoClassMember('start-port-xr', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Port start address
''',
'start_port_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'port-range',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Port.Objects.Object.PortRanges' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects.Object.PortRanges',
False,
[
_MetaInfoClassMember('port-range', REFERENCE_LIST, 'PortRange' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Port.Objects.Object.PortRanges.PortRange',
[], [],
''' Match only packets on a given port range
''',
'port_range',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'port-ranges',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Port.Objects.Object.ParentGroups.ParentGroup' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects.Object.ParentGroups.ParentGroup',
False,
[
_MetaInfoClassMember('parent-group-name', ATTRIBUTE, 'str' , None, None,
[(1, 64)], [],
''' Nested object group
''',
'parent_group_name',
'Cisco-IOS-XR-infra-objmgr-oper', True),
_MetaInfoClassMember('parent-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Parent node
''',
'parent_name',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'parent-group',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Port.Objects.Object.ParentGroups' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects.Object.ParentGroups',
False,
[
_MetaInfoClassMember('parent-group', REFERENCE_LIST, 'ParentGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Port.Objects.Object.ParentGroups.ParentGroup',
[], [],
''' Parent object group
''',
'parent_group',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'parent-groups',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Port.Objects.Object' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects.Object',
False,
[
_MetaInfoClassMember('object-name', ATTRIBUTE, 'str' , None, None,
[(1, 64)], [],
''' Port object group name
''',
'object_name',
'Cisco-IOS-XR-infra-objmgr-oper', True),
_MetaInfoClassMember('nested-groups', REFERENCE_CLASS, 'NestedGroups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Port.Objects.Object.NestedGroups',
[], [],
''' Table of NestedGroup
''',
'nested_groups',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('operators', REFERENCE_CLASS, 'Operators' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Port.Objects.Object.Operators',
[], [],
''' Table of Operator
''',
'operators',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('parent-groups', REFERENCE_CLASS, 'ParentGroups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Port.Objects.Object.ParentGroups',
[], [],
''' Table of ParentGroup
''',
'parent_groups',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('port-ranges', REFERENCE_CLASS, 'PortRanges' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Port.Objects.Object.PortRanges',
[], [],
''' Table of PortRange
''',
'port_ranges',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'object',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Port.Objects' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port.Objects',
False,
[
_MetaInfoClassMember('object', REFERENCE_LIST, 'Object' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Port.Objects.Object',
[], [],
''' Port object group
''',
'object',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'objects',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Port' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Port',
False,
[
_MetaInfoClassMember('objects', REFERENCE_CLASS, 'Objects' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Port.Objects',
[], [],
''' Table of Object
''',
'objects',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'port',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups.NestedGroup' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups.NestedGroup',
False,
[
_MetaInfoClassMember('nested-group-name', ATTRIBUTE, 'str' , None, None,
[(1, 64)], [],
''' Enter the name of a nested object group
''',
'nested_group_name',
'Cisco-IOS-XR-infra-objmgr-oper', True),
_MetaInfoClassMember('nested-group-name-xr', ATTRIBUTE, 'str' , None, None,
[], [],
''' Nested group
''',
'nested_group_name_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'nested-group',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups',
False,
[
_MetaInfoClassMember('nested-group', REFERENCE_LIST, 'NestedGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups.NestedGroup',
[], [],
''' nested object group
''',
'nested_group',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'nested-groups',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.Addresses.Address' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.Addresses.Address',
False,
[
_MetaInfoClassMember('prefix', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 prefix x:x::x/y
''',
'prefix',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('0', '128')], [],
''' Prefix of the IP Address
''',
'prefix_length',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('prefix-length-xr', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Prefix length
''',
'prefix_length_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('prefix-xr', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv4 Address
''',
'prefix_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'address',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.Addresses' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.Addresses',
False,
[
_MetaInfoClassMember('address', REFERENCE_LIST, 'Address' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv6.Objects.Object.Addresses.Address',
[], [],
''' IPv6 address
''',
'address',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'addresses',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges.AddressRange' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges.AddressRange',
False,
[
_MetaInfoClassMember('end-address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address
''',
'end_address',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('end-address-xr', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Range end address
''',
'end_address_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('start-address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IPv6 address
''',
'start_address',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('start-address-xr', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Range start address
''',
'start_address_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'address-range',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges',
False,
[
_MetaInfoClassMember('address-range', REFERENCE_LIST, 'AddressRange' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges.AddressRange',
[], [],
''' Range of host addresses
''',
'address_range',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'address-ranges',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.ParentGroups.ParentGroup' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.ParentGroups.ParentGroup',
False,
[
_MetaInfoClassMember('parent-group-name', ATTRIBUTE, 'str' , None, None,
[(1, 64)], [],
''' Nested object group
''',
'parent_group_name',
'Cisco-IOS-XR-infra-objmgr-oper', True),
_MetaInfoClassMember('parent-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Parent node
''',
'parent_name',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'parent-group',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.ParentGroups' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.ParentGroups',
False,
[
_MetaInfoClassMember('parent-group', REFERENCE_LIST, 'ParentGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv6.Objects.Object.ParentGroups.ParentGroup',
[], [],
''' Parent object group
''',
'parent_group',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'parent-groups',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.Hosts.Host' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.Hosts.Host',
False,
[
_MetaInfoClassMember('host-address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' host ipv6 address
''',
'host_address',
'Cisco-IOS-XR-infra-objmgr-oper', True),
_MetaInfoClassMember('host-address-xr', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Host address
''',
'host_address_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'host',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object.Hosts' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object.Hosts',
False,
[
_MetaInfoClassMember('host', REFERENCE_LIST, 'Host' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv6.Objects.Object.Hosts.Host',
[], [],
''' A single host address
''',
'host',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'hosts',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv6.Objects.Object' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects.Object',
False,
[
_MetaInfoClassMember('object-name', ATTRIBUTE, 'str' , None, None,
[(1, 64)], [],
''' IPv6 object group name - maximum 64
characters
''',
'object_name',
'Cisco-IOS-XR-infra-objmgr-oper', True),
_MetaInfoClassMember('address-ranges', REFERENCE_CLASS, 'AddressRanges' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges',
[], [],
''' Table of AddressRange
''',
'address_ranges',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('addresses', REFERENCE_CLASS, 'Addresses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv6.Objects.Object.Addresses',
[], [],
''' Table of Address
''',
'addresses',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('hosts', REFERENCE_CLASS, 'Hosts' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv6.Objects.Object.Hosts',
[], [],
''' Table of Host
''',
'hosts',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('nested-groups', REFERENCE_CLASS, 'NestedGroups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups',
[], [],
''' Table of NestedGroup
''',
'nested_groups',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('parent-groups', REFERENCE_CLASS, 'ParentGroups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv6.Objects.Object.ParentGroups',
[], [],
''' Table of parent object group
''',
'parent_groups',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'object',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv6.Objects' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6.Objects',
False,
[
_MetaInfoClassMember('object', REFERENCE_LIST, 'Object' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv6.Objects.Object',
[], [],
''' IPv6 object group
''',
'object',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'objects',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv6' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv6',
False,
[
_MetaInfoClassMember('objects', REFERENCE_CLASS, 'Objects' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv6.Objects',
[], [],
''' Table of Object
''',
'objects',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'ipv6',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups.NestedGroup' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups.NestedGroup',
False,
[
_MetaInfoClassMember('nested-group-name', ATTRIBUTE, 'str' , None, None,
[(1, 64)], [],
''' Nested object group
''',
'nested_group_name',
'Cisco-IOS-XR-infra-objmgr-oper', True),
_MetaInfoClassMember('nested-group-name-xr', ATTRIBUTE, 'str' , None, None,
[], [],
''' Nested group
''',
'nested_group_name_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'nested-group',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups',
False,
[
_MetaInfoClassMember('nested-group', REFERENCE_LIST, 'NestedGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups.NestedGroup',
[], [],
''' Nested object group
''',
'nested_group',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'nested-groups',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.Addresses.Address' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.Addresses.Address',
False,
[
_MetaInfoClassMember('prefix', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address/prefix
''',
'prefix',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('prefix-length', ATTRIBUTE, 'int' , None, None,
[('0', '32')], [],
''' Prefix of the IP Address
''',
'prefix_length',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('prefix-length-xr', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Prefix length
''',
'prefix_length_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('prefix-xr', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 Address
''',
'prefix_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'address',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.Addresses' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.Addresses',
False,
[
_MetaInfoClassMember('address', REFERENCE_LIST, 'Address' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv4.Objects.Object.Addresses.Address',
[], [],
''' IPv4 address
''',
'address',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'addresses',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges.AddressRange' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges.AddressRange',
False,
[
_MetaInfoClassMember('end-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address
''',
'end_address',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('end-address-xr', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Range end address
''',
'end_address_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('start-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 address
''',
'start_address',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('start-address-xr', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Range start address
''',
'start_address_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'address-range',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges',
False,
[
_MetaInfoClassMember('address-range', REFERENCE_LIST, 'AddressRange' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges.AddressRange',
[], [],
''' Range of host addresses
''',
'address_range',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'address-ranges',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.ParentGroups.ParentGroup' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.ParentGroups.ParentGroup',
False,
[
_MetaInfoClassMember('parent-group-name', ATTRIBUTE, 'str' , None, None,
[(1, 64)], [],
''' Nested object group
''',
'parent_group_name',
'Cisco-IOS-XR-infra-objmgr-oper', True),
_MetaInfoClassMember('parent-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Parent node
''',
'parent_name',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'parent-group',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.ParentGroups' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.ParentGroups',
False,
[
_MetaInfoClassMember('parent-group', REFERENCE_LIST, 'ParentGroup' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv4.Objects.Object.ParentGroups.ParentGroup',
[], [],
''' Parent object group
''',
'parent_group',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'parent-groups',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.Hosts.Host' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.Hosts.Host',
False,
[
_MetaInfoClassMember('host-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Host ipv4 address
''',
'host_address',
'Cisco-IOS-XR-infra-objmgr-oper', True),
_MetaInfoClassMember('host-address-xr', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Host address
''',
'host_address_xr',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'host',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object.Hosts' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object.Hosts',
False,
[
_MetaInfoClassMember('host', REFERENCE_LIST, 'Host' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv4.Objects.Object.Hosts.Host',
[], [],
''' A single host address
''',
'host',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'hosts',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv4.Objects.Object' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects.Object',
False,
[
_MetaInfoClassMember('object-name', ATTRIBUTE, 'str' , None, None,
[(1, 64)], [],
''' IPv4 object group name - maximum 64
characters
''',
'object_name',
'Cisco-IOS-XR-infra-objmgr-oper', True),
_MetaInfoClassMember('address-ranges', REFERENCE_CLASS, 'AddressRanges' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges',
[], [],
''' Table of AddressRange
''',
'address_ranges',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('addresses', REFERENCE_CLASS, 'Addresses' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv4.Objects.Object.Addresses',
[], [],
''' Table of Address
''',
'addresses',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('hosts', REFERENCE_CLASS, 'Hosts' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv4.Objects.Object.Hosts',
[], [],
''' Table of Host
''',
'hosts',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('nested-groups', REFERENCE_CLASS, 'NestedGroups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups',
[], [],
''' Table of NestedGroup
''',
'nested_groups',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('parent-groups', REFERENCE_CLASS, 'ParentGroups' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv4.Objects.Object.ParentGroups',
[], [],
''' Table of parent object group
''',
'parent_groups',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'object',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv4.Objects' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4.Objects',
False,
[
_MetaInfoClassMember('object', REFERENCE_LIST, 'Object' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv4.Objects.Object',
[], [],
''' IPv4 object group
''',
'object',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'objects',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network.Ipv4' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network.Ipv4',
False,
[
_MetaInfoClassMember('objects', REFERENCE_CLASS, 'Objects' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv4.Objects',
[], [],
''' Table of Object
''',
'objects',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'ipv4',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup.Network' : {
'meta_info' : _MetaInfoClass('ObjectGroup.Network',
False,
[
_MetaInfoClassMember('ipv4', REFERENCE_CLASS, 'Ipv4' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv4',
[], [],
''' IPv4 object group
''',
'ipv4',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('ipv6', REFERENCE_CLASS, 'Ipv6' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network.Ipv6',
[], [],
''' IPv6 object group
''',
'ipv6',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'network',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
'ObjectGroup' : {
'meta_info' : _MetaInfoClass('ObjectGroup',
False,
[
_MetaInfoClassMember('network', REFERENCE_CLASS, 'Network' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Network',
[], [],
''' Network object group
''',
'network',
'Cisco-IOS-XR-infra-objmgr-oper', False),
_MetaInfoClassMember('port', REFERENCE_CLASS, 'Port' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper', 'ObjectGroup.Port',
[], [],
''' Port object group
''',
'port',
'Cisco-IOS-XR-infra-objmgr-oper', False),
],
'Cisco-IOS-XR-infra-objmgr-oper',
'object-group',
_yang_ns._namespaces['Cisco-IOS-XR-infra-objmgr-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_objmgr_oper'
),
},
}
# Wire up the parent links of the generated meta-info tree: each child node's
# .parent points at the meta_info of its enclosing YANG container.  Links are
# set leaf-first (deepest nodes before their containers) for each subtree.
_meta_table['ObjectGroup.Port.Objects.Object.NestedGroups.NestedGroup']['meta_info'].parent =_meta_table['ObjectGroup.Port.Objects.Object.NestedGroups']['meta_info']
_meta_table['ObjectGroup.Port.Objects.Object.Operators.Operator']['meta_info'].parent =_meta_table['ObjectGroup.Port.Objects.Object.Operators']['meta_info']
_meta_table['ObjectGroup.Port.Objects.Object.PortRanges.PortRange']['meta_info'].parent =_meta_table['ObjectGroup.Port.Objects.Object.PortRanges']['meta_info']
_meta_table['ObjectGroup.Port.Objects.Object.ParentGroups.ParentGroup']['meta_info'].parent =_meta_table['ObjectGroup.Port.Objects.Object.ParentGroups']['meta_info']
_meta_table['ObjectGroup.Port.Objects.Object.NestedGroups']['meta_info'].parent =_meta_table['ObjectGroup.Port.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Port.Objects.Object.Operators']['meta_info'].parent =_meta_table['ObjectGroup.Port.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Port.Objects.Object.PortRanges']['meta_info'].parent =_meta_table['ObjectGroup.Port.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Port.Objects.Object.ParentGroups']['meta_info'].parent =_meta_table['ObjectGroup.Port.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Port.Objects.Object']['meta_info'].parent =_meta_table['ObjectGroup.Port.Objects']['meta_info']
_meta_table['ObjectGroup.Port.Objects']['meta_info'].parent =_meta_table['ObjectGroup.Port']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups.NestedGroup']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.Addresses.Address']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.Addresses']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges.AddressRange']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.ParentGroups.ParentGroup']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.ParentGroups']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.Hosts.Host']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.Hosts']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.NestedGroups']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.Addresses']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.AddressRanges']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.ParentGroups']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object.Hosts']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects.Object']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6.Objects']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6.Objects']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv6']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups.NestedGroup']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.Addresses.Address']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.Addresses']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges.AddressRange']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.ParentGroups.ParentGroup']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.ParentGroups']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.Hosts.Host']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.Hosts']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.NestedGroups']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.Addresses']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.AddressRanges']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.ParentGroups']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object.Hosts']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects.Object']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects.Object']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4.Objects']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4.Objects']['meta_info'].parent =_meta_table['ObjectGroup.Network.Ipv4']['meta_info']
_meta_table['ObjectGroup.Network.Ipv6']['meta_info'].parent =_meta_table['ObjectGroup.Network']['meta_info']
_meta_table['ObjectGroup.Network.Ipv4']['meta_info'].parent =_meta_table['ObjectGroup.Network']['meta_info']
_meta_table['ObjectGroup.Port']['meta_info'].parent =_meta_table['ObjectGroup']['meta_info']
_meta_table['ObjectGroup.Network']['meta_info'].parent =_meta_table['ObjectGroup']['meta_info']
| 111pontes/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_infra_objmgr_oper.py | Python | apache-2.0 | 57,513 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Replace the ``Email`` model with ``EmailAddress`` and rename the
    ``user`` foreign key to ``owner`` on the ownable mainsite models.

    NOTE(review): ``Email`` is dropped and ``EmailAddress`` created from
    scratch, so existing Email rows are not migrated -- confirm this is
    intended.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('mainsite', '0005_auto_20150909_0246'),
    ]

    operations = [
        # New model that supersedes the Email model deleted below; its
        # customer/location/owner FKs are attached at the end.
        migrations.CreateModel(
            name='EmailAddress',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, verbose_name='ID', serialize=False)),
                ('name', models.CharField(max_length=100)),
                ('email_address', models.CharField(max_length=200)),
            ],
        ),
        # Drop Email's FKs first so DeleteModel can run cleanly.
        migrations.RemoveField(
            model_name='email',
            name='customer',
        ),
        migrations.RemoveField(
            model_name='email',
            name='location',
        ),
        # user -> owner renames (data is preserved by RenameField).
        migrations.RenameField(
            model_name='customer',
            old_name='user',
            new_name='owner',
        ),
        migrations.RenameField(
            model_name='location',
            old_name='user',
            new_name='owner',
        ),
        migrations.RenameField(
            model_name='session',
            old_name='user',
            new_name='owner',
        ),
        migrations.RenameField(
            model_name='sessiontype',
            old_name='user',
            new_name='owner',
        ),
        # Models that never had a user FK get an owner FK; existing rows
        # are backfilled with the user whose pk is 1, and the default is
        # not kept on the model afterwards (preserve_default=False).
        migrations.AddField(
            model_name='address',
            name='owner',
            field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='link',
            name='owner',
            field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='phone',
            name='owner',
            field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
        migrations.DeleteModel(
            name='Email',
        ),
        # Attach EmailAddress's relations last, mirroring Email's old FKs.
        migrations.AddField(
            model_name='emailaddress',
            name='customer',
            field=models.ForeignKey(to='mainsite.Customer', null=True, blank=True),
        ),
        migrations.AddField(
            model_name='emailaddress',
            name='location',
            field=models.ForeignKey(to='mainsite.Location', null=True, blank=True),
        ),
        migrations.AddField(
            model_name='emailaddress',
            name='owner',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
        ),
    ]
| srenner/photerva | mainsite/migrations/0006_auto_20150916_0219.py | Python | apache-2.0 | 2,722 |
# Copyright 2015-2017 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import struct
from .message import Message
@Message.register
class Termination(Message):
    """BMP Termination message (RFC 7854, section 4.5).

    The last message on a BMP session; it carries a sequence of
    information TLVs describing why the monitored router closed the
    session.
    """

    TYPE = Message.TERMINATION
    TYPE_STR = 'termination'

    # Human-readable text for the type-1 (Reason) TLV codes defined by
    # RFC 7854.  Strings are built with implicit concatenation so no
    # source indentation leaks into the message text (the previous
    # backslash line continuations embedded long runs of spaces).
    reason_codict = {
        0: "Session administratively closed. The session might be re-initiated.",
        1: "Unspecified reason.",
        2: "Out of resources. The router has exhausted resources available for the BMP session.",
        3: "Redundant connection. The router has determined "
           "that this connection is redundant with another one.",
        4: "Session permanently administratively closed, "
           "will not be re-initiated. Monitoring station should reduce "
           "(potentially to 0) the rate at which it attempts "
           "reconnection to the monitored router."
    }

    @classmethod
    def unpack(cls, data):
        """Parse the TLVs of a Termination message body.

        Each TLV is a 2-byte type, a 2-byte length, then ``length``
        bytes of value.  Type 0 carries a free-form ASCII string,
        type 1 a 2-byte reason code.  Returns a Termination instance
        whose value maps 'string'/'reason' to the decoded contents.
        """
        infor_tlv = dict()
        # Require a full 4-byte TLV header; a shorter tail is
        # truncated/padding and must not raise struct.error.
        while len(data) >= 4:
            info_type, info_len = struct.unpack('!HH', data[0:4])
            info_value = data[4: 4 + info_len]
            if info_type == 0:
                infor_tlv['string'] = info_value.decode('ascii')
            elif info_type == 1:
                code = struct.unpack('!H', info_value)[0]
                # Unknown codes (future spec revisions) must not crash
                # the parser with a KeyError.
                infor_tlv['reason'] = cls.reason_codict.get(
                    code, 'Unknown termination reason code %d.' % code)
            data = data[4 + info_len:]
        return cls(value=infor_tlv)
| smartbgp/libbgp | libbgp/bmp/termination.py | Python | apache-2.0 | 1,910 |
"""Class to reload platforms."""
from __future__ import annotations
import asyncio
from collections.abc import Iterable
import logging
from typing import Any
from homeassistant import config as conf_util
from homeassistant.const import SERVICE_RELOAD
from homeassistant.core import Event, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.loader import async_get_integration
from homeassistant.setup import async_setup_component
from . import config_per_platform
from .entity_platform import EntityPlatform, async_get_platforms
from .typing import ConfigType
# mypy: disallow-any-generics
_LOGGER = logging.getLogger(__name__)
async def async_reload_integration_platforms(
    hass: HomeAssistant, integration_name: str, integration_platforms: Iterable[str]
) -> None:
    """Reload an integration's platforms.

    The platform must support being re-setup.

    This functionality is only intended to be used for integrations that process
    Home Assistant data and make this available to other integrations.

    Examples are template, stats, derivative, utility meter.
    """
    try:
        unprocessed_conf = await conf_util.async_hass_config_yaml(hass)
    except HomeAssistantError as err:
        # Malformed YAML: log and keep the currently loaded config running
        # rather than tearing anything down.
        _LOGGER.error(err)
        return

    # Re-setup every requested platform concurrently from the fresh config.
    tasks = [
        _resetup_platform(
            hass, integration_name, integration_platform, unprocessed_conf
        )
        for integration_platform in integration_platforms
    ]

    await asyncio.gather(*tasks)
async def _resetup_platform(
    hass: HomeAssistant,
    integration_name: str,
    integration_platform: str,
    unprocessed_conf: ConfigType,
) -> None:
    """Resetup a platform.

    Validates `unprocessed_conf` for `integration_platform`, extracts the
    entries belonging to `integration_name`, and re-applies them using the
    first applicable strategy: the component's own async_reset_platform,
    an already-loaded entity platform, or a fresh platform setup.
    """
    integration = await async_get_integration(hass, integration_platform)

    conf = await conf_util.async_process_component_config(
        hass, unprocessed_conf, integration
    )

    # Validation failed (async_process_component_config returned None/empty);
    # nothing safe to apply.
    if not conf:
        return

    root_config: dict[str, Any] = {integration_platform: []}
    # Extract only the config for template, ignore the rest.
    for p_type, p_config in config_per_platform(conf, integration_platform):
        if p_type != integration_name:
            continue
        root_config[integration_platform].append(p_config)

    component = integration.get_component()

    if hasattr(component, "async_reset_platform"):
        # If the integration has its own way to reset
        # use this method.
        await component.async_reset_platform(hass, integration_name)
        await component.async_setup(hass, root_config)
        return

    # If it's an entity platform, we use the entity_platform
    # async_reset method
    platform = async_get_platform_without_config_entry(
        hass, integration_name, integration_platform
    )
    if platform:
        await _async_reconfig_platform(platform, root_config[integration_platform])
        return

    if not root_config[integration_platform]:
        # No config for this platform
        # and it's not loaded. Nothing to do.
        return

    # Platform has config but was never loaded: set it up from scratch.
    await _async_setup_platform(
        hass, integration_name, integration_platform, root_config[integration_platform]
    )
async def _async_setup_platform(
    hass: HomeAssistant,
    integration_name: str,
    integration_platform: str,
    platform_configs: list[dict[str, Any]],
) -> None:
    """Set up a platform for the first time when new configuration is added."""
    if integration_platform not in hass.data:
        # Component not loaded at all yet: boot it with the new config.
        await async_setup_component(
            hass, integration_platform, {integration_platform: platform_configs}
        )
        return

    # Component already loaded: add the new platform entries to the
    # existing entity component, concurrently.
    entity_component = hass.data[integration_platform]
    tasks = [
        entity_component.async_setup_platform(integration_name, p_config)
        for p_config in platform_configs
    ]

    await asyncio.gather(*tasks)
async def _async_reconfig_platform(
    platform: EntityPlatform, platform_configs: list[dict[str, Any]]
) -> None:
    """Tear down an already loaded platform, then set it up again for
    every config entry in platform_configs (setups run concurrently)."""
    await platform.async_reset()
    await asyncio.gather(
        *(platform.async_setup(p_config) for p_config in platform_configs)
    )
async def async_integration_yaml_config(
    hass: HomeAssistant, integration_name: str
) -> ConfigType | None:
    """Fetch and validate the latest yaml configuration for an integration.

    Returns None when validation fails.
    """
    integration = await async_get_integration(hass, integration_name)
    hass_config = await conf_util.async_hass_config_yaml(hass)
    return await conf_util.async_process_component_config(
        hass, hass_config, integration
    )
@callback
def async_get_platform_without_config_entry(
    hass: HomeAssistant, integration_name: str, integration_platform_name: str
) -> EntityPlatform | None:
    """Find an existing platform that is not a config entry.

    Returns the first loaded platform of `integration_name` whose domain
    matches and that was set up from YAML (no config entry), or None.
    """
    for candidate in async_get_platforms(hass, integration_name):
        # Config-entry based platforms are excluded by definition.
        if candidate.config_entry is not None:
            continue
        if candidate.domain == integration_platform_name:
            return candidate
    return None
async def async_setup_reload_service(
    hass: HomeAssistant, domain: str, platforms: Iterable[str]
) -> None:
    """Create the reload service for the domain."""
    # Another platform of this domain may already have registered it.
    if hass.services.has_service(domain, SERVICE_RELOAD):
        return

    async def _reload_config(call: Event) -> None:
        """Reload the platforms."""
        await async_reload_integration_platforms(hass, domain, platforms)
        # Announce completion so listeners can react to the reload.
        hass.bus.async_fire(f"event_{domain}_reloaded", context=call.context)

    hass.helpers.service.async_register_admin_service(
        domain, SERVICE_RELOAD, _reload_config
    )
def setup_reload_service(
    hass: HomeAssistant, domain: str, platforms: Iterable[str]
) -> None:
    """Sync version of async_setup_reload_service."""
    future = asyncio.run_coroutine_threadsafe(
        async_setup_reload_service(hass, domain, platforms),
        hass.loop,
    )
    # Block this (non event-loop) thread until registration completes.
    future.result()
| mezz64/home-assistant | homeassistant/helpers/reload.py | Python | apache-2.0 | 5,950 |
class Solution:
    def minDistance(self, word1, word2):
        """Return the Levenshtein edit distance between word1 and word2.

        :type word1: str
        :type word2: str
        :rtype: int

        Classic DP, kept as a single rolling row: prev[j] holds the
        distance between word1[:i-1] and word2[:j], so memory is
        O(len(word2)) instead of O(len(word1) * len(word2)).
        """
        n, m = len(word1), len(word2)
        # Distance from the empty prefix of word1 to word2[:j] is j.
        prev = list(range(m + 1))
        for i in range(1, n + 1):
            # curr[0]: deleting all i characters of word1[:i].
            curr = [i] + [0] * m
            for j in range(1, m + 1):
                if word1[i - 1] == word2[j - 1]:
                    curr[j] = prev[j - 1]
                else:
                    # 1 + best of delete, insert, substitute.
                    curr[j] = 1 + min(prev[j], curr[j - 1], prev[j - 1])
            prev = curr
        return prev[m]
| MingfeiPan/leetcode | dp/72.py | Python | apache-2.0 | 859 |
# Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build script for the python controller bindings."""
import os
import subprocess
import sys
from setuptools import Extension
from setuptools import setup
from setuptools.command.build_ext import build_ext
class CMakeExtension(Extension):
    """Extension to record the directory to run cmake on."""

    def __init__(self, name, sourcedir, cmake):
        # No source files: the actual build is delegated to CMake.
        Extension.__init__(self, name, sources=[])
        # Absolute path so the build works regardless of the cwd that
        # setuptools invokes build_ext from.
        self.sourcedir = os.path.abspath(sourcedir)
        # Path or name of the cmake executable to invoke.
        self.cmake = cmake
class CMakeBuild(build_ext):
    """Runs cmake."""

    def build_extension(self, ext):
        """Configure and build one CMakeExtension with cmake.

        Generates build files into the setuptools temp dir, then builds;
        the resulting shared library lands in the directory setuptools
        expects for ext.name.
        """
        output_directory = os.path.abspath(
            os.path.dirname(self.get_ext_fullpath(ext.name)))
        # required for auto-detection of auxiliary "native" libs
        if not output_directory.endswith(os.path.sep):
            output_directory += os.path.sep

        build_type = "Debug" if self.debug else "Release"

        # Point CMake's library output at setuptools' target dir and pin
        # the interpreter/version so the bindings match this Python.
        cmake_args = [
            "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY={}".format(output_directory),
            "-DPYTHON_EXECUTABLE={}".format(sys.executable),
            "-DDMR_PYTHON_VERSION={}.{}".format(sys.version_info.major,
                                                sys.version_info.minor),
            "-DCMAKE_BUILD_TYPE={}".format(build_type),
            "-DDM_ROBOTICS_BUILD_TESTS=OFF",
            "-DDM_ROBOTICS_BUILD_WHEEL=True",
            "--log-level=VERBOSE",
        ]
        # Optional linker version script, forwarded from the environment.
        version_script = os.environ.get("DM_ROBOTICS_VERSION_SCRIPT", None)
        if version_script:
            cmake_args.append(f"-DDM_ROBOTICS_VERSION_SCRIPT={version_script}",)

        build_args = []
        # Default to a modest parallel build unless the user already set
        # CMake's own parallelism knob.
        if "CMAKE_BUILD_PARALLEL_LEVEL" not in os.environ:
            build_args += ["-j4"]

        if not os.path.exists(self.build_temp):
            os.makedirs(self.build_temp)

        # Generate build files:
        subprocess.check_call(
            [ext.cmake] + cmake_args + ["-S", ext.sourcedir], cwd=self.build_temp)
        # Build.
        subprocess.check_call(
            [ext.cmake, "--build", "."] + build_args, cwd=self.build_temp)
def _get_requirements(requirements_file):  # pylint: disable=g-doc-args
    """Returns a list of dependencies for setup() from requirements.txt.

    Currently a requirements.txt is being used to specify dependencies. In order
    to avoid specifying it in two places, we're going to use that file as the
    source of truth.

    Lines starting with -r will be ignored. If the requirements are split across
    multiple files, call this function multiple times instead and sum the results.
    """
    with open(requirements_file) as req_file:
        # Drop -r include lines, strip inline "#" comments and whitespace.
        return [
            raw_line.split("#")[0].strip()
            for raw_line in req_file
            if raw_line and not raw_line.startswith("-r")
        ]
def _parse_line(s):
    """Parses a line of a requirements.txt file.

    Everything from the first '#' on is treated as a comment; the
    remaining requirement is returned with surrounding whitespace removed.
    """
    return s.split("#", 1)[0].strip()
# Package metadata; the native extension build itself is driven by the
# CMakeBuild command class registered in cmdclass.
setup(
    name="dm_robotics-controllers",
    package_dir={"dm_robotics.controllers": ""},
    packages=["dm_robotics.controllers"],
    version="0.3.0",
    license="Apache 2.0",
    author="DeepMind",
    description="Python bindings for dm_robotics/cpp/controllers",
    long_description=open("controllers_py/README.md").read(),
    long_description_content_type="text/markdown",
    url="https://github.com/deepmind/dm_robotics/tree/main/cpp/controllers_py",
    python_requires=">=3.7, <3.10",
    setup_requires=["wheel >= 0.31.0"],
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Topic :: Software Development :: Libraries",
        "Topic :: Scientific/Engineering",
    ],
    # CMAKE_EXE lets callers override which cmake binary runs the build.
    ext_modules=[
        CMakeExtension(
            "dm_robotics.controllers.cartesian_6d_to_joint_velocity_mapper",
            sourcedir="",
            cmake=os.environ.get("CMAKE_EXE", "cmake"))
    ],
    cmdclass={"build_ext": CMakeBuild},
    zip_safe=False,
)
| deepmind/dm_robotics | cpp/setup.py | Python | apache-2.0 | 4,478 |
import unittest
from gremlin_rest import GremlinClient
class TestClient(unittest.TestCase):
    """Integration tests for GremlinClient against a live Gremlin server.

    NOTE(review): the endpoint is a hard-coded Docker-bridge address;
    these tests only run when that server is reachable.  (Python 2 code.)
    """

    def setUp(self):
        # Fresh client per test; no state shared between test methods.
        self.client = GremlinClient('http://172.17.0.248:8182')

    def tearDown(self):
        # Delete every vertex so one test cannot leak data into the next.
        for v in self.client.V().run():
            self.client.delete_vertex(vertex_id = v.vertex_id)

    def test_add_vertex(self):
        """Adding one vertex grows the count by one; deleting restores it."""
        init_cnt = len(self.client.V().run())
        v = self.client.addVertex(label = "a", a = 123, b = 'qweq').first()
        print repr(v)
        vs = self.client.V().run()
        print repr(vs)
        self.assertEqual(len(vs) - init_cnt, 1)
        self.client.delete_vertex(vertex_id = v.vertex_id)
        self.assertEqual(len(self.client.V().run()), init_cnt)

    def test_query_vertices(self):
        """has() filters match exactly on numeric and string properties."""
        print 'before', repr(self.client.V().run())
        v1 = self.client.addVertex(a = 123, b = 'qwe').first()
        v2 = self.client.addVertex(a = 1234, b = 'qwe').first()
        print 'all', repr(self.client.V().run())
        print '12', repr(self.client.V().has('a', 12).run())
        print '123', repr(self.client.V().has('a', 123).run())
        print '1234', repr(self.client.V().has('a', 1234).run())
        print 'qwe', repr(self.client.V().has('b', 'qwe').run())
        # 12 matches nothing; each exact value matches its own vertex;
        # the shared string property matches both.
        self.assertEqual(len(self.client.V().has('a', 12).run()), 0)
        self.assertEqual(len(self.client.V().has('a', 123).run()), 1)
        self.assertEqual(len(self.client.V().has('a', 1234).run()), 1)
        self.assertEqual(len(self.client.V().has('b', 'qwe').run()), 2)
        self.client.delete_vertex(vertex_id = v1.vertex_id)
        self.client.delete_vertex(vertex_id = v2.vertex_id)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| windj007/python-gremlin-rest | gremlin_rest/tests/all_tests.py | Python | apache-2.0 | 1,682 |
import os
import pynja
import repo
@pynja.project
class java1(repo.JavaProject):
    """Build project for java1: compiles the listed sources and packages
    them into java1.jar."""

    def emit(self):
        # Source paths are relative to the compile task's working dir
        # (the project's source/ directory, set below).
        sources = [
            "com/java1/Counter.java",
        ]
        with self.java_compile_ex(sources) as task:
            task.workingDir = os.path.join(self.projectDir, "source")
        self.jar_create("java1.jar")
| fifoforlifo/pynja | test2/code/java1/java1.py | Python | apache-2.0 | 331 |
from datetime import datetime
from app import app
from app.authentication import with_login
from flask import Blueprint, jsonify, request, Response
from app.generate_csv import generate_csv_clean
from app.msol_util import get_next_update_estimation_message_aws
from app.es.awsmetric import AWSMetric
from app.es.awsstat import AWSStat
from app.es.awsdetailedlineitem import AWSDetailedLineitem
from app.aws_keys import with_multiple_aws_accounts
from dateutil.relativedelta import relativedelta
from app.generate_csv import generate_csv
from app.cache import compressed_json, decompressed_json, cache, with_cache
from hashlib import sha256
from .. import AWS_KEY_PROCESSING_INTERVAL_HOURS
import itertools
import calendar
import config
aws_cost_stats_bp = Blueprint('aws_cost_stats_bp', __name__)
def cut_cost_by_product(products, cut):
    """Keep the top `cut` products and fold the rest into one entry.

    `products` is an ordered list of {'product': ..., 'cost': ...} dicts.
    An entry is kept as-is only if it is among the first `cut` items AND
    costs at least one cent; everything else is summed into a trailing
    'Other Services' entry, which is appended only when it itself
    amounts to at least $0.01.
    """
    kept = []
    other_total = 0
    for rank, entry in enumerate(products):
        if rank < cut and entry['cost'] >= 0.01:
            kept.append(entry)
        else:
            other_total += entry['cost']
    if other_total >= 0.01:
        kept.append({'product': 'Other Services', 'cost': other_total})
    return kept
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycost', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycost/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycost(accounts, nb_months):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get monthly costs
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        months:
                            type: array
                            items:
                                properties:
                                    month:
                                        type: string
                                    total_cost:
                                        type: number
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    # with_multiple_aws_accounts() guarantees at least one resolved account.
    assert len(accounts) > 0
    now = datetime.utcnow()
    # Window: first day of the month (nb_months - 1) months back ...
    date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
    # ... through the last microsecond of the current month.
    date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
                          hour=23, minute=59, second=59, microsecond=999999)
    data = AWSDetailedLineitem.get_monthly_cost(keys=[account.get_aws_user_id() for account in accounts],
                                                date_from=date_from,
                                                date_to=date_to)
    return jsonify(data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/totalcost/<string:time_arg>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_totalcost(accounts, time_arg):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get total cost
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        months:
                            type: array
                            items:
                                properties:
                                    total_cost:
                                        type: number
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    assert len(accounts) > 0
    now = datetime.utcnow()
    this_day = now.replace(hour=0, minute=0, second=0, microsecond=0)
    this_month = this_day.replace(day=1)
    # Map the URL time argument to the start of the aggregation window.
    # BUGFIX: 'currentyear' used to subtract `this_month.month` months, which
    # landed on December 1st of the *previous* year; subtracting (month - 1)
    # months yields January 1st of the current year, as the name implies.
    time_val = {
        'ever': AWSDetailedLineitem.get_first_date([account.get_aws_user_id() for account in accounts]),
        'currentyear': this_month - relativedelta(months=this_month.month - 1),
        'currentmonth': this_month,
    }
    # An unknown time_arg falls back to `now`, i.e. a window covering today only.
    date_from = time_val.get(time_arg, now)
    date_to = now.replace(hour=23, minute=59, second=59, microsecond=999999)
    raw_data = AWSDetailedLineitem.get_cost(keys=[account.get_aws_user_id() for account in accounts],
                                            date_from=date_from,
                                            date_to=date_to)
    return jsonify(raw_data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregion', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregion/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyregion(accounts, nb_months):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get monthly costs summed by region
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        months:
                            type: array
                            items:
                                properties:
                                    month:
                                        type: string
                                    products:
                                        type: array
                                        items:
                                            properties:
                                                cost:
                                                    type: number
                                                region:
                                                    type: string
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    assert len(accounts) > 0
    # Window: first instant of the month (nb_months - 1) months ago through
    # the last instant of the current month, in UTC.
    now = datetime.utcnow()
    date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
    date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
                          hour=23, minute=59, second=59, microsecond=999999)
    # Drill into the raw ES aggregation: one bucket per month interval.
    raw_data = AWSDetailedLineitem.get_cost_by_region(keys=[account.get_aws_user_id() for account in accounts],
                                                      date_from=date_from,
                                                      date_to=date_to)['intervals']['buckets']
    # Flatten the ES buckets into {month, regions: [{region, cost}]} records.
    res = [
        {
            # 'key_as_string' is an ISO timestamp; keep only the date part.
            'month': data['key_as_string'].split('T')[0],
            'regions': [
                {
                    'region': region['key'],
                    'cost': region['cost']['value'],
                }
                for region in data['regions']['buckets']
            ],
        }
        for data in raw_data
    ]
    # '?csv' switches the response to a CSV download instead of JSON.
    if 'csv' in request.args:
        return Response(generate_csv(res, 'regions', 'region'), mimetype='text/csv')
    return jsonify(months=res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregionbyaccount', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregionbyaccount/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyregionbyaccount(accounts, nb_months):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get monthly costs summed by region for each account
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        months:
                            type: array
                            items:
                                properties:
                                    month:
                                        type: string
                                    products:
                                        type: array
                                        items:
                                            properties:
                                                cost:
                                                    type: number
                                                region:
                                                    type: string
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    assert len(accounts) > 0
    # Window: first instant of the month (nb_months - 1) months ago through
    # the last instant of the current month, in UTC.
    now = datetime.utcnow()
    date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
    date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
                          hour=23, minute=59, second=59, microsecond=999999)
    # byaccount=True nests the interval buckets under one bucket per account.
    raw_data = AWSDetailedLineitem.get_cost_by_region(keys=[account.get_aws_user_id() for account in accounts],
                                                      byaccount=True,
                                                      date_from=date_from,
                                                      date_to=date_to)['accounts']['buckets']
    res = [
        {
            'account_id': account['key'],
            # Resolve the display name by matching the bucket key against the
            # authorized accounts; assumes every bucket key is one of them.
            'account_name': [a.pretty for a in accounts if account['key'] == a.get_aws_user_id()][0],
            'months': [
                {
                    'month': data['key_as_string'].split('T')[0],
                    'regions': [
                        {
                            'region': region['key'],
                            'cost': region['cost']['value'],
                        }
                        for region in data['regions']['buckets']
                    ],
                }
                for data in account['intervals']['buckets']
            ]
        }
        for account in raw_data
    ]
    # '?csv' switches the response to a CSV download instead of JSON.
    if 'csv' in request.args:
        return Response(generate_csv(res, 'regions', 'region', account=True), mimetype='text/csv')
    return jsonify(accounts=res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregionbytagbyaccount', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyregionbytagbyaccount/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyregionbytagbyaccount(accounts, nb_months):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get monthly costs summed by region for each account
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        months:
                            type: array
                            items:
                                properties:
                                    month:
                                        type: string
                                    products:
                                        type: array
                                        items:
                                            properties:
                                                cost:
                                                    type: number
                                                region:
                                                    type: string
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    assert len(accounts) > 0
    # Window: first instant of the month (nb_months - 1) months ago through
    # the last instant of the current month, in UTC.
    now = datetime.utcnow()
    date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
    date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
                          hour=23, minute=59, second=59, microsecond=999999)
    raw_data = AWSDetailedLineitem.get_cost_by_region(keys=[account.get_aws_user_id() for account in accounts],
                                                      tagged=True,
                                                      byaccount=True,
                                                      date_from=date_from,
                                                      date_to=date_to)['accounts']['buckets']
    def tagged_cost(bucket, total):
        # Yield (tag, cost) pairs; whatever cost is not covered by tags is
        # reported as a synthetic 'untagged' entry.
        # NOTE(review): `total != total_tag` is an exact float comparison, so a
        # tiny rounding residue can produce a near-zero 'untagged' entry.
        total_tag = 0.0
        for tag in bucket:
            total_tag += tag['cost']['value']
            yield (tag['key'], tag['cost']['value'])
        if total != total_tag:
            yield ('untagged', total - total_tag)
    res = [
        {
            'account_id': account['key'],
            'account_name': [a.pretty for a in accounts if a.get_aws_user_id() == account['key']][0],
            'months': [
                {
                    'month': data['key_as_string'].split('T')[0],
                    'regions': [
                        {
                            'region': region['key'],
                            'tags': [
                                {
                                    'name': tag[0],
                                    'cost': tag[1],
                                }
                                for tag in tagged_cost(region['tags']['buckets'], region['cost']['value'])
                            ],
                        }
                        for region in data['regions']['buckets']
                    ],
                }
                for data in account['intervals']['buckets']
            ]
        }
        for account in raw_data
    ]
    # '?csv' switches the response to a CSV download instead of JSON.
    if 'csv' in request.args:
        return Response(generate_csv(res, 'regions', 'region', account=True, tagged=True), mimetype='text/csv')
    return jsonify(accounts=res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/dailycostbyproduct', defaults={'nb_days': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/dailycostbyproduct/<int:nb_days>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_dailycostbyproduct(accounts, nb_days):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get daily costs summed by product
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        days:
                            type: array
                            items:
                                properties:
                                    day:
                                        type: string
                                    products:
                                        type: array
                                        items:
                                            properties:
                                                cost:
                                                    type: number
                                                product:
                                                    type: string
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    assert len(accounts) > 0
    # Anchor the window on the last *ingested* line-item date (capped at
    # today), not on the wall clock, so partially-imported days are skipped.
    now = datetime.utcnow().replace(hour=23, minute=59, second=59, microsecond=999999)
    now = AWSDetailedLineitem.get_last_date([account.get_aws_user_id() for account in accounts], limit=now)
    date_from = now.replace(hour=0, minute=0, second=0, microsecond=0) - relativedelta(days=nb_days)
    date_to = now.replace(hour=23, minute=59, second=59, microsecond=999999) - relativedelta(days=1)
    data = AWSDetailedLineitem.get_daily_cost_by_product(keys=[account.get_aws_user_id() for account in accounts],
                                                         date_from=date_from,
                                                         date_to=date_to)['days']
    # Keep the top products per day ('?show=N', default 9) and fold the rest
    # into 'Other Services'.
    for d in data:
        d['products'] = cut_cost_by_product(sorted(d['products'], key=lambda x: x['cost'], reverse=True), int(request.args['show']) - 1 if 'show' in request.args else 9)
    if not len(data):
        return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    return jsonify(days=data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproduct', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproduct/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyproduct(accounts, nb_months):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get monthly costs summed by product
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        months:
                            type: array
                            items:
                                properties:
                                    month:
                                        type: string
                                    products:
                                        type: array
                                        items:
                                            properties:
                                                cost:
                                                    type: number
                                                product:
                                                    type: string
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    assert len(accounts) > 0
    # Anchor the window on the last ingested line-item date (capped at today).
    now = datetime.utcnow().replace(hour=23, minute=59, second=59, microsecond=999999)
    now = AWSDetailedLineitem.get_last_date([account.get_aws_user_id() for account in accounts], limit=now)
    date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
    date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1], hour=23, minute=59, second=59, microsecond=999999)
    data = AWSDetailedLineitem.get_monthly_cost_by_product(keys=[account.get_aws_user_id() for account in accounts],
                                                           date_from=date_from,
                                                           date_to=date_to)['months']
    # JSON responses are truncated to the top products ('?show=N', default 9);
    # CSV exports keep the full, uncut product list.
    for d in data:
        if 'csv' not in request.args:
            d['products'] = cut_cost_by_product(sorted(d['products'], key=lambda x: x['cost'], reverse=True), int(request.args['show']) - 1 if 'show' in request.args else 9)
    if not len(data):
        return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    if 'csv' in request.args:
        return Response(generate_csv(data, 'products', 'product'), mimetype='text/csv')
    return jsonify(months=data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproductbyaccount', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproductbyaccount/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyproductbyaccount(accounts, nb_months):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get monthly costs summed by product for each account
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        months:
                            type: array
                            items:
                                properties:
                                    month:
                                        type: string
                                    products:
                                        type: array
                                        items:
                                            properties:
                                                cost:
                                                    type: number
                                                product:
                                                    type: string
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    assert len(accounts) > 0
    # Window: first instant of the month (nb_months - 1) months ago through
    # the last instant of the current month, in UTC.
    now = datetime.utcnow()
    month = nb_months - 1
    date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=month)
    date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
                          hour=23, minute=59, second=59, microsecond=999999)
    # One ES query per account (keys is a single id here, not a list).
    res = [
        {
            'account_id': account.get_aws_user_id(),
            'account_name': account.pretty,
            'months': AWSDetailedLineitem.get_monthly_cost_by_product(keys=account.get_aws_user_id(),
                                                                      date_from=date_from,
                                                                      date_to=date_to)['months'],
        }
        for account in accounts
    ]
    # '?csv' switches the response to a CSV download instead of JSON.
    if 'csv' in request.args:
        return Response(generate_csv(res, 'products', 'product', account=True), mimetype='text/csv')
    return jsonify(accounts=res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproductbytagbyaccount', defaults={'nb_months': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/monthlycostbyproductbytagbyaccount/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_monthlycostbyproductbytagbyaccount(accounts, nb_months):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get monthly costs summed by product for each account
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        months:
                            type: array
                            items:
                                properties:
                                    month:
                                        type: string
                                    products:
                                        type: array
                                        items:
                                            properties:
                                                cost:
                                                    type: number
                                                product:
                                                    type: string
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    assert len(accounts) > 0
    # Window: first instant of the month (nb_months - 1) months ago through
    # the last instant of the current month, in UTC.
    now = datetime.utcnow()
    month = nb_months - 1
    date_from = now.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=month)
    date_to = now.replace(day=calendar.monthrange(now.year, now.month)[1],
                          hour=23, minute=59, second=59, microsecond=999999)
    # Same as monthlycostbyproductbyaccount, but with tag breakdown enabled.
    res = [
        {
            'account_id': account.get_aws_user_id(),
            'account_name': account.pretty,
            'months': AWSDetailedLineitem.get_monthly_cost_by_product(keys=account.get_aws_user_id(),
                                                                      tagged=True,
                                                                      date_from=date_from,
                                                                      date_to=date_to)['months'],
        }
        for account in accounts
    ]
    # '?csv' switches the response to a CSV download instead of JSON.
    if 'csv' in request.args:
        return Response(generate_csv(res, 'products', 'product', account=True, tagged=True), mimetype='text/csv')
    return jsonify(accounts=res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/yearlycostbyproduct', defaults={'nb_years': 3})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/yearlycostbyproduct/<int:nb_years>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_yearlycostbyproduct(accounts, nb_years):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc Get yearly costs summed by product
        summary: *desc
        responses:
            200:
                description: List of AWS accounts
                schema:
                    properties:
                        years:
                            type: array
                            items:
                                properties:
                                    year:
                                        type: string
                                    products:
                                        type: array
                                        items:
                                            properties:
                                                cost:
                                                    type: number
                                                product:
                                                    type: string
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    assert len(accounts) > 0
    # Window: January 1st (nb_years - 1) years ago through December 31st of
    # the current year, in UTC.
    now = datetime.utcnow()
    date_from = now.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(years=nb_years - 1)
    date_to = now.replace(month=12, day=31, hour=23, minute=59, second=59, microsecond=999999)
    data = AWSDetailedLineitem.get_yearly_cost_by_product(keys=[account.get_aws_user_id() for account in accounts],
                                                          date_from=date_from,
                                                          date_to=date_to)['years']
    # Keep the top products per year ('?show=N', default 9) and fold the rest
    # into 'Other Services'.
    for d in data:
        d['products'] = cut_cost_by_product(sorted(d['products'], key=lambda x: x['cost'], reverse=True), int(request.args['show']) - 1 if 'show' in request.args else 9)
    if not len(data):
        return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    return jsonify(years=data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbyresource/months')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_resource_months(accounts):
    """List the months (as YYYY-MM-01 strings) for which line-item data exists."""
    raw_data = AWSDetailedLineitem.get_first_to_last_date([account.get_aws_user_id() for account in accounts])
    if not raw_data:
        # No data ingested yet: tell the client when to retry.
        return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    return jsonify(months=[data.strftime("%Y-%m-01") for data in raw_data])
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbyresource/<month>/categories')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_resource_month_categories_m(accounts, month):
    """Build power-of-ten cost category labels ('<10', '<100', ..., '>max/10')."""
    try:
        date_from = datetime.strptime(month, "%Y-%m-%d")
    except:
        return jsonify(error='Not found.'), 404
    raw_data = AWSDetailedLineitem.get_cost_by_resource([account.get_aws_user_id() for account in accounts], date_from=date_from)
    cat = []
    max_cat = 0
    for new in raw_data:
        # Find the smallest power of ten >= the resource's cost.
        x = 1
        while new['cost'] > x:
            x *= 10
        # The largest power seen so far only widens max_cat; smaller ones get
        # a '<x' label.
        # NOTE(review): because of the elif, a '<x' label is never emitted for
        # a bucket while it is the running maximum, so intermediate labels can
        # be skipped depending on input order -- looks fragile; confirm intent.
        if x >= max_cat:
            max_cat = x
        elif '<{}'.format(x) not in cat:
            cat.append('<{}'.format(x))
    # NOTE(review): `max_cat / 10` relies on Python 2 integer division (this
    # module also uses iteritems/unicode); under Python 3 it would yield a
    # float label like '>10.0'.
    cat.append('>{}'.format(max_cat / 10))
    return jsonify(categories=cat)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbyresource/<month>/chart')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_resource_month_chart_m(accounts, month):
    """Aggregate per-resource costs into power-of-ten buckets for charting."""
    # TODO: Use ES agg to categorize
    try:
        date_from = datetime.strptime(month, "%Y-%m-%d")
    except:
        return jsonify(error='Not found.'), 404
    raw_data = [
        AWSDetailedLineitem.get_cost_by_resource(account.get_aws_user_id(), date_from=date_from)
        for account in accounts
    ]
    data = []
    def get_cat_with_cost(cost):
        # Smallest power of ten >= cost.
        x = 1
        while cost > x:
            x *= 10
        return x
    def add_resource_in_data(new):
        # Accumulate the resource's cost into its '<10^k' bucket, creating
        # the bucket on first use.
        new_cat = get_cat_with_cost(new['cost'])
        for cat in data:
            if cat['category'] == '<{}'.format(new_cat):
                cat['total'] += new['cost']
                return
        data.append(dict(category='<{}'.format(new_cat), total=new['cost']))
    for one in raw_data:
        for new in one:
            add_resource_in_data(new)
    if not len(data):
        return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    # Relabel the widest bucket from '<10^k' to '>10^(k-1)': the longest
    # category string holds the largest power of ten.
    max_cat = 0
    for i in range(len(data)):
        if len(data[i]['category']) > len(data[max_cat]['category']):
            max_cat = i
    data[max_cat]['category'] = data[max_cat]['category'][:-1]
    data[max_cat]['category'] = data[max_cat]['category'].replace('<', '>', 1)
    return jsonify(categories=data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbyresource/<month>/<category>')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_resource_m(accounts, month, category):
    """List the resources whose monthly cost falls in `category` ('<N' or '>N')."""
    try:
        date_from = datetime.strptime(month, "%Y-%m-%d")
        # Category must look like '<1000' or '>1000'.
        assert category[0] in ['<', '>']
        cat = int(category[1:])
    except:
        return jsonify(error='Not found.'), 404
    raw_data = AWSDetailedLineitem.get_cost_by_resource([account.get_aws_user_id() for account in accounts], date_from=date_from)
    def transform(r):
        # Duplicate 'resource' under the key the frontend expects.
        r['resource_name'] = r['resource']
        return r
    minus = category[0] == '<'
    # '<N' selects costs in [N/10, N); '>N' selects costs strictly above N.
    data = [
        transform(r)
        for r in raw_data
        if (minus and cat > r['cost'] >= cat / 10) or (not minus and r['cost'] > cat)
    ]
    if len(data) <= 0:
        return jsonify(error='Not found.'), 404
    return jsonify(category=dict(resources=data, total=sum([x['cost'] for x in data])))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbyresource/<month>/search/<search>')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_resource_search_m(accounts, month, search):
    """Search resources by name for a given month across all selected accounts."""
    try:
        date_from = datetime.strptime(month, "%Y-%m-%d")
    except:
        return jsonify(error='Not found.'), 404
    # One query per account; each returns an iterable of resource records.
    raw_data = [
        AWSDetailedLineitem.get_cost_by_resource(account.get_aws_user_id(), date_from=date_from, search=search)
        for account in accounts
    ]
    def transform(r):
        # Duplicate 'resource' under the key the frontend expects.
        r['resource_name'] = r['resource']
        return r
    data = [
        transform(r)
        for raw in raw_data
        for r in raw
    ]
    if not len(data):
        return jsonify(error='Not found.'), 404
    return jsonify(search_result=data)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/tags')
@with_login()
@with_multiple_aws_accounts()
def aws_get_resource_tags(accounts):
    """List every tag key available on the selected accounts, sorted case-insensitively."""
    tags = AWSDetailedLineitem.get_available_tags([account.get_aws_user_id() for account in accounts])['tags']
    if not len(tags):
        return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    # `unicode.lower` => this module targets Python 2.
    return jsonify(tags=sorted(tags, key=unicode.lower))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/tags_only_with_data')
@with_login()
@with_multiple_aws_accounts()
def aws_get_resource_tags_with_data(accounts):
    """List tag keys that actually carry data, deduplicated across accounts."""
    tags = list(set(itertools.chain.from_iterable(
        AWSDetailedLineitem.get_available_tags(account.get_aws_user_id(), only_with_data=account.key)['tags']
        for account in accounts
    )))
    if not len(tags):
        return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    # `unicode.lower` => this module targets Python 2.
    return jsonify(tags=sorted(tags, key=unicode.lower))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbytag/<path:tag>', defaults={'nb_months': 5})
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/costbytag/<path:tag>/<int:nb_months>')
@with_login()
@with_multiple_aws_accounts()
def aws_cost_by_tags_months(accounts, nb_months, tag):
    """Return the monthly cost breakdown for `tag` over the last `nb_months` months."""
    date_to = datetime.now()
    # BUGFIX: `hour=0` was missing from the replace() call, so the window
    # start carried the current wall-clock hour instead of starting at the
    # first instant of the month like every other endpoint in this module.
    date_from = date_to.replace(day=1, hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=nb_months - 1)
    return jsonify(AWSDetailedLineitem.get_monthly_cost_by_tag([account.get_aws_user_id() for account in accounts], tag, date_from=date_from, date_to=date_to))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/underutilized')
@with_login()
@with_multiple_aws_accounts()
def aws_underutilized_resources(accounts):
    """Return the underutilized-resource report for the selected accounts."""
    return jsonify(AWSMetric.underutilized_resources(account.key for account in accounts))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/underutilizedreducedcost')
@with_login()
@with_multiple_aws_accounts()
def aws_underutilized_resources_reduced_cost(accounts):
    """Estimate the reduced monthly cost of underutilized resources (last 6 months)."""
    now = datetime.utcnow()
    date_from = now.replace(hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=6)
    date_to = now.replace(hour=23, minute=59, second=59, microsecond=999999)
    resources = AWSMetric.underutilized_resources(account.key for account in accounts)
    resource_ids = set(r['id'] for r in resources['resources'])
    months = AWSDetailedLineitem.get_monthly_cost_by_resource(resource_ids, date_from=date_from, date_to=date_to)
    # `iteritems` => Python 2 dict iteration.
    res = {  # Simply multiply every cost by 20% as all instances usage is
        k: v * 0.2  # less than 20%. TODO: intelligently find the best type
        for k, v in months.iteritems()
    }
    return jsonify(res)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/usagecost')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_usagecost(accounts):
    """Correlate daily EC2 CPU usage with daily EC2 cost, per day, across accounts."""
    def get_account_data(account):
        # Emit (day, cpu, None) then (day, None, cost) triples; the merge
        # loop below folds them back together per day.
        for date, cpu_usage in dict(AWSMetric.daily_cpu_utilization(account.key)).iteritems():
            yield (date, cpu_usage, None)
        for date, cost in dict(AWSDetailedLineitem.get_ec2_daily_cost(account.get_aws_user_id())).iteritems():
            yield (date, None, cost)
    @with_cache()
    def get_all_account_data():
        return list(
            itertools.chain.from_iterable(
                get_account_data(account)
                for account in accounts
            )
        )
    data = get_all_account_data()
    days = {}
    # Merge the sparse triples: sum cpu and cost independently per day.
    for day, cpu_usage, cost in data:
        day_data = days.setdefault(day, {'day': day, 'cpu': None, 'cost': None})
        if cpu_usage is not None:
            day_data['cpu'] = (day_data['cpu'] or 0.0) + cpu_usage
        if cost is not None:
            day_data['cost'] = (day_data['cost'] or 0.0) + cost
    res = sorted([
        value
        for value in days.itervalues()
        if value['cpu'] is not None and value['cost'] is not None  # Comment/remove if None values are OK
    ], key=lambda e: e['day'])
    if not res:
        return jsonify(message=get_next_update_estimation_message_aws(accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    return jsonify(days=res)
def _build_list_used_transfer_types(stat_list):
return frozenset(
elem['type']
for bucket in stat_list
for elem in bucket['transfer_stats']
)
def _check_if_in_list(dict_list, value, key):
return next((item for item in dict_list if item[key] == value), None)
def _append_to_header_list(header_list, new_data):
for elem in new_data:
if elem not in header_list:
header_list.append(elem)
return header_list
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/s3buckettags')
@with_login()
@with_multiple_aws_accounts()
def aws_get_resource_tags_for_s3(accounts):
    """List tag keys used on S3 ('Simple Storage Service') line items, deduplicated."""
    tags = list(set(itertools.chain.from_iterable(
        AWSDetailedLineitem.get_available_tags(
            account.get_aws_user_id(),
            product_name='Simple Storage Service',
        )['tags']
        for account in accounts
    )))
    # `unicode.lower` => this module targets Python 2.
    return jsonify(tags=sorted(tags, key=unicode.lower))
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/s3bucketsizepername')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_s3bucketsizepername(accounts):
    """---
    get:
        tags:
            - aws
        produces:
            - application/csv
        description: &desc Stats about cost and usage of bandwith and storag on s3 buckets, organised by name
        summary: *desc
        responses:
            200:
                description: Stats about cost and usage of bandwith and storag on s3 buckets, organised by name
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    def _create_bandwith_breakdown(transfer_types_list, csv_row, bucket_bandwith_stat):
        # Add one column per transfer type present for this bucket.
        for elem in transfer_types_list:
            _current_transfer_type = _check_if_in_list(bucket_bandwith_stat['transfer_stats'], elem, 'type')
            if _current_transfer_type is not None:
                csv_row[elem] = _current_transfer_type['data'] * 1024 * 1024 * 1024  # 'data' is given in GB; convert to bytes
        return csv_row
    def _create_csv_rows(bucket_list, account, bandwith_cost, csv_row_all):
        # NOTE: reads `transfer_types_list` from the enclosing loop iteration
        # (closure), so it must be called after that variable is assigned.
        if bucket_list is None:
            return []
        for bucket in bucket_list['buckets']:
            csv_row = {
                'account_id': account.get_aws_user_id(),
                'used_space': bucket['used_space'],
                'name': bucket['name'],
                'storage_cost': _check_if_in_list(bucket['prices'], bucket['provider'], 'provider')['cost']
            }
            bucket_bandwith_stat = _check_if_in_list(bandwith_cost, bucket['name'], 'bucket_name')
            if bucket_bandwith_stat is not None:
                csv_row = _create_bandwith_breakdown(transfer_types_list, csv_row, bucket_bandwith_stat)
            csv_row['bandwith_cost'] = bucket_bandwith_stat['cost'] if bucket_bandwith_stat is not None else 0
            csv_row['total_cost'] = csv_row['storage_cost'] + csv_row['bandwith_cost']
            csv_row_all.append(csv_row)
        return csv_row_all
    assert len(accounts) > 0
    csv_header = ['account_id', 'name', 'used_space', 'storage_cost', 'bandwith_cost', 'total_cost']
    csv_row_all = []
    for account in accounts:
        bucket_list = AWSStat.latest_s3_space_usage(account)
        bucket_ids = [
            bucket['name']
            for bucket in (bucket_list['buckets'] if bucket_list is not None else [])
        ]
        bandwith_cost = AWSDetailedLineitem.get_s3_bandwith_info_and_cost_per_name(account.get_aws_user_id(), bucket_ids)
        # Extend the CSV header with any transfer type seen for this account.
        transfer_types_list = _build_list_used_transfer_types(bandwith_cost)
        csv_header = _append_to_header_list(csv_header, transfer_types_list)
        csv_row_all = _create_csv_rows(bucket_list, account, bandwith_cost, csv_row_all)
    if len(csv_row_all) > 0 and csv_row_all[0] is None:
        csv_row_all = []
    if 'csv' in request.args:
        return Response(generate_csv_clean(csv_row_all, csv_header))
    return jsonify(accounts=csv_row_all)
@app.route('/aws/accounts/<aws_key_ids:account_ids>/stats/s3bucketsizepertag/<path:tag>')
@with_login()
@with_multiple_aws_accounts()
def aws_accounts_m_stats_s3bucketsizepertag(accounts, tag):
    """---
    get:
        tags:
            - aws
        produces:
            - application/csv
        description: &desc Stats about cost and usage of bandwith and storag on s3 buckets, organised by tag
        summary: *desc
        responses:
            200:
                description: Stats about cost and usage of bandwith and storag on s3 buckets, organised by tag
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    assert len(accounts) > 0
    def _get_total_sizes_cost_and_names(bucket_names_list, bucket_list):
        # Sum size/storage cost over the buckets carrying the tag and build a
        # comma-separated name list ("a, b, c" -- trailing ", " stripped).
        total_size = 0
        total_cost = 0
        names = ""
        for bucket in bucket_list['buckets']:
            if _check_if_in_list(bucket_names_list, bucket['name'], 'bucket_name') is not None:
                total_size += float(bucket['used_space'])
                total_cost += _check_if_in_list(bucket['prices'], bucket['provider'], 'provider')['cost']
                names += bucket['name'] + ", "
        return total_size, names[:-2], total_cost
    def _get_bandwith_info(account, bucket_names):
        # Accept either a single name or a list of names.
        bucket_ids = [
            bucket
            for bucket in (bucket_names if isinstance(bucket_names, list) else [bucket_names])
        ]
        bandwith_cost = AWSDetailedLineitem.get_s3_bandwith_info_and_cost_per_name(account.get_aws_user_id(), bucket_ids)
        return bandwith_cost
    def _iterate_over_buckets_in_tag_for_total(bucket_bandwith_stat):
        total_cost = 0
        for bucket in (bucket_bandwith_stat if bucket_bandwith_stat is not None else []):
            total_cost += bucket['cost']
        return total_cost
    def _iterate_over_buckets_and_make_breakdown_bandwith_stat(bucket_bandwith_stat, buff_row_csv, tag_value):
        # Accumulate bandwith cost and per-transfer-type byte counts
        # ('data' is given in GB; converted to bytes) into the CSV row.
        bandwith_cost = 0
        for bucket in bucket_bandwith_stat:
            bandwith_cost += bucket['cost']
            for elem in bucket['transfer_stats']:
                if elem['type'] in buff_row_csv:
                    buff_row_csv[elem['type']] += (elem['data'] * 1024 * 1024 * 1024)
                else:
                    buff_row_csv[elem['type']] = (elem['data'] * 1024 * 1024 * 1024)
        buff_row_csv['bandwith_cost'] = bandwith_cost
        return buff_row_csv
    def _build_csv_row_and_add_header(bucket_list_tagged, bucket_list, account, csv_header, csv_row_all):
        # One CSV row per distinct tag value; extends the header with any new
        # transfer types encountered.
        if bucket_list_tagged is None:
            return [], []
        for tag_value in bucket_list_tagged['tag_value']:
            bucket_info = _get_total_sizes_cost_and_names(tag_value['s3_buckets'], bucket_list)
            bucket_bandwith_stat = _get_bandwith_info(account, bucket_info[1])
            csv_header = _append_to_header_list(csv_header, _build_list_used_transfer_types(bucket_bandwith_stat))
            csv_row = {
                "tag_key": bucket_list_tagged['tag_key'].split(':')[1],
                "tag_value": tag_value['tag_value'],
                "account_id": tag_value['s3_buckets'][0]["account_id"],
                "total_size": bucket_info[0],
                "bucket_names": bucket_info[1],
                "storage_cost": bucket_info[2],
            }
            csv_row = _iterate_over_buckets_and_make_breakdown_bandwith_stat(bucket_bandwith_stat, csv_row, tag_value)
            csv_row['total_cost'] = csv_row['storage_cost'] + csv_row['bandwith_cost']
            csv_row_all.append(csv_row)
        return csv_header, csv_row_all
    def _select_bucket_list_tag(bucket_list_per_tag, tag):
        # First tag whose key (after the 'user:'-style prefix) contains `tag`;
        # returns None implicitly when nothing matches.
        for bucket_list_tagged in bucket_list_per_tag:
            if tag in bucket_list_tagged['tag_key'].split(':')[1]:
                return bucket_list_tagged
    csv_header = ["account_id", "tag_key", "tag_value", "total_size", "bucket_names", "bandwith_cost", "storage_cost", "total_cost"]
    csv_data = []
    for account in accounts:
        bucket_list_per_tag = AWSDetailedLineitem.get_s3_buckets_per_tag(account.get_aws_user_id())
        bucket_list_tagged = _select_bucket_list_tag(bucket_list_per_tag, tag)
        bucket_list = AWSStat.latest_s3_space_usage(account)
        csv_header, csv_data = _build_csv_row_and_add_header(bucket_list_tagged, bucket_list, account, csv_header, csv_data)
    if 'csv' in request.args:
        return Response(generate_csv_clean(csv_data, csv_header))
    return jsonify(res=csv_data)
| giubil/trackit | api/files/api/app/views/aws/cost/stats.py | Python | apache-2.0 | 43,920 |
# Copyright 2015 6WIND S.A.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# base timestamp (Unix epoch, used as an "unset"/initial time value)
BASE_TIMESTAMP = '1970-01-01 00:00:00'
# linuxbridge name prefix
BRIDGE_PREFIX = 'brq'
# fp agent type
FP_AGENT_TYPE = '6WIND Fast Path agent'
# vhostuser socket settings (directory, connection mode and filename prefix
# used when building per-port vhost-user socket paths)
VHOSTUSER_SOCKET_DIR = '/tmp'
VHOSTUSER_SOCKET_MODE = 'client'
VHOSTUSER_SOCKET_PREFIX = 'vhost-socket-'
# vhostuser fp plug
# duplicate this constant from nova.network.model.py,
# in case of containerized setups: nova modules can be installed
# in another container than neutron ML2 plugins
VIF_DETAILS_VHOSTUSER_FP_PLUG = 'vhostuser_fp_plug'
| openstack/networking-6wind | networking_6wind/common/constants.py | Python | apache-2.0 | 1,169 |
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Node for an OPENQASM custom gate statement.
"""
from ._node import Node
class CustomUnitary(Node):
    """Node for an OPENQASM custom gate statement.

    children[0] is an id node.
    children[1] is an exp_list (if len==3) or primary_list.
    children[2], if present, is a primary_list.
    Has properties:
    .id = id node
    .name = gate name string
    .arguments = None or exp_list node
    .bitlist = primary_list node
    """

    def __init__(self, children):
        """Create the custom gate node."""
        Node.__init__(self, 'custom_unitary', children, None)
        self.id = children[0]
        self.name = self.id.name
        # Three children means an explicit argument list precedes the bits.
        if len(children) == 3:
            _, self.arguments, self.bitlist = children
        else:
            self.arguments = None
            self.bitlist = children[1]

    def qasm(self, prec=15):
        """Return the corresponding OPENQASM string."""
        parts = [self.name]
        if self.arguments is not None:
            parts.append("(" + self.arguments.qasm(prec) + ")")
        parts.append(" " + self.bitlist.qasm(prec) + ";")
        return "".join(parts)
| ChristopheVuillot/qiskit-sdk-py | qiskit/qasm/_node/_customunitary.py | Python | apache-2.0 | 1,893 |
# Start the OculusRift service via the MyRobotLab runtime registry.
# (Removed dataset metadata residue that was fused onto the end of this
# line and made it a syntax error.)
oculusrift = Runtime.start("oculusrift", "OculusRift")
"""Tests for the DFS module"""
import unittest
from dfs import dfsTraverse
class test_dfsTraverse(unittest.TestCase):
"""Test the correct order in traversing a graph"""
def setUp(self):
"""Create a graph and a tuple with the correct traverse"""
self.correctResTup = ('a', 'b', 'e', 'g', 'f', 'c', 'h', 'd')
self.graphDict = {'a': ('b', 'g', 'd'),
'b': ('e', 'a', 'f'),
'd': ('a', 'f'),
'e': ('b', 'g'),
'g': ('e', 'a'),
'f': ('b', 'd', 'c'),
'c': ('f', 'h'),
'h': ('c')}
def test_traverse(self):
"""Test the traverse function"""
result = dfsTraverse(self.graphDict, 'a')
self.assertEqual(result, self.correctResTup)
# Allow running this module directly, e.g. `python test_dfs.py`.
if __name__ == '__main__':
    unittest.main()
| radome/algorithms_and_data_structures | Python/test/test_dfs.py | Python | apache-2.0 | 911 |
# coding: utf-8
"""
Vericred API
Vericred's API allows you to search for Health Plans that a specific doctor
accepts.
## Getting Started
Visit our [Developer Portal](https://developers.vericred.com) to
create an account.
Once you have created an account, you can create one Application for
Production and another for our Sandbox (select the appropriate Plan when
you create the Application).
## SDKs
Our API follows standard REST conventions, so you can use any HTTP client
to integrate with us. You will likely find it easier to use one of our
[autogenerated SDKs](https://github.com/vericred/?query=vericred-),
which we make available for several common programming languages.
## Authentication
To authenticate, pass the API Key you created in the Developer Portal as
a `Vericred-Api-Key` header.
`curl -H 'Vericred-Api-Key: YOUR_KEY' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Versioning
Vericred's API default to the latest version. However, if you need a specific
version, you can request it with an `Accept-Version` header.
The current version is `v3`. Previous versions are `v1` and `v2`.
`curl -H 'Vericred-Api-Key: YOUR_KEY' -H 'Accept-Version: v2' "https://api.vericred.com/providers?search_term=Foo&zip_code=11215"`
## Pagination
Endpoints that accept `page` and `per_page` parameters are paginated. They expose
four additional fields that contain data about your position in the response,
namely `Total`, `Per-Page`, `Link`, and `Page` as described in [RFC-5988](https://tools.ietf.org/html/rfc5988).
For example, to display 5 results per page and view the second page of a
`GET` to `/networks`, your final request would be `GET /networks?....page=2&per_page=5`.
## Sideloading
When we return multiple levels of an object graph (e.g. `Provider`s and their `State`s
we sideload the associated data. In this example, we would provide an Array of
`State`s and a `state_id` for each provider. This is done primarily to reduce the
payload size since many of the `Provider`s will share a `State`
```
{
providers: [{ id: 1, state_id: 1}, { id: 2, state_id: 1 }],
states: [{ id: 1, code: 'NY' }]
}
```
If you need the second level of the object graph, you can just match the
corresponding id.
## Selecting specific data
All endpoints allow you to specify which fields you would like to return.
This allows you to limit the response to contain only the data you need.
For example, let's take a request that returns the following JSON by default
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890',
field_we_dont_care_about: 'value_we_dont_care_about'
},
states: [{
id: 1,
name: 'New York',
code: 'NY',
field_we_dont_care_about: 'value_we_dont_care_about'
}]
}
```
To limit our results to only return the fields we care about, we specify the
`select` query string parameter for the corresponding fields in the JSON
document.
In this case, we want to select `name` and `phone` from the `provider` key,
so we would add the parameters `select=provider.name,provider.phone`.
We also want the `name` and `code` from the `states` key, so we would
add the parameters `select=states.name,states.code`. The id field of
each document is always returned whether or not it is requested.
Our final request would be `GET /providers/12345?select=provider.name,provider.phone,states.name,states.code`
The response would be
```
{
provider: {
id: 1,
name: 'John',
phone: '1234567890'
},
states: [{
id: 1,
name: 'New York',
code: 'NY'
}]
}
```
## Benefits summary format
Benefit cost-share strings are formatted to capture:
* Network tiers
* Compound or conditional cost-share
* Limits on the cost-share
* Benefit-specific maximum out-of-pocket costs
**Example #1**
As an example, we would represent [this Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/33602TX0780032.pdf) as:
* **Hospital stay facility fees**:
- Network Provider: `$400 copay/admit plus 20% coinsurance`
- Out-of-Network Provider: `$1,500 copay/admit plus 50% coinsurance`
- Vericred's format for this benefit: `In-Network: $400 before deductible then 20% after deductible / Out-of-Network: $1,500 before deductible then 50% after deductible`
* **Rehabilitation services:**
- Network Provider: `20% coinsurance`
- Out-of-Network Provider: `50% coinsurance`
- Limitations & Exceptions: `35 visit maximum per benefit period combined with Chiropractic care.`
- Vericred's format for this benefit: `In-Network: 20% after deductible / Out-of-Network: 50% after deductible | limit: 35 visit(s) per Benefit Period`
**Example #2**
In [this other Summary of Benefits & Coverage](https://s3.amazonaws.com/vericred-data/SBC/2017/40733CA0110568.pdf), the **specialty_drugs** cost-share has a maximum out-of-pocket for in-network pharmacies.
* **Specialty drugs:**
- Network Provider: `40% coinsurance up to a $500 maximum for up to a 30 day supply`
- Out-of-Network Provider `Not covered`
- Vericred's format for this benefit: `In-Network: 40% after deductible, up to $500 per script / Out-of-Network: 100%`
**BNF**
Here's a description of the benefits summary string, represented as a context-free grammar:
```
root ::= coverage
coverage ::= (simple_coverage | tiered_coverage) (space pipe space coverage_modifier)?
tiered_coverage ::= tier (space slash space tier)*
tier ::= tier_name colon space (tier_coverage | not_applicable)
tier_coverage ::= simple_coverage (space (then | or | and) space simple_coverage)* tier_limitation?
simple_coverage ::= (pre_coverage_limitation space)? coverage_amount (space post_coverage_limitation)? (comma? space coverage_condition)?
coverage_modifier ::= limit_condition colon space (((simple_coverage | simple_limitation) (semicolon space see_carrier_documentation)?) | see_carrier_documentation | waived_if_admitted | shared_across_tiers)
waived_if_admitted ::= ("copay" space)? "waived if admitted"
simple_limitation ::= pre_coverage_limitation space "copay applies"
tier_name ::= "In-Network-Tier-2" | "Out-of-Network" | "In-Network"
limit_condition ::= "limit" | "condition"
tier_limitation ::= comma space "up to" space (currency | (integer space time_unit plural?)) (space post_coverage_limitation)?
coverage_amount ::= currency | unlimited | included | unknown | percentage | (digits space (treatment_unit | time_unit) plural?)
pre_coverage_limitation ::= first space digits space time_unit plural?
post_coverage_limitation ::= (((then space currency) | "per condition") space)? "per" space (treatment_unit | (integer space time_unit) | time_unit) plural?
coverage_condition ::= ("before deductible" | "after deductible" | "penalty" | allowance | "in-state" | "out-of-state") (space allowance)?
allowance ::= upto_allowance | after_allowance
upto_allowance ::= "up to" space (currency space)? "allowance"
after_allowance ::= "after" space (currency space)? "allowance"
see_carrier_documentation ::= "see carrier documentation for more information"
shared_across_tiers ::= "shared across all tiers"
unknown ::= "unknown"
unlimited ::= /[uU]nlimited/
included ::= /[iI]ncluded in [mM]edical/
time_unit ::= /[hH]our/ | (((/[cC]alendar/ | /[cC]ontract/) space)? /[yY]ear/) | /[mM]onth/ | /[dD]ay/ | /[wW]eek/ | /[vV]isit/ | /[lL]ifetime/ | ((((/[bB]enefit/ plural?) | /[eE]ligibility/) space)? /[pP]eriod/)
treatment_unit ::= /[pP]erson/ | /[gG]roup/ | /[cC]ondition/ | /[sS]cript/ | /[vV]isit/ | /[eE]xam/ | /[iI]tem/ | /[sS]tay/ | /[tT]reatment/ | /[aA]dmission/ | /[eE]pisode/
comma ::= ","
colon ::= ":"
semicolon ::= ";"
pipe ::= "|"
slash ::= "/"
plural ::= "(s)" | "s"
then ::= "then" | ("," space) | space
or ::= "or"
and ::= "and"
not_applicable ::= "Not Applicable" | "N/A" | "NA"
first ::= "first"
currency ::= "$" number
percentage ::= number "%"
number ::= float | integer
float ::= digits "." digits
integer ::= /[0-9]/+ (comma_int | under_int)*
comma_int ::= ("," /[0-9]/*3) !"_"
under_int ::= ("_" /[0-9]/*3) !","
digits ::= /[0-9]/+ ("_" /[0-9]/+)*
space ::= /[ \t]/+
```
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import vericred_client
from vericred_client.rest import ApiException
from vericred_client.models.county_bulk import CountyBulk
class TestCountyBulk(unittest.TestCase):
""" CountyBulk unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testCountyBulk(self):
"""
Test CountyBulk
"""
model = vericred_client.models.county_bulk.CountyBulk()
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| vericred/vericred-python | test/test_county_bulk.py | Python | apache-2.0 | 9,989 |
"""
Defines a legend for displaying components.
:copyright: 2015 Agile Geoscience
:license: Apache 2.0
"""
# from builtins import object
from io import StringIO
import csv
import warnings
import random
import re
import itertools
try:
from functools import partialmethod
except: # Python 2
from utils import partialmethod
import numpy as np
from matplotlib import patches
import matplotlib.pyplot as plt
from .component import Component
from . import utils
from .defaults import LEGEND__NSDOE
from .defaults import LEGEND__Canstrat
from .defaults import LEGEND__NAGMDM__6_2
from .defaults import LEGEND__NAGMDM__6_1
from .defaults import LEGEND__NAGMDM__4_3
from .defaults import LEGEND__SGMC
from .defaults import TIMESCALE__ISC
from .defaults import TIMESCALE__USGS_ISC
from .defaults import TIMESCALE__DNAG
###############################################
# This module is not used directly, but must
# be imported in order to register new hatches.
from . import hatches # DO NOT DELETE
###############################################
class LegendError(Exception):
    """Raised for any legend-related failure in this module."""
class Decor(object):
    """
    A single display style. A Decor describes how to display a given set
    of Component properties.

    In general, you will not usually use a Decor on its own. Instead, you
    will want to use a Legend, which is just a list of Decors, and leave
    the Decors to the Legend.

    Args:
        params (dict): The parameters you want in the Decor. There must be a
            Component to attach the decoration to, and at least 1 other
            attribute. It's completely up to you, but you probably want at
            least a colour (hex names like #AAA or #d3d3d3, or matplotlib's
            English-language names listed at http://ageo.co/modelrcolour
            are acceptable). The only other parameter the class recognizes
            for now is 'width', which is the width of the striplog element.

    Example:
        my_rock = Component({ ... })
        d = {'component': my_rock, 'colour': 'red'}
        my_decor = Decor(d)
    """
    def __init__(self, *params, **kwargs):
        """
        Supports the passing in of a single dictionary, or the passing of
        keyword arguments.
        Possibly a bad idea; review later.
        """
        # If several positional dicts are passed, only the last one survives.
        for p in params:
            params = p
        # NOTE(review): if both kwargs and a params dict are given, kwargs
        # wins and the positional dict is ignored entirely — confirm intent.
        for k, v in kwargs.items() or params.items():
            # Normalize keys to snake_case; accept the US spelling 'color'.
            k = k.lower().replace(' ', '_')
            if k in ['colour', 'color']:
                k = 'colour'
            # Any falsy value (None, '', 0) is replaced by the default grey
            # hex string — note this applies to every attribute, not just
            # the colour.
            if not v:
                v = '#eeeeee'
            try:
                v = v.lower()
            except AttributeError:
                v = v  # Not a string; keep the value unchanged.
            setattr(self, k, v)
        if (getattr(self, 'component', None) is None) and (getattr(self, 'curve', None) is None):
            raise LegendError("You must provide a Component to decorate.")
        if len(self.__dict__) < 2:
            raise LegendError("You must provide at least one decoration.")
        # Make sure we have a width, and it's a float, even if it's None.
        try:
            self.width = float(getattr(self, 'width', None))
        except (TypeError, ValueError):
            self.width = None
        # Make sure we have a hatch, even if it's None. And correct 'none's.
        self.hatch = getattr(self, 'hatch', None)
        if self.hatch == 'none':
            self.hatch = None
    def __repr__(self):
        s = repr(self.__dict__)
        return "Decor({0})".format(s)
    def __str__(self):
        s = str(self.__dict__)
        return "Decor({0})".format(s)
    def __add__(self, other):
        # Decor + Decor -> Legend of two; Decor + Legend delegates to Legend.
        if isinstance(other, self.__class__):
            result = [self, other]
            return Legend(result)
        elif isinstance(other, Legend):
            return other + self
        else:
            raise LegendError("You can only add legends or decors.")
    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        # Weed out empty elements
        s = {k: v for k, v in self.__dict__.items() if v}
        o = {k: v for k, v in other.__dict__.items() if v}
        # Compare
        if s == o:
            return True
        else:
            return False
    def __ne__(self, other):
        return not self.__eq__(other)
    # If we define __eq__ we also need __hash__ otherwise the object
    # becomes unhashable. All this does is hash the frozenset of the
    # keys. (You can only hash immutables.)
    def __hash__(self):
        return hash(frozenset(self.__dict__.keys()))
    def _repr_html_(self):
        """
        Jupyter Notebook magic repr function.

        Renders the Decor as an HTML table; the colour cell is painted
        with the Decor's own colour.
        """
        rows, c = '', ''
        s = '<tr><td><strong>{k}</strong></td><td style="{stl}">{v}</td></tr>'
        for k, v in self.__dict__.items():
            if k == '_colour':
                k = 'colour'
                # Choose a readable text colour for the coloured cell.
                c = utils.text_colour_for_hex(v)
                style = 'color:{}; background-color:{}'.format(c, v)
            else:
                style = 'color:black; background-color:white'
            if k == 'component':
                try:
                    v = v._repr_html_()
                except AttributeError:
                    v = v.__repr__()
            rows += s.format(k=k, v=v, stl=style)
        html = '<table>{}</table>'.format(rows)
        return html
    def _repr_html_row_(self, keys):
        """
        Jupyter Notebook magic repr function as a row – used by
        ``Legend._repr_html_()``.

        Returns a (header_cells, data_cells) pair of HTML strings.
        """
        tr, th, c = '', '', ''
        r = '<td style="{stl}">{v}</td>'
        h = '<th>{k}</th>'
        for k in keys:
            v = self.__dict__.get(k)
            if k == '_colour':
                k = 'colour'
                c = utils.text_colour_for_hex(v)
                style = 'color:{}; background-color:{}'.format(c, v)
            else:
                style = 'color:black; background-color:white'
            if k == 'component':
                try:
                    v = v._repr_html_()
                except AttributeError:
                    v = v.__repr__()
            tr += r.format(v=v, stl=style)
            th += h.format(k=k)
        return th, tr
    @property
    def colour(self):
        # Backed by _colour; normalization happens in the setter below.
        return self._colour
    @colour.setter
    def colour(self, c):
        # Accept "(r, g, b)" / "[r, g, b]" / "r, g, b" strings as triples.
        numbers = r'([\.0-9]+), ?([\.0-9]+), ?([\.0-9]+)'
        pattern = re.compile(r'[\(\[]?' + numbers + r'[\)\]]?')
        try:
            x = pattern.search(c)
        except:
            x = None
        if x is not None:
            try:
                x = list(map(float, x.groups()))
                # Values above 1 are presumably 0-255 channel ints;
                # otherwise they are treated as 0-1 floats — TODO confirm
                # against utils.rgb_to_hex.
                if x[0] > 1 or x[1] > 1 or x[2] > 1:
                    x = [int(i) for i in x]
                colour = utils.rgb_to_hex(x)
            except KeyError:
                raise LegendError("Colour not recognized: " + c)
        elif not c:
            colour = '#eeeeee'
        elif type(c) in [list, tuple]:
            try:
                colour = utils.rgb_to_hex(c)
            except TypeError:
                raise LegendError("Colour not recognized: " + c)
        elif c[0] != '#':
            # An English colour name, e.g. 'red'.
            try:
                colour = utils.name_to_hex(c)
            except KeyError:
                raise LegendError("Colour not recognized: " + c)
        elif (c[0] == '#') and (len(c) == 4):
            # Three-letter hex
            # Expands '#abc' to '#aabbcc'.
            colour = c[:2] + c[1] + 2*c[2] + 2*c[3]
        elif (c[0] == '#') and (len(c) == 8):
            # 8-letter hex
            # Drop the trailing alpha channel.
            colour = c[:-2]
        else:
            colour = c
        self._colour = colour
    @property
    def rgb(self):
        """
        Returns an RGB triple equivalent to the hex colour.
        """
        return utils.hex_to_rgb(self.colour)
    @property
    def keys(self):
        """
        Returns the keys of the Decor's dict.
        """
        return list(self.__dict__.keys())
    @classmethod
    def random(cls, component, match_only=None):
        """
        Returns a minimal Decor with a random colour.

        Args:
            component (Component): The component to decorate.
            match_only (iterable of str): If given, only these component
                attributes are kept; everything else is dropped.
        """
        c = component.__dict__.copy()
        if match_only is None:
            match_only = c.keys()
        for k in list(c.keys()):
            if k not in match_only:
                _ = c.pop(k)
        # Three distinct random 0-255 channel values.
        colour = random.sample([i for i in range(256)], 3)
        return cls({'colour': colour, 'component': Component(c), 'width': 1.0})
    def plot(self, fmt=None, fig=None, ax=None):
        """
        Make a simple plot of the Decor.

        Args:
            fmt (str): A Python format string for the component summaries.
            fig (Pyplot figure): A figure, optional. Use either fig or ax, not
                both.
            ax (Pyplot axis): An axis, optional. Use either fig or ax, not
                both.

        Returns:
            fig or ax or None. If you pass in an ax, you get it back. If you
            pass in a fig, you get it. If you pass nothing, the function
            creates a plot object as a side-effect.
        """
        u = 4  # aspect ratio of decor plot
        v = 0.25  # ratio of decor tile width
        r = None
        if (fig is None) and (ax is None):
            fig = plt.figure(figsize=(u, 1))
        else:
            r = fig
        if ax is None:
            ax = fig.add_axes([0.1*v, 0.1, 0.8*v, 0.8])
        else:
            r = ax
        # The coloured (and optionally hatched) swatch.
        rect1 = patches.Rectangle((0, 0),
                                  u*v, u*v,
                                  color=self.colour,
                                  lw=1,
                                  hatch=self.hatch,
                                  ec='k')
        ax.add_patch(rect1)
        # The component summary text, to the right of the swatch.
        ax.text(1.0+0.1*v*u, u*v*0.5,
                self.component.summary(fmt=fmt),
                fontsize=max(u, 15),
                verticalalignment='center',
                horizontalalignment='left')
        ax.set_xlim([0, u*v])
        ax.set_ylim([0, u*v])
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax.invert_yaxis()
        return r
class Legend(object):
    """
    A look-up table to assist in the conversion of Components to
    a plot colour.

    Args:
        list_of_Decors (list): The decors to collect into a legend. In
            general, you will want to leave legend building to the
            constructor class methods, `Legend.default()`, and
            `Legend.from_csv(text=string)`. We can add others over time,
            such as `from_xls` and so on.
    """
    def __init__(self, list_of_Decors):
        # Plain-dict view of the Decors, kept alongside the Decor list.
        self.table = [d.__dict__ for d in list_of_Decors]
        self.__list = list_of_Decors
        self.__index = 0
        # NOTE(review): _iter appears unused — iteration goes through
        # __next__/__index below; confirm before removing.
        self._iter = iter(self.__list)  # Set up iterable.
    def __repr__(self):
        s = [repr(d) for d in self.__list]
        return "Legend({0})".format('\n'.join(s))
    def __str__(self):
        s = [str(d) for d in self.__list]
        return '\n'.join(s)
    def __getitem__(self, key):
        # Slices and lists of indices return a new Legend; a single
        # index returns the Decor itself.
        if type(key) is slice:
            i = key.indices(len(self.__list))
            result = [self.__list[n] for n in range(*i)]
            return Legend(result)
        elif type(key) is list:
            result = []
            for j in key:
                result.append(self.__list[j])
            return Legend(result)
        else:
            return self.__list[key]
    def __setitem__(self, key, value):
        self.__list[key] = value
    def __iter__(self):
        return self
    def __next__(self):
        # Stateful iteration: the index lives on the instance and is
        # reset only when the list is exhausted.
        try:
            result = self.__list[self.__index]
        except IndexError:
            self.__index = 0
            raise StopIteration
        self.__index += 1
        return result
    def next(self):
        """
        Retains Python 2 compatibility.
        """
        return self.__next__()
    def __len__(self):
        return len(self.__list)
    def __contains__(self, item):
        # Membership works for both Decors and bare Components.
        if isinstance(item, Decor):
            for d in self.__list:
                if item == d:
                    return True
        if isinstance(item, Component):
            for d in self.__list:
                if item == d.component:
                    return True
        return False
    def __add__(self, other):
        if isinstance(other, self.__class__):
            result = self.__list + other.__list
            return Legend(result)
        elif isinstance(other, Decor):
            result = self.__list + [other]
            return Legend(result)
        else:
            raise LegendError("You can only add legends or decors.")
    def _repr_html_(self):
        """
        Jupyter Notebook magic repr function.

        Renders the whole legend as one HTML table, one row per Decor,
        with a column for every attribute seen on any Decor.
        """
        all_keys = list(set(itertools.chain(*[d.keys for d in self])))
        rows = ''
        for decor in self:
            th, tr = decor._repr_html_row_(keys=all_keys)
            rows += '<tr>{}</tr>'.format(tr)
        header = '<tr>{}</tr>'.format(th)
        html = '<table>{}{}</table>'.format(header, rows)
        return html
    @classmethod
    def builtin(cls, name):
        """
        Generate a default legend.

        Args:
            name (str): The name of the legend you want. Not case sensitive.
                'nsdoe': Nova Scotia Dept. of Energy
                'canstrat': Canstrat
                'nagmdm__6_2': USGS N. Am. Geol. Map Data Model 6.2
                'nagmdm__6_1': USGS N. Am. Geol. Map Data Model 6.1
                'nagmdm__4_3': USGS N. Am. Geol. Map Data Model 4.3
                'sgmc': USGS State Geologic Map Compilation
                Default 'nagmdm__6_2'.

        Returns:
            Legend: The legend stored in `defaults.py`.
        """
        names = {
            'nsdoe': LEGEND__NSDOE,
            'canstrat': LEGEND__Canstrat,
            'nagmdm__6_2': LEGEND__NAGMDM__6_2,
            'nagmdm__6_1': LEGEND__NAGMDM__6_1,
            'nagmdm__4_3': LEGEND__NAGMDM__4_3,
            'sgmc': LEGEND__SGMC,
        }
        return cls.from_csv(text=names[name.lower()])
    @classmethod
    def builtin_timescale(cls, name):
        """
        Generate a default timescale legend.

        Args:
            name (str): One of 'isc', 'usgs_isc' or 'dnag'; not case
                sensitive.

        Returns:
            Legend: The timescale stored in `defaults.py`.
        """
        names = {
            'isc': TIMESCALE__ISC,
            'usgs_isc': TIMESCALE__USGS_ISC,
            'dnag': TIMESCALE__DNAG,
        }
        return cls.from_csv(text=names[name.lower()])
    # Curry: convenience constructors bound to specific builtin names.
    default = partialmethod(builtin, name="NAGMDM__6_2")
    default_timescale = partialmethod(builtin_timescale, name='ISC')
    @classmethod
    def random(cls,
               components,
               width=False,
               colour=None,
               match_only=None,
               ):
        """
        Generate a random legend for a given list of components.

        Args:
            components (list or Striplog): A list of components. If you pass
                a Striplog, it will use the primary components. If you pass a
                component on its own, you will get a random Decor.
            width (bool): Also generate widths for the components, based on
                the order in which they are encountered.
            colour (str): If you want to give the Decors all the same colour,
                provide a hex string.
            match_only (list): A list of Component properties to use.

        Returns:
            Legend or Decor: A legend (or Decor) with random colours.

        TODO:
            It might be convenient to have a partial method to generate an
            'empty' legend. Might be an easy way for someone to start with a
            template, since it'll have the components in it already.
        """
        try:  # Treating as a Striplog.
            list_of_Decors = [Decor.random(c, match_only=match_only)
                              for c
                              in [i[0] for i in components.unique if i[0]]
                              ]
        except:
            # Fall back to a plain list of components...
            try:
                list_of_Decors = [Decor.random(c, match_only=match_only)
                                  for c in components.copy()]
            except:
                # It's a single component.
                list_of_Decors = [Decor.random(components, match_only=match_only)]
        if match_only is not None:
            # We might have duplicate components.
            comps, keeps = [], []
            for d in list_of_Decors:
                if d.component not in comps:
                    comps.append(d.component)
                    keeps.append(d)
            list_of_Decors = keeps
        if colour is not None:
            for d in list_of_Decors:
                d.colour = colour
        if width:
            # Widths 1..n, in encounter order.
            for i, d in enumerate(list_of_Decors):
                d.width = i + 1
        return cls(list_of_Decors)
    @classmethod
    def from_image(cls, filename, components,
                   ignore=None,
                   col_offset=0.1,
                   row_offset=2):
        """
        A slightly easier way to make legends from images.

        Args:
            filename (str)
            components (list)
            ignore (list): Colours to ignore, e.g. "#FFFFFF" to ignore white.
            col_offset (Number): If < 1, interpreted as proportion of way
                across the image. If > 1, interpreted as pixels from left.
            row_offset (int): Number of pixels to skip at the top of each
                interval.
        """
        if ignore is None:
            ignore = []
        rgb = utils.loglike_from_image(filename, offset=col_offset)
        loglike = np.array([utils.rgb_to_hex(t) for t in rgb])
        # Get the pixels and colour values at 'tops' (i.e. changes).
        _, hexes = utils.tops_from_loglike(loglike, offset=row_offset)
        # Reduce to unique colours, preserving first-seen order.
        hexes_reduced = []
        for h in hexes:
            if h not in hexes_reduced:
                if h not in ignore:
                    hexes_reduced.append(h)
        # Pair colours with components positionally.
        list_of_Decors = []
        for i, c in enumerate(components):
            d = Decor({'colour': hexes_reduced[i], 'component': c})
            list_of_Decors.append(d)
        return cls(list_of_Decors)
    @classmethod
    def from_striplog(cls, strip,
                      colour='colour',
                      width='width',
                      hatch='hatch',
                      fields=None,
                      ):
        """
        Creates a legend for a striplog whose components already contain
        display attributes.

        Args:
            strip (Striplog): The striplog whose primary components carry
                the colour/width/hatch fields named by the other arguments.

        Returns:
            legend (striplog.Legend)
        """
        components = [i.primary for i in strip]
        list_of_Decors = []
        for component in components:
            f = {}
            if fields is None:
                fields = component.__dict__.keys()
            for field in fields:
                f[field] = component[field]
            d = {'component': Component(properties=f)}
            d['colour'] = component[colour]
            d['width'] = component[width]
            d['hatch'] = component[hatch]
            decor = Decor(d)
            # Skip duplicates.
            if decor not in list_of_Decors:
                list_of_Decors.append(decor)
        return cls(list_of_Decors)
    @classmethod
    def from_csv(cls, filename=None, text=None):
        """
        Read CSV text and generate a Legend.

        Args:
            filename (str): A CSV file to read, or...
            text (str): The CSV string itself.

        In the first row, list the properties. Precede the properties of the
        component with 'comp ' or 'component '. For example:

            colour, width, comp lithology, comp colour
            #FFFFFF, 0, ,
            #F7E9A6, 3, Sandstone, Grey
            #FF99CC, 2, Anhydrite,
            ... etc

        Note:
            To edit a legend, the easiest thing to do is probably this:
            - `legend.to_csv()`
            - Edit the legend, call it `new_legend`.
            - `legend = Legend.from_csv(text=new_legend)`
        """
        if (filename is None) and (text is None):
            raise LegendError("You must provide a filename or CSV text.")
        if (filename is not None):
            with open(filename, 'r') as f:
                text = f.read()
        try:
            f = StringIO(text)  # Python 3
        except TypeError:
            f = StringIO(unicode(text))  # Python 2
        r = csv.DictReader(f, skipinitialspace=True)
        list_of_Decors, components = [], []
        # The Decor key for the described object: 'component' unless a
        # 'curve ...' column switches it to 'curve'.
        kind = 'component'
        for row in r:
            d, component = {}, {}
            for (k, v) in row.items():
                if (k in [None, '']):
                    continue
                # Skip empty cells, except empty colours (which the Decor
                # coerces to the default grey).
                if (v in [None, '']):
                    if k.lower() not in ['color', 'colour']:
                        continue
                if k[:4].lower() == 'comp':
                    prop = ' '.join(k.split()[1:])
                    # Coerce booleans and numbers; everything else is
                    # lower-cased text.
                    if v.lower() == 'true':
                        component[prop] = True
                    elif v.lower() == 'false':
                        component[prop] = False
                    else:
                        try:
                            component[prop] = float(v)
                        except ValueError:
                            component[prop] = v.lower()
                elif k[:5].lower() == 'curve':
                    prop = ' '.join(k.split()[1:])
                    component[prop] = v.lower()
                    kind = 'curve'
                else:
                    # A Decor property (colour, width, hatch, ...).
                    try:
                        d[k] = float(v)
                    except ValueError:
                        d[k] = v.lower()
            this_component = Component(component)
            d[kind] = this_component
            # Check for duplicates and warn.
            if this_component in components:
                with warnings.catch_warnings():
                    warnings.simplefilter("always")
                    w = "This legend contains duplicate components."
                    warnings.warn(w)
            components.append(this_component)
            # Append to the master list and continue.
            list_of_Decors.append(Decor(d))
        return cls(list_of_Decors)
    def to_csv(self):
        """
        Renders a legend as a CSV string.

        No arguments.

        Returns:
            str: The legend as a CSV.
        """
        # We can't delegate this to Decor because we need to know the superset
        # of all Decor properties. There may be lots of blanks.
        header = []
        component_header = []
        for row in self:
            for j in row.__dict__.keys():
                if j == '_colour':
                    j = 'colour'
                header.append(j)
            for k in row.component.__dict__.keys():
                component_header.append(k)
        header = set(header)
        component_header = set(component_header)
        header.remove('component')
        header_row = ''
        if 'colour' in header:
            header_row += 'colour,'
            header.remove('colour')
            has_colour = True
        # NOTE(review): has_colour is only bound when a colour column
        # exists; a legend with no colours would raise NameError in the
        # loop below — confirm whether that case can occur.
        for item in header:
            header_row += item + ','
        for item in component_header:
            header_row += 'component ' + item + ','
        # Now we have a header row! Phew.
        # Next we'll go back over the legend and collect everything.
        result = header_row.strip(',') + '\n'
        for row in self:
            if has_colour:
                result += row.__dict__.get('_colour', '') + ','
            for item in header:
                result += str(row.__dict__.get(item, '')) + ','
            for item in component_header:
                result += str(row.component.__dict__.get(item, '')) + ','
            result += '\n'
        return result
    @property
    def max_width(self):
        """
        The maximum width of all the Decors in the Legend. This is needed
        to scale a Legend or Striplog when plotting with widths turned on.
        """
        try:
            maximum = max([row.width for row in self.__list if row.width is not None])
            return maximum
        except:
            # No widths at all (or an empty legend): treat as zero.
            return 0
    def get_decor(self, c, match_only=None):
        """
        Get the decor for a component.

        Args:
            c (component): The component to look up.
            match_only (list of str): The component attributes to include in
                the comparison. Default: All of them.

        Returns:
            Decor. The matching Decor from the Legend, or a default grey
            Decor if not found.
        """
        if isinstance(c, Component):
            if c:
                if match_only:
                    # Filter the component to only those attributes.
                    c = Component({k: getattr(c, k, None) for k in match_only})
                for decor in self.__list:
                    try:
                        if c == decor.component:
                            return decor
                    except AttributeError:
                        continue
        else:
            # Not a Component: try to match a curve by mnemonic or alias.
            for decor in self.__list:
                try:
                    if getattr(c, 'mnemonic').lower() == decor.curve.mnemonic:
                        return decor
                    if getattr(c, '_alias').lower() == decor.curve.mnemonic:
                        return decor
                except AttributeError:
                    continue
        # Fall-through: no match found.
        return Decor({'colour': '#eeeeee', 'component': Component()})
    def getattr(self, c, attr, default=None, match_only=None):
        """
        Get the attribute of a component.

        Note: intentionally shadows the builtin `getattr` within this
        class's namespace; the builtin is still used internally.

        Args:
            c (component): The component to look up.
            attr (str): The attribute to get.
            default (str): What to return in the event of no match.
            match_only (list of str): The component attributes to include in
                the comparison. Default: All of them.

        Returns:
            obj. The specified attribute of the matching Decor in the Legend.
        """
        matching_decor = self.get_decor(c, match_only=match_only)
        try:
            return getattr(matching_decor, attr)
        except AttributeError:
            return default
    def get_colour(self, c, default='#eeeeee', match_only=None):
        """
        Get the display colour of a component. Wraps `getattr()`.

        Development note:
            Cannot define this as a `partial()` because I want
            to maintain the order of arguments in `getattr()`.

        Args:
            c (component): The component to look up.
            default (str): The colour to return in the event of no match.
            match_only (list of str): The component attributes to include in
                the comparison. Default: All of them.

        Returns:
            str. The hex string of the matching Decor in the Legend.
        """
        return self.getattr(c=c,
                            attr='colour',
                            default=default,
                            match_only=match_only)
    def get_width(self, c, default=0, match_only=None):
        """
        Get the display width of a component. Wraps `getattr()`.

        Development note: Cannot define this as a `partial()` because I want
        to maintain the order of arguments in `getattr()`.

        Args:
            c (component): The component to look up.
            default (float): The width to return in the event of no match.
            match_only (list of str): The component attributes to include in
                the comparison. Default: All of them.

        Returns:
            float. The width of the matching Decor in the Legend.
        """
        return self.getattr(c=c,
                            attr='width',
                            default=default,
                            match_only=match_only)
    def get_component(self, colour, tolerance=0, default=None):
        """
        Get the component corresponding to a display colour. This is for
        generating a Striplog object from a colour image of a striplog.

        Args:
            colour (str): The hex colour string to look up.
            tolerance (float): The colourspace distance within which to
                match (Euclidean distance in RGB space; the max possible
                is sqrt(3 * 255**2) ≈ 441.67).
            default (component or None): The component to return in the
                event of no match.

        Returns:
            component. The component best matching the provided colour.
        """
        if not (0 <= tolerance <= np.sqrt(195075)):
            raise LegendError('Tolerance must be between 0 and 441.67')
        # Exact match first.
        for decor in self.__list:
            if colour.lower() == decor.colour:
                return decor.component
        # If we're here, we didn't find one yet.
        r1, g1, b1 = utils.hex_to_rgb(colour)
        # Start with a best match of black.
        best_match = Component()
        best_match_colour = '#000000'
        best_match_dist = np.sqrt(r1**2. + g1**2. + b1**2.)
        # Now compare to each colour in the legend.
        for decor in self.__list:
            r2, g2, b2 = decor.rgb
            distance = np.sqrt((r2-r1)**2. + (g2-g1)**2. + (b2-b1)**2.)
            if distance < best_match_dist:
                best_match = decor.component
                best_match_dist = distance
                best_match_colour = decor.colour
        if best_match_dist <= tolerance:
            return best_match
        else:
            # Out of tolerance: warn with the nearest candidate, then
            # return the caller's default.
            with warnings.catch_warnings():
                warnings.simplefilter("always")
                w = "No match found for {0} ".format(colour.lower())
                w += "with tolerance of {0}. Best match is ".format(tolerance)
                w += "{0}, {1}".format(best_match.summary(), best_match_colour)
                w += ", d={0}".format(best_match_dist)
                warnings.warn(w)
            return default
    def plot(self, fmt=None, ax=None):
        """
        Make a simple plot of the legend.

        Calls Decor.plot() on all of its members.

        TODO: Build a more attractive plot.
        """
        if ax is None:
            fig, ax = plt.subplots()
            return_ax = False
        else:
            return_ax = True
        # Stack one tile per Decor, bottom to top.
        height = width = (0.9 / len(self))
        h_incr = 1 / len(self)
        left_pos = 0.1
        bot_pos = 0.0
        for decor in self:
            cax = utils.add_subplot_axes(ax, [left_pos, bot_pos, width, height])
            cax = decor.plot(ax=cax)
            bot_pos += h_incr
        ax.axis('off')
        if return_ax:
            return ax
        else:
            plt.show()
| agile-geoscience/striplog | striplog/legend.py | Python | apache-2.0 | 30,661 |
#!/usr/bin/env python # pylint: disable=too-many-lines
# ___ ___ _ _ ___ ___ _ _____ ___ ___
# / __| __| \| | __| _ \ /_\_ _| __| \
# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
# | |) | (_) | | .` | (_) || | | _|| |) | | | |
# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
'''
OpenShiftCLI class that wraps the oc commands in a subprocess
'''
# pylint: disable=too-many-lines
import atexit
import json
import os
import re
import shutil
import subprocess
import ruamel.yaml as yaml
#import yaml
#
## This is here because of a bug that causes yaml
## to incorrectly handle timezone info on timestamps
#def timestamp_constructor(_, node):
# '''return timestamps as strings'''
# return str(node.value)
#yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
class OpenShiftCLIError(Exception):
    '''Exception raised by the openshiftcli wrappers.'''
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
    ''' Class to wrap the command line tools.

    Each helper builds an argument vector and shells out to `oc` (or `oadm`)
    via openshift_cmd(); results come back as dicts containing at least
    `returncode`, `results` and `cmd`.

    NOTE(review): this module uses Python 2 constructs (`print` statements,
    dict.has_key) and is not Python 3 compatible.
    '''
    def __init__(self,
                 namespace,
                 kubeconfig='/etc/origin/master/admin.kubeconfig',
                 verbose=False,
                 all_namespaces=False):
        ''' Constructor for OpenshiftCLI.

        Args:
            namespace (str): default namespace for namespaced commands.
            kubeconfig (str): kubeconfig path handed to `oc` via KUBECONFIG.
            verbose (bool): echo commands and raw output to stdout.
            all_namespaces (bool): make _get() query across all namespaces.
        '''
        self.namespace = namespace
        self.verbose = verbose
        self.kubeconfig = kubeconfig
        self.all_namespaces = all_namespaces
    # Pylint allows only 5 arguments to be passed.
    # pylint: disable=too-many-arguments
    def _replace_content(self, resource, rname, content, force=False, sep='.'):
        ''' replace the current object with the content '''
        res = self._get(resource, rname)
        if not res['results']:
            # Nothing to replace; propagate the (empty/failed) lookup result.
            return res
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, res['results'][0], separator=sep)
        changes = []
        for key, value in content.items():
            changes.append(yed.put(key, value))
        # yed.put() returns (changed, result) tuples; only run `oc replace`
        # when at least one key actually changed.
        if any([change[0] for change in changes]):
            yed.write()
            atexit.register(Utils.cleanup, [fname])
            return self._replace(fname, force)
        return {'returncode': 0, 'updated': False}
    def _replace(self, fname, force=False):
        '''run `oc replace -f` on the given resource file'''
        cmd = ['-n', self.namespace, 'replace', '-f', fname]
        if force:
            cmd.append('--force')
        return self.openshift_cmd(cmd)
    def _create_from_content(self, rname, content):
        '''write content to a temp file and `oc create` it'''
        fname = '/tmp/%s' % rname
        yed = Yedit(fname, content=content)
        yed.write()
        # Remove the temp file when the interpreter exits.
        atexit.register(Utils.cleanup, [fname])
        return self._create(fname)
    def _create(self, fname):
        '''run `oc create -f` on the given file'''
        return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
    def _delete(self, resource, rname, selector=None):
        '''run `oc delete` on the named resource'''
        cmd = ['delete', resource, rname, '-n', self.namespace]
        if selector:
            cmd.append('--selector=%s' % selector)
        return self.openshift_cmd(cmd)
    def _process(self, template_name, create=False, params=None, template_data=None):
        '''process a template, optionally creating the resulting objects'''
        cmd = ['process', '-n', self.namespace]
        if template_data:
            # Template content arrives via stdin instead of by name.
            cmd.extend(['-f', '-'])
        else:
            cmd.append(template_name)
        if params:
            param_str = ["%s=%s" % (key, value) for key, value in params.items()]
            cmd.append('-v')
            cmd.extend(param_str)
        results = self.openshift_cmd(cmd, output=True, input_data=template_data)
        if results['returncode'] != 0 or not create:
            return results
        # Write the processed objects out and create them in one shot.
        fname = '/tmp/%s' % template_name
        yed = Yedit(fname, results['results'])
        yed.write()
        atexit.register(Utils.cleanup, [fname])
        return self.openshift_cmd(['-n', self.namespace, 'create', '-f', fname])
    def _get(self, resource, rname=None, selector=None):
        '''return a resource by name '''
        cmd = ['get', resource]
        if selector:
            cmd.append('--selector=%s' % selector)
        if self.all_namespaces:
            cmd.extend(['--all-namespaces'])
        elif self.namespace:
            cmd.extend(['-n', self.namespace])
        cmd.extend(['-o', 'json'])
        if rname:
            cmd.append(rname)
        rval = self.openshift_cmd(cmd, output=True)
        # Ensure results are returned in an array (a List response is
        # unwrapped; a single object is wrapped).  has_key is Python 2 only.
        if rval.has_key('items'):
            rval['results'] = rval['items']
        elif not isinstance(rval['results'], list):
            rval['results'] = [rval['results']]
        return rval
    def _schedulable(self, node=None, selector=None, schedulable=True):
        ''' perform oadm manage-node --schedulable '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        cmd.append('--schedulable=%s' % schedulable)
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _list_pods(self, node=None, selector=None, pod_selector=None):
        ''' perform oadm manage-node --list-pods '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        cmd.extend(['--list-pods', '-o', 'json'])
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    #pylint: disable=too-many-arguments
    def _evacuate(self, node=None, selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False):
        ''' perform oadm manage-node --evacuate '''
        cmd = ['manage-node']
        if node:
            cmd.extend(node)
        else:
            cmd.append('--selector=%s' % selector)
        if dry_run:
            cmd.append('--dry-run')
        if pod_selector:
            cmd.append('--pod-selector=%s' % pod_selector)
        if grace_period:
            cmd.append('--grace-period=%s' % int(grace_period))
        if force:
            cmd.append('--force')
        cmd.append('--evacuate')
        return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw')
    def _import_image(self, url=None, name=None, tag=None):
        ''' perform image import via `oc import-image --confirm` '''
        cmd = ['import-image']
        image = '{0}'.format(name)
        if tag:
            image += ':{0}'.format(tag)
        cmd.append(image)
        if url:
            cmd.append('--from={0}/{1}'.format(url, image))
        cmd.append('-n{0}'.format(self.namespace))
        cmd.append('--confirm')
        return self.openshift_cmd(cmd)
    #pylint: disable=too-many-arguments
    def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None):
        '''Base command for oc.

        Args:
            cmd (list): argument vector, without the binary itself.
            oadm (bool): run /usr/bin/oadm instead of /usr/bin/oc.
            output (bool): capture stdout into rval['results'].
            output_type (str): 'json' to parse stdout, 'raw' to keep the text.
            input_data (str or None): data piped to the process's stdin.

        Returns:
            dict with `returncode`, `results`, `cmd`, and on failure/parse
            problems also `stderr`/`stdout`/`err`.
        '''
        cmds = []
        if oadm:
            cmds = ['/usr/bin/oadm']
        else:
            cmds = ['/usr/bin/oc']
        cmds.extend(cmd)
        rval = {}
        # Placeholder so rval['results'] is always present even when
        # output capture is disabled.
        results = ''
        err = None
        if self.verbose:
            print ' '.join(cmds)
        # KUBECONFIG is passed through the environment, not on the CLI.
        proc = subprocess.Popen(cmds,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                env={'KUBECONFIG': self.kubeconfig})
        stdout, stderr = proc.communicate(input_data)
        rval = {"returncode": proc.returncode,
                "results": results,
                "cmd": ' '.join(cmds),
               }
        if proc.returncode == 0:
            if output:
                if output_type == 'json':
                    try:
                        rval['results'] = json.loads(stdout)
                    except ValueError as err:
                        # err.message is Python 2 only; a non-JSON body is
                        # reported via the `err` key rather than raised.
                        if "No JSON object could be decoded" in err.message:
                            err = err.message
                elif output_type == 'raw':
                    rval['results'] = stdout
            if self.verbose:
                print stdout
                print stderr
            if err:
                rval.update({"err": err,
                             "stderr": stderr,
                             "stdout": stdout,
                             "cmd": cmds
                            })
        else:
            # Non-zero exit: keep raw streams and clear the results payload.
            rval.update({"stderr": stderr,
                         "stdout": stdout,
                         "results": {},
                        })
        return rval
class Utils(object):
    ''' utilities for openshiftcli modules.

    NOTE(review): several methods use Python 2 constructs (`print`
    statements, dict.has_key) and will not run under Python 3.
    '''
    @staticmethod
    def create_file(rname, data, ftype='yaml'):
        ''' create a file in tmp with name and contents.

        `data` is serialized as yaml (default), json, or written verbatim.
        Returns the path of the created file.
        '''
        path = os.path.join('/tmp', rname)
        with open(path, 'w') as fds:
            if ftype == 'yaml':
                fds.write(yaml.dump(data, Dumper=yaml.RoundTripDumper))
            elif ftype == 'json':
                fds.write(json.dumps(data))
            else:
                fds.write(data)
        # Register cleanup when module is done
        atexit.register(Utils.cleanup, [path])
        return path
    @staticmethod
    def create_files_from_contents(content, content_type=None):
        '''Turn an array of dict: filename, content into a files array.

        Each item needs 'path' and 'data' keys; returns a list of
        {'name': basename, 'path': tmp path} dicts.
        '''
        if not isinstance(content, list):
            content = [content]
        files = []
        for item in content:
            path = Utils.create_file(item['path'], item['data'], ftype=content_type)
            files.append({'name': os.path.basename(path), 'path': path})
        return files
    @staticmethod
    def cleanup(files):
        '''Clean up on exit: remove each listed file or directory tree.'''
        for sfile in files:
            if os.path.exists(sfile):
                if os.path.isdir(sfile):
                    shutil.rmtree(sfile)
                elif os.path.isfile(sfile):
                    os.remove(sfile)
    @staticmethod
    def exists(results, _name):
        ''' Check to see if the results include the name '''
        if not results:
            return False
        if Utils.find_result(results, _name):
            return True
        return False
    @staticmethod
    def find_result(results, _name):
        ''' Find the specified result by metadata.name; None if absent.'''
        rval = None
        for result in results:
            # has_key is Python 2 only.
            if result.has_key('metadata') and result['metadata']['name'] == _name:
                rval = result
                break
        return rval
    @staticmethod
    def get_resource_file(sfile, sfile_type='yaml'):
        ''' return the parsed contents of a resource file (yaml or json).'''
        contents = None
        with open(sfile) as sfd:
            contents = sfd.read()
        if sfile_type == 'yaml':
            contents = yaml.load(contents, yaml.RoundTripLoader)
        elif sfile_type == 'json':
            contents = json.loads(contents)
        return contents
    # Disabling too-many-branches.  This is a yaml dictionary comparison function
    # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements
    @staticmethod
    def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
        ''' Given a user defined definition, compare it with the results given back by our query.

        Recursively compares `result_def` against `user_def`, ignoring the
        autogenerated top-level keys plus any in `skip_keys`.  Returns True
        when every (non-skipped) key/value in result_def matches user_def.
        '''
        # Currently these values are autogenerated and we do not need to check them
        skip = ['metadata', 'status']
        if skip_keys:
            skip.extend(skip_keys)
        for key, value in result_def.items():
            if key in skip:
                continue
            # Both are lists
            if isinstance(value, list):
                if not user_def.has_key(key):
                    if debug:
                        print 'User data does not have key [%s]' % key
                        print 'User data: %s' % user_def
                    return False
                if not isinstance(user_def[key], list):
                    if debug:
                        print 'user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])
                    return False
                if len(user_def[key]) != len(value):
                    if debug:
                        print "List lengths are not equal."
                        print "key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))
                        print "user_def: %s" % user_def[key]
                        print "value: %s" % value
                    return False
                # Compare the lists pairwise; dict elements recurse.
                for values in zip(user_def[key], value):
                    if isinstance(values[0], dict) and isinstance(values[1], dict):
                        if debug:
                            print 'sending list - list'
                            print type(values[0])
                            print type(values[1])
                        result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
                        if not result:
                            print 'list compare returned false'
                            return False
                    elif value != user_def[key]:
                        if debug:
                            print 'value should be identical'
                            print value
                            print user_def[key]
                        return False
            # recurse on a dictionary
            elif isinstance(value, dict):
                if not user_def.has_key(key):
                    if debug:
                        print "user_def does not have key [%s]" % key
                    return False
                if not isinstance(user_def[key], dict):
                    if debug:
                        print "dict returned false: not instance of dict"
                    return False
                # before passing ensure keys match
                api_values = set(value.keys()) - set(skip)
                user_values = set(user_def[key].keys()) - set(skip)
                if api_values != user_values:
                    if debug:
                        print "keys are not equal in dict"
                        print api_values
                        print user_values
                    return False
                result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
                if not result:
                    if debug:
                        print "dict returned false"
                        print result
                    return False
            # Verify each key, value pair is the same
            else:
                if not user_def.has_key(key) or value != user_def[key]:
                    if debug:
                        print "value not equal; user_def does not have key"
                        print key
                        print value
                        if user_def.has_key(key):
                            print user_def[key]
                    return False
        if debug:
            print 'returning true'
        return True
class OpenShiftCLIConfig(object):
    '''Generic Config: holds a resource name/namespace/kubeconfig plus an
    options dict of {key: {'include': bool, 'value': ...}} entries.'''
    def __init__(self, rname, namespace, kubeconfig, options):
        self.kubeconfig = kubeconfig
        self.name = rname
        self.namespace = namespace
        self._options = options
    @property
    def config_options(self):
        ''' return config options '''
        return self._options
    def to_option_list(self):
        '''return the included options as a list of --key=value strings'''
        return self.stringify()
    def stringify(self):
        ''' render the options hash as cli params (a list of strings) '''
        # An option is emitted when it is included and its value is truthy,
        # or is an int (so 0 / False still make it onto the command line).
        return ['--%s=%s' % (key.replace('_', '-'), opt['value'])
                for key, opt in self.config_options.items()
                if opt['include'] and (opt['value'] or isinstance(opt['value'], int))]
class YeditException(Exception):
    ''' Exception raised by Yedit operations. '''
class Yedit(object):
    ''' Class to modify yaml files.

    Content lives in `yaml_dict` (a plain dict or a ruamel.yaml round-trip
    structure) and is addressed with separator-delimited key paths such as
    'a.b[0].c'.  The separator defaults to '.' and may be any of com_sep.
    '''
    # %s is filled with the non-active separators, so e.g. 'a.b' stays one
    # key when the separator is '#'.  [n] suffixes address list entries.
    re_valid_key = r"(((\[-?\d+\])|([0-9a-zA-Z%s/_-]+)).?)+$"
    re_key = r"(?:\[(-?\d+)\])|([0-9a-zA-Z%s/_-]+)"
    com_sep = set(['.', '#', '|', ':'])
    # pylint: disable=too-many-arguments
    def __init__(self, filename=None, content=None, content_type='yaml', separator='.', backup=False):
        '''Constructor.

        Args:
            filename (str or None): file to read/write; None for in-memory use.
            content (dict or str or None): initial content; wins over the file.
            content_type (str): 'yaml' or 'json'.
            separator (str): key-path separator used by get/put/delete/pop.
            backup (bool): copy the file to <filename>.orig before writing.
        '''
        self.content = content
        self._separator = separator
        self.filename = filename
        self.__yaml_dict = content
        self.content_type = content_type
        self.backup = backup
        self.load(content_type=self.content_type)
        if self.__yaml_dict is None:
            self.__yaml_dict = {}
    @property
    def separator(self):
        ''' getter method for separator '''
        return self._separator
    @separator.setter
    def separator(self, value):
        ''' setter method for separator.

        BUG FIX: this was previously declared `def separator(self)` and
        returned the current value, so assigning to .separator raised
        TypeError; it now actually stores the new separator.
        '''
        self._separator = value
    @property
    def yaml_dict(self):
        ''' getter method for yaml_dict '''
        return self.__yaml_dict
    @yaml_dict.setter
    def yaml_dict(self, value):
        ''' setter method for yaml_dict '''
        self.__yaml_dict = value
    @staticmethod
    def parse_key(key, sep='.'):
        '''parse the key into (list_index, dict_key) pairs, honouring `sep`'''
        common_separators = list(Yedit.com_sep - set([sep]))
        return re.findall(Yedit.re_key % ''.join(common_separators), key)
    @staticmethod
    def valid_key(key, sep='.'):
        '''validate the incoming key against re_valid_key'''
        common_separators = list(Yedit.com_sep - set([sep]))
        if not re.match(Yedit.re_valid_key % ''.join(common_separators), key):
            return False
        return True
    @staticmethod
    def remove_entry(data, key, sep='.'):
        ''' remove data at location key.

        Returns True on success; None when the key is invalid or absent.
        An empty key clears the whole dict/list.
        '''
        if key == '' and isinstance(data, dict):
            data.clear()
            return True
        elif key == '' and isinstance(data, list):
            del data[:]
            return True
        if not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        # Walk down to the parent container of the entry being removed.
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None
        # process last index for remove
        # expected list entry
        if key_indexes[-1][0]:
            if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
                del data[int(key_indexes[-1][0])]
                return True
        # expected dict entry
        elif key_indexes[-1][1]:
            if isinstance(data, dict):
                del data[key_indexes[-1][1]]
                return True
    @staticmethod
    def add_entry(data, key, item=None, sep='.'):
        ''' Set `item` at the dotted-key location, creating intermediate dicts.

            d = {'a': {'b': 'c'}}
            key = 'a#b' (with sep='#') addresses 'c'

        Returns the container that received the item (or the item itself for
        an empty key); None when the path cannot be created.
        '''
        if key == '':
            pass
        elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes[:-1]:
            if dict_key:
                # `in` replaces dict.has_key(), which was Python-2-only.
                if isinstance(data, dict) and dict_key in data and data[dict_key]:
                    data = data[dict_key]
                    continue
                elif data and not isinstance(data, dict):
                    return None
                # Create (or reset an empty/falsy) intermediate dict.
                data[dict_key] = {}
                data = data[dict_key]
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None
        if key == '':
            data = item
        # process last index for add
        # expected list entry
        elif key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
            data[int(key_indexes[-1][0])] = item
        # expected dict entry
        elif key_indexes[-1][1] and isinstance(data, dict):
            data[key_indexes[-1][1]] = item
        return data
    @staticmethod
    def get_entry(data, key, sep='.'):
        ''' Get an item from a dictionary with key notation a.b.c

            d = {'a': {'b': 'c'}}
            key = 'a.b'
            returns 'c'
        '''
        if key == '':
            pass
        elif not (key and Yedit.valid_key(key, sep)) and isinstance(data, (list, dict)):
            return None
        key_indexes = Yedit.parse_key(key, sep)
        for arr_ind, dict_key in key_indexes:
            if dict_key and isinstance(data, dict):
                data = data.get(dict_key, None)
            elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
                data = data[int(arr_ind)]
            else:
                return None
        return data
    def write(self):
        ''' write yaml_dict to self.filename (optionally backing it up first) '''
        if not self.filename:
            raise YeditException('Please specify a filename.')
        if self.backup and self.file_exists():
            shutil.copy(self.filename, self.filename + '.orig')
        # Write to a sibling temp file, then rename into place.
        tmp_filename = self.filename + '.yedit'
        try:
            with open(tmp_filename, 'w') as yfd:
                # pylint: disable=no-member
                if hasattr(self.yaml_dict, 'fa'):
                    self.yaml_dict.fa.set_block_style()
                yfd.write(yaml.dump(self.yaml_dict, Dumper=yaml.RoundTripDumper))
        except Exception as err:
            # str(err) instead of err.message: .message is Python-2-only.
            raise YeditException(str(err))
        os.rename(tmp_filename, self.filename)
        return (True, self.yaml_dict)
    def read(self):
        ''' read from file; returns None when no readable file is configured '''
        if self.filename is None or not self.file_exists():
            return None
        contents = None
        with open(self.filename) as yfd:
            contents = yfd.read()
        return contents
    def file_exists(self):
        ''' return whether self.filename exists '''
        if os.path.exists(self.filename):
            return True
        return False
    def load(self, content_type='yaml'):
        ''' load self.content (or the file) into yaml_dict and return it '''
        contents = self.read()
        if not contents and not self.content:
            return None
        if self.content:
            # Dict content is adopted as-is; string content gets parsed below.
            if isinstance(self.content, dict):
                self.yaml_dict = self.content
                return self.yaml_dict
            elif isinstance(self.content, str):
                contents = self.content
        # check if it is yaml
        try:
            if content_type == 'yaml' and contents:
                self.yaml_dict = yaml.load(contents, yaml.RoundTripLoader)
                # pylint: disable=no-member
                if hasattr(self.yaml_dict, 'fa'):
                    self.yaml_dict.fa.set_block_style()
            elif content_type == 'json' and contents:
                self.yaml_dict = json.loads(contents)
        except yaml.YAMLError as err:
            # Error loading yaml or json
            raise YeditException('Problem with loading yaml file. %s' % err)
        return self.yaml_dict
    def get(self, key):
        ''' get the value at a specified key path, or None '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, key, self.separator)
        except KeyError:
            entry = None
        return entry
    def pop(self, path, key_or_item):
        ''' remove a key/value pair from the dict (or an item from the list)
        found at `path`.  Returns (changed, yaml_dict). '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry is None:
            return (False, self.yaml_dict)
        if isinstance(entry, dict):
            # pylint: disable=no-member,maybe-no-member
            if key_or_item in entry:
                entry.pop(key_or_item)
                return (True, self.yaml_dict)
            return (False, self.yaml_dict)
        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            try:
                ind = entry.index(key_or_item)
            except ValueError:
                return (False, self.yaml_dict)
            entry.pop(ind)
            return (True, self.yaml_dict)
        return (False, self.yaml_dict)
    def delete(self, path):
        ''' remove `path` from the data.  Returns (changed, yaml_dict). '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry is None:
            return (False, self.yaml_dict)
        result = Yedit.remove_entry(self.yaml_dict, path, self.separator)
        if not result:
            return (False, self.yaml_dict)
        return (True, self.yaml_dict)
    def exists(self, path, value):
        ''' check if value exists at path '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if isinstance(entry, list):
            if value in entry:
                return True
            return False
        elif isinstance(entry, dict):
            if isinstance(value, dict):
                # All of value's pairs must be present and equal in entry.
                rval = False
                for key, val in value.items():
                    if entry[key] != val:
                        rval = False
                        break
                else:
                    rval = True
                return rval
            return value in entry
        return entry == value
    def append(self, path, value):
        '''append value to the list at path, creating it when missing.
        Returns (changed, yaml_dict).'''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry is None:
            self.put(path, [])
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        if not isinstance(entry, list):
            return (False, self.yaml_dict)
        # pylint: disable=no-member,maybe-no-member
        entry.append(value)
        return (True, self.yaml_dict)
    # pylint: disable=too-many-arguments
    def update(self, path, value, index=None, curr_value=None):
        ''' update a dict (merge) or list element (by index/current value) at
        path.  Returns (changed, yaml_dict). '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if isinstance(entry, dict):
            # pylint: disable=no-member,maybe-no-member
            if not isinstance(value, dict):
                raise YeditException('Cannot replace key, value entry in dict with non-dict type.' \
                                     ' value=[%s] [%s]' % (value, type(value)))
            entry.update(value)
            return (True, self.yaml_dict)
        elif isinstance(entry, list):
            # pylint: disable=no-member,maybe-no-member
            ind = None
            if curr_value:
                try:
                    ind = entry.index(curr_value)
                except ValueError:
                    return (False, self.yaml_dict)
            elif index is not None:
                ind = index
            if ind is not None and entry[ind] != value:
                entry[ind] = value
                return (True, self.yaml_dict)
            # see if it exists in the list
            try:
                ind = entry.index(value)
            except ValueError:
                # doesn't exist, append it
                entry.append(value)
                return (True, self.yaml_dict)
            # already exists, return
            if ind is not None:
                return (False, self.yaml_dict)
        return (False, self.yaml_dict)
    def put(self, path, value):
        ''' put value at path (copy-on-write).  Returns (changed, yaml_dict). '''
        try:
            entry = Yedit.get_entry(self.yaml_dict, path, self.separator)
        except KeyError:
            entry = None
        if entry == value:
            return (False, self.yaml_dict)
        # deepcopy didn't work; round-trip through yaml to copy while
        # preserving ruamel comments/formatting.
        tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
        # pylint: disable=no-member
        if hasattr(self.yaml_dict, 'fa'):
            tmp_copy.fa.set_block_style()
        result = Yedit.add_entry(tmp_copy, path, value, self.separator)
        if not result:
            return (False, self.yaml_dict)
        self.yaml_dict = tmp_copy
        return (True, self.yaml_dict)
    def create(self, path, value):
        ''' create a yaml file with value at path, only when the file does
        not already exist.  Returns (changed, yaml_dict). '''
        if not self.file_exists():
            # deepcopy didn't work; round-trip through yaml instead.
            tmp_copy = yaml.load(yaml.round_trip_dump(self.yaml_dict, default_flow_style=False), yaml.RoundTripLoader)
            # pylint: disable=no-member
            if hasattr(self.yaml_dict, 'fa'):
                tmp_copy.fa.set_block_style()
            result = Yedit.add_entry(tmp_copy, path, value, self.separator)
            if result:
                self.yaml_dict = tmp_copy
                return (True, self.yaml_dict)
        return (False, self.yaml_dict)
# pylint: disable=too-many-instance-attributes
class PersistentVolumeClaimConfig(object):
    ''' Handle pvc options: builds the PersistentVolumeClaim definition dict. '''
    # pylint: disable=too-many-arguments
    def __init__(self,
                 sname,
                 namespace,
                 kubeconfig,
                 access_modes=None,
                 vol_capacity='1G'):
        ''' constructor for handling pvc options

        Args:
            sname (str): claim name.
            namespace (str): target namespace.
            kubeconfig (str): kubeconfig path for later oc calls.
            access_modes (list or None): accessModes; defaults to ReadWriteOnce.
            vol_capacity (str): requested storage size, e.g. '1G'.
        '''
        self.kubeconfig = kubeconfig
        self.name = sname
        self.namespace = namespace
        self.access_modes = access_modes
        self.vol_capacity = vol_capacity
        self.data = {}
        self.create_dict()
    def create_dict(self):
        ''' build the PersistentVolumeClaim definition into self.data '''
        modes = self.access_modes if self.access_modes else ['ReadWriteOnce']
        self.data = {
            'apiVersion': 'v1',
            'kind': 'PersistentVolumeClaim',
            'metadata': {'name': self.name},
            'spec': {
                'accessModes': modes,
                'resources': {'requests': {'storage': self.vol_capacity}},
            },
        }
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class PersistentVolumeClaim(Yedit):
    ''' Wrap a PersistentVolumeClaim definition with convenience accessors. '''
    access_modes_path = "spec.accessModes"
    volume_capacity_path = "spec.requests.storage"
    volume_name_path = "spec.volumeName"
    bound_path = "status.phase"
    kind = 'PersistentVolumeClaim'
    def __init__(self, content):
        '''PersistentVolumeClaim constructor'''
        super(PersistentVolumeClaim, self).__init__(content=content)
        self._access_modes = None
        self._volume_capacity = None
        self._volume_name = None
    @property
    def volume_name(self):
        ''' spec.volumeName, fetched lazily and cached '''
        if self._volume_name is None:
            self._volume_name = self.get_volume_name()
        return self._volume_name
    @volume_name.setter
    def volume_name(self, data):
        ''' volume_name cache setter '''
        self._volume_name = data
    @property
    def access_modes(self):
        ''' spec.accessModes, fetched lazily and cached '''
        if self._access_modes is None:
            self._access_modes = self.get_access_modes()
        return self._access_modes
    @access_modes.setter
    def access_modes(self, data):
        ''' access_modes cache setter '''
        self._access_modes = data
    @property
    def volume_capacity(self):
        ''' spec.requests.storage, fetched lazily and cached '''
        if self._volume_capacity is None:
            self._volume_capacity = self.get_volume_capacity()
        return self._volume_capacity
    @volume_capacity.setter
    def volume_capacity(self, data):
        ''' volume_capacity cache setter '''
        self._volume_capacity = data
    def get_access_modes(self):
        '''read accessModes from the definition ([] when absent)'''
        return self.get(PersistentVolumeClaim.access_modes_path) or []
    def get_volume_capacity(self):
        '''read the requested storage capacity ([] when absent)'''
        return self.get(PersistentVolumeClaim.volume_capacity_path) or []
    def get_volume_name(self):
        '''read the bound volume name ([] when absent)'''
        return self.get(PersistentVolumeClaim.volume_name_path) or []
    def is_bound(self):
        '''return the status.phase ([] when absent)'''
        return self.get(PersistentVolumeClaim.bound_path) or []
    def add_access_mode(self, inc_mode):
        ''' add an access_mode to the claim '''
        if not self.access_modes:
            # No modes yet: write a fresh list into the definition.
            self.put(PersistentVolumeClaim.access_modes_path, [inc_mode])
        else:
            self.access_modes.append(inc_mode)
        return True
    def remove_access_mode(self, inc_mode):
        ''' remove an access_mode; False when it was not present '''
        if inc_mode not in self.access_modes:
            return False
        self.access_modes.remove(inc_mode)
        return True
    def update_access_mode(self, inc_mode):
        ''' update an access_mode, adding it when missing '''
        if inc_mode not in self.access_modes:
            return self.add_access_mode(inc_mode)
        self.access_modes[self.access_modes.index(inc_mode)] = inc_mode
        return True
    def find_access_mode(self, inc_mode):
        ''' return the index of an access_mode, or None '''
        try:
            return self.access_modes.index(inc_mode)
        except ValueError:
            return None
# pylint: disable=too-many-instance-attributes
class OCPVC(OpenShiftCLI):
    ''' Manage a PersistentVolumeClaim through the oc command line tools. '''
    kind = 'pvc'
    # pylint: disable=too-many-arguments
    def __init__(self,
                 config,
                 verbose=False):
        ''' Constructor for OCPVC; config is a PersistentVolumeClaimConfig. '''
        super(OCPVC, self).__init__(config.namespace, config.kubeconfig)
        self.config = config
        self.namespace = config.namespace
        self._pvc = None
    @property
    def pvc(self):
        ''' the server-side claim, fetched lazily via get() '''
        if not self._pvc:
            self.get()
        return self._pvc
    @pvc.setter
    def pvc(self, data):
        ''' setter for the cached claim '''
        self._pvc = data
    def bound(self):
        '''return whether the pvc is bound to a volume'''
        return bool(self.pvc.get_volume_name())
    def exists(self):
        ''' return whether the pvc exists on the server '''
        return bool(self.pvc)
    def get(self):
        '''return pvc information '''
        result = self._get(self.kind, self.config.name)
        if result['returncode'] == 0:
            self.pvc = PersistentVolumeClaim(content=result['results'][0])
        elif '\"%s\" not found' % self.config.name in result['stderr']:
            # Treat "not found" as a successful, empty lookup.
            result['returncode'] = 0
            result['results'] = [{}]
        return result
    def delete(self):
        '''delete the claim'''
        return self._delete(self.kind, self.config.name)
    def create(self):
        '''create the claim from the config definition'''
        return self._create_from_content(self.config.name, self.config.data)
    def update(self):
        '''replace the server-side claim with the config definition'''
        return self._replace_content(self.kind, self.config.name, self.config.data)
    def needs_update(self):
        ''' verify an update is needed '''
        # A claim that is bound (or already attached to a volume) never updates.
        if self.pvc.get_volume_name() or self.pvc.is_bound():
            return False
        return not Utils.check_def_equal(self.config.data,
                                         self.pvc.yaml_dict,
                                         skip_keys=[],
                                         debug=True)
#pylint: disable=too-many-branches
def main():
    '''
    ansible oc module for pvc

    States:
        list    -> return the current claim, changed=False.
        absent  -> delete the claim if it exists.
        present -> create the claim, or update it when unbound and drifted.
    Check mode short-circuits before any mutating call.
    '''
    module = AnsibleModule(
        argument_spec=dict(
            kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
            state=dict(default='present', type='str',
                       choices=['present', 'absent', 'list']),
            debug=dict(default=False, type='bool'),
            name=dict(default=None, required=True, type='str'),
            namespace=dict(default=None, required=True, type='str'),
            volume_capacity=dict(default='1G', type='str'),
            access_modes=dict(default=None, type='list'),
        ),
        supports_check_mode=True,
    )
    pconfig = PersistentVolumeClaimConfig(module.params['name'],
                                          module.params['namespace'],
                                          module.params['kubeconfig'],
                                          module.params['access_modes'],
                                          module.params['volume_capacity'],
                                         )
    oc_pvc = OCPVC(pconfig, verbose=module.params['debug'])
    state = module.params['state']
    api_rval = oc_pvc.get()
    #####
    # Get
    #####
    if state == 'list':
        module.exit_json(changed=False, results=api_rval['results'], state="list")
    ########
    # Delete
    ########
    if state == 'absent':
        if oc_pvc.exists():
            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a delete.')
            api_rval = oc_pvc.delete()
            module.exit_json(changed=True, results=api_rval, state="absent")
        module.exit_json(changed=False, state="absent")
    if state == 'present':
        ########
        # Create
        ########
        if not oc_pvc.exists():
            if module.check_mode:
                module.exit_json(changed=False, msg='Would have performed a create.')
            # Create it here
            api_rval = oc_pvc.create()
            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)
            # return the created object
            api_rval = oc_pvc.get()
            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)
            module.exit_json(changed=True, results=api_rval, state="present")
        ########
        # Update
        ########
        # A bound claim must not be modified; report unchanged with a note.
        if oc_pvc.pvc.is_bound() or oc_pvc.pvc.get_volume_name():
            api_rval['msg'] = '##### - This volume is currently bound.  Will not update - ####'
            module.exit_json(changed=False, results=api_rval, state="present")
        if oc_pvc.needs_update():
            api_rval = oc_pvc.update()
            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)
            # return the created object
            api_rval = oc_pvc.get()
            if api_rval['returncode'] != 0:
                module.fail_json(msg=api_rval)
            module.exit_json(changed=True, results=api_rval, state="present")
        module.exit_json(changed=False, results=api_rval, state="present")
    # Unreachable unless a new state is added without handling above.
    module.exit_json(failed=True,
                     changed=False,
                     results='Unknown state passed. %s' % state,
                     state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# Import the Ansible module snippets (provides AnsibleModule); required at runtime.
from ansible.module_utils.basic import *
main()
| joelsmith/openshift-tools | ansible/roles/lib_openshift_3.2/library/oc_pvc.py | Python | apache-2.0 | 39,870 |
# Copyright 2020 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test parsing a `DeclareLaunchArgument` action."""
import io
import math
import textwrap
from launch.actions import TimerAction
from launch.frontend import Parser
from launch.substitutions import LaunchConfiguration
def test_timer():
    """Check that a literal <timer> period parses into a float TimerAction period."""
    launch_xml = textwrap.dedent(
        """\
        <launch>
            <timer period="5">
                <executable cmd="ls -las"/>
            </timer>
        </launch>
        """)
    root_entity, parser = Parser.load(io.StringIO(launch_xml))
    description = parser.parse_description(root_entity)
    timer = description.entities[0]
    assert isinstance(timer, TimerAction)
    assert isinstance(timer.period, float)
    assert math.isclose(timer.period, 5.)
    assert len(timer.actions) == 1
def test_timer_period_is_substitution():
    """Check that a $(var ...) period parses into a LaunchConfiguration list."""
    launch_xml = textwrap.dedent(
        """\
        <launch>
            <timer period="$(var my_period 5)">
                <executable cmd="ls -las"/>
            </timer>
        </launch>
        """)
    root_entity, parser = Parser.load(io.StringIO(launch_xml))
    description = parser.parse_description(root_entity)
    timer = description.entities[0]
    assert isinstance(timer, TimerAction)
    assert isinstance(timer.period, list)
    assert len(timer.period) == 1
    assert isinstance(timer.period[0], LaunchConfiguration)
    assert len(timer.actions) == 1
| ros2/launch | launch_xml/test/launch_xml/test_timer.py | Python | apache-2.0 | 1,975 |
#coding=utf-8
# Settings module for a 12306 (China Railway) ticket-query client.
# NOTE: Python 2 module (uses dict.iteritems below).
import os

# Basic settings

# requests settings
TIMEOUT = 5      # per-request timeout, in seconds
VERIFY = False   # skip TLS certificate verification on requests

# directories might be used
LOCATIONS = {
    'log': 'log',
    'data': 'data',
}

# stderr is redirected to this file
ERR_LOG_FILE = os.path.join(LOCATIONS['log'], 'err.log')
# log in this file
LOGGING_FILE = os.path.join(LOCATIONS['log'], 'requests.log')

# Data files fetched from / used against the 12306 site.
STATION_NAME_FILE = os.path.join(LOCATIONS['data'], 'station_name.js')
CAPTCHA_FILE = os.path.join(LOCATIONS['data'], 'captcha.png')
CRYPTO_JS = os.path.join(LOCATIONS['data'], 'crypto.js')
CRYPTO_SCRIPT = os.path.join(LOCATIONS['data'], 'do_crypto.js')

# Query settings
QUERY_INTERVAL = 1  # seconds between successive queries
# Namespaces / JSON keys used by the 12306 API payloads.
QUERY_ARGS_NS = 'leftTicketDTO'
TRAIN_DATA_JSON_KEY = 'queryLeftNewDTO'
LOGIN_NS = 'loginUserDTO'
USER_NS = 'userDTO'

# Ticket purpose: Chinese label -> API code, and code/label -> numeric id.
PURPOSE_CODES = {'学生': '0X00', '普通': 'ADULT'}
PURPOSE_ID = {'0X00': 3, '学生': 3, 'ADULT': 1, '普通': 1}

# Seat class: Chinese label -> lowercase field code in query results.
SEAT_CODES = {
    '商务座': 'swz',
    '特等座': 'tz',
    '一等座': 'zy',
    '二等座': 'ze',
    '高级软卧': 'gr',
    '软卧': 'rw',
    '硬卧': 'yw',
    '软座': 'rz',
    '硬座': 'yz',
    '无座': 'wz',
    '其他': 'qt',
}
# Seat class: uppercase code -> seat-type id used when ordering.
# (Extended below so labels and lowercase codes also resolve.)
SEAT_ID = {
    'SWZ': '9',
    'TZ': 'P',
    'ZY': 'M',
    'ZE': 'O',
    'GR': '6',
    'RW': '4',
    'YW': '3',
    'RZ': '2',
    'YZ': '1',
    'WZ': 'WZ',
    'QT': '',
}

# Endpoints of the 12306 site used by this client.
URL_BASE = 'https://kyfw.12306.cn/'
URLS = {
    'entry': URL_BASE + 'otn/',
    'station_name': URL_BASE + 'otn/resources/js/framework/station_name.js?station_version=1.8260',
    'query': URL_BASE + 'otn/leftTicket/queryT',
    'query_log': URL_BASE + 'otn/leftTicket/log',
    'login_captcha': URL_BASE + 'otn/passcodeNew/getPassCodeNew?module=login&rand=sjrand',
    'order_captcha': URL_BASE + 'otn/passcodeNew/getPassCodeNew?module=passenger&rand=randp',
    'check_captcha': URL_BASE + 'otn/passcodeNew/checkRandCodeAnsyn',
    'login_token': URL_BASE + 'otn/login/init',
    'order_init_token': URL_BASE + 'otn/leftTicket/init',
    'login': URL_BASE + 'otn/login/loginAysnSuggest',
    'check_login': URL_BASE + 'otn/login/checkUser',
    'passengers': URL_BASE + 'otn/confirmPassenger/getPassengerDTOs',
    'order_init_submit': URL_BASE + 'otn/leftTicket/submitOrderRequest',
    'order_confirm': URL_BASE + 'otn/confirmPassenger/initDc',
    'order_check': URL_BASE + 'otn/confirmPassenger/checkOrderInfo',
}

# 3rd party tools settings

# Setup for settings
import socket
# DEBUG is enabled only on the developer's own machine.
if socket.gethostname() in ['duankq-ThinkPad-X201', ]:
    DEBUG = True
else:
    DEBUG = False

import os
# Ensure the log/data directories exist (runs at import time).
for loc in LOCATIONS.values():
    if not os.path.isdir(loc):
        os.mkdir(loc)

# Extend SEAT_ID so both the Chinese label and the lowercase code map to
# the same id as the uppercase code. (Python 2 only: dict.iteritems.)
for (k, v) in SEAT_CODES.iteritems():
    SEAT_ID[k] = SEAT_ID[v.upper()]
    SEAT_ID[v] = SEAT_ID[v.upper()]
| Moonshile/fast12306 | src/core/settings.py | Python | apache-2.0 | 2,707 |
#-*- coding: utf-8 -*-
def factorial(n):
    """Return the factorial of n.

    Iterative implementation: the previous recursive version hit Python's
    default recursion limit (~1000) for large n, and this module calls
    factorial(900) by default. For n < 2 (including 0 and negative values,
    matching the old behavior) the result is 1.
    """
    result = 1
    for i in range(2, n + 1):
        result *= i
    return result
def fibonacci(n):
    """Return the nth fibonacci number (fib(0) == 0, fib(1) == 1).

    Iterative O(n) implementation: the previous naive double recursion was
    exponential in n (fibonacci(30) made over a million calls). For n < 2
    (including negatives, matching the old behavior) n itself is returned.
    """
    if n < 2:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
def fib_fac(x=30, y=900):
    # Compute and print fibonacci(x) and factorial(y).
    # NOTE: Python 2 print statements below.
    fib = fibonacci(x)
    fac = factorial(y)
    print "fibonacci({}):".format(x), fib
    print "factorial({}):".format(y), fac
if __name__ == "__main__":
    # Four alternative ways of building a ':'-separated string from the
    # numbers 0..99, kept to benchmark string-building strategies.
    # (Python 2: xrange / print statements.)

    def opc1():
        # Naive repeated concatenation; leaves a trailing ':'.
        fruits = tuple(str(i) for i in xrange(100))
        out = ''
        for fruit in fruits:
            out += fruit +':'
        return out

    def opc2():
        # Old-style % formatting against a pre-built format string.
        format_str = '%s:' * 100
        fruits = tuple(str(i) for i in xrange(100))
        out = format_str % fruits
        return out

    def opc3():
        # str.format against a pre-built format string.
        format_str = '{}:' * 100
        fruits = tuple(str(i) for i in xrange(100))
        out = format_str.format(*fruits)
        return out

    def opc4():
        # Idiomatic str.join. NOTE: unlike opc1-3 this has no trailing ':'.
        fruits = tuple(str(i) for i in xrange(100))
        out = ':'.join(fruits)
        return out

    import timeit
    # Only opc4 is actually timed here.
    print timeit.timeit(stmt=opc4, number=100)

    fib_fac()
| ealogar/curso-python | advanced/fib_fac.py | Python | apache-2.0 | 1,159 |
'''
Tree from:
http://www.quesucede.com/page/show/id/python-3-tree-implementation
'''
from urllib.parse import urlparse
import os
# Traversal/display mode constants.
(_ROOT, _DEPTH, _BREADTH) = range(3)


class Node:
    """A tree node: an identifier plus the identifiers of its children."""

    def __init__(self, identifier):
        self.__identifier = identifier
        self.__children = []

    @property
    def identifier(self):
        """Return this node's identifier."""
        return self.__identifier

    @property
    def children(self):
        """Return the list of child identifiers, in insertion order."""
        return self.__children

    def add_child(self, identifier):
        """Record *identifier* as a child of this node."""
        self.__children.append(identifier)


class Tree:
    """A tree keyed by node identifier, supporting display and traversal."""

    def __init__(self):
        self.__nodes = {}

    @property
    def nodes(self):
        """Return the mapping of identifier -> Node."""
        return self.__nodes

    def add_node(self, identifier, parent=None):
        """Create a node and attach it to *parent* (if given); return it.

        Bug fix: removed the leftover debug print that fired on every insert.
        """
        node = Node(identifier)
        self[identifier] = node
        if parent is not None:
            self[parent].add_child(identifier)
        return node

    def display(self, identifier, depth=_ROOT):
        """Pretty-print the subtree rooted at *identifier*, one node per line.

        Bug fix: the child loop previously re-printed the parent identifier
        for every child in addition to the recursive call, duplicating output.
        """
        children = self[identifier].children
        if depth == _ROOT:
            print("{0}".format(identifier))
        else:
            print("\t"*depth, "{0}".format(identifier))
        depth += 1
        for child in children:
            self.display(child, depth)  # recursive call

    def traverse(self, identifier, mode=_DEPTH):
        """Yield the subtree's identifiers depth-first or breadth-first."""
        yield identifier
        queue = self[identifier].children
        while queue:
            yield queue[0]
            expansion = self[queue[0]].children
            if mode == _DEPTH:
                queue = expansion + queue[1:]  # depth-first
            elif mode == _BREADTH:
                queue = queue[1:] + expansion  # width-first

    def __getitem__(self, key):
        return self.__nodes[key]

    def __setitem__(self, key, item):
        self.__nodes[key] = item
'''
tree = Tree()
t = print("{0}".format("palestras"))
tree.add_node("Harry") # root node
tree.add_node("Jane", t)
tree.add_node("Bill", "Harry")
tree.add_node("Joe", "Jane")
tree.add_node("Diane", "Jane")
tree.add_node("George", "Diane")
tree.add_node("Mary", "Diane")
tree.add_node("Jill", "George")
tree.add_node("Carol", "Jill")
tree.add_node("Grace", "Bill")
tree.add_node("Mark", "Jane")
tree.display("Harry")
print("***** DEPTH-FIRST ITERATION *****")
for node in tree.traverse("Harry"):
print(node)
print("***** BREADTH-FIRST ITERATION *****")
for node in tree.traverse("Harry", mode=_BREADTH):
print(node)
''' | glaudsonml/kurgan-ai | libs/Tree.py | Python | apache-2.0 | 2,507 |
from moto.dynamodb2.comparisons import get_comparison_func
from moto.dynamodb2.exceptions import IncorrectDataType
from moto.dynamodb2.models.utilities import bytesize
class DDBType(object):
    """
    Abbreviations for the DynamoDB attribute types.

    Official documentation at https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_AttributeValue.html
    """

    BINARY_SET = "BS"
    NUMBER_SET = "NS"
    STRING_SET = "SS"
    STRING = "S"
    NUMBER = "N"
    MAP = "M"
    LIST = "L"
    BOOLEAN = "BOOL"
    BINARY = "B"
    NULL = "NULL"


class DDBTypeConversion(object):
    # Reverse lookup built from the upper-case constants declared on DDBType:
    # abbreviation -> human readable name ("BS" -> "BINARY SET").
    _human_type_mapping = {
        abbreviation: attr_name.replace("_", " ")
        for attr_name, abbreviation in vars(DDBType).items()
        if attr_name.upper() == attr_name
    }

    @classmethod
    def get_human_type(cls, abbreviated_type):
        """
        Args:
            abbreviated_type(str): An attribute of DDBType

        Returns:
            str: The human readable form of the DDBType, or the input
            itself when the abbreviation is unknown.
        """
        try:
            return cls._human_type_mapping[abbreviated_type]
        except KeyError:
            return abbreviated_type
class DynamoType(object):
    """
    A single DynamoDB attribute value: a (type, value) pair.

    http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelDataTypes
    """

    def __init__(self, type_as_dict):
        # Copy-construct from another DynamoType, or unpack a raw
        # single-entry dict of the form {"S": "foo"} / {"N": "1"} / ...
        if type(type_as_dict) == DynamoType:
            self.type = type_as_dict.type
            self.value = type_as_dict.value
        else:
            self.type = list(type_as_dict)[0]
            self.value = list(type_as_dict.values())[0]
        # Recursively wrap container elements so nested values are
        # DynamoType instances too.
        if self.is_list():
            self.value = [DynamoType(val) for val in self.value]
        elif self.is_map():
            self.value = dict((k, DynamoType(v)) for k, v in self.value.items())

    def filter(self, projection_expressions):
        """Prune (in place) map attributes not named by the projection.

        ``projection_expressions`` are dotted paths: "a" keeps attribute
        "a" whole, while "a.b" keeps "a" and recursively filters its
        sub-map down to "b". No-op for non-map values.
        """
        nested_projections = [
            expr[0 : expr.index(".")] for expr in projection_expressions if "." in expr
        ]
        if self.is_map():
            expressions_to_delete = []
            for attr in self.value:
                if (
                    attr not in projection_expressions
                    and attr not in nested_projections
                ):
                    expressions_to_delete.append(attr)
                elif attr in nested_projections:
                    relevant_expressions = [
                        expr[len(attr + ".") :]
                        for expr in projection_expressions
                        if expr.startswith(attr + ".")
                    ]
                    self.value[attr].filter(relevant_expressions)
            # Deleted after iteration to avoid mutating the dict mid-loop.
            for expr in expressions_to_delete:
                self.value.pop(expr)

    def __hash__(self):
        return hash((self.type, self.value))

    def __eq__(self, other):
        return self.type == other.type and self.value == other.value

    def __ne__(self, other):
        return self.type != other.type or self.value != other.value

    # Ordering comparisons compare the natural Python values (cast_value).
    def __lt__(self, other):
        return self.cast_value < other.cast_value

    def __le__(self, other):
        return self.cast_value <= other.cast_value

    def __gt__(self, other):
        return self.cast_value > other.cast_value

    def __ge__(self, other):
        return self.cast_value >= other.cast_value

    def __repr__(self):
        return "DynamoType: {0}".format(self.to_json())

    def __add__(self, other):
        """Numeric addition; both operands must be numbers (type N)."""
        if self.type != other.type:
            raise TypeError("Different types of operandi is not allowed.")
        if self.is_number():
            # Numbers are stored as strings; only parse as float when a
            # '.' is present so integer arithmetic stays exact.
            self_value = float(self.value) if "." in self.value else int(self.value)
            other_value = float(other.value) if "." in other.value else int(other.value)
            return DynamoType(
                {DDBType.NUMBER: "{v}".format(v=self_value + other_value)}
            )
        else:
            raise IncorrectDataType()

    def __sub__(self, other):
        """Numeric subtraction; both operands must be numbers (type N)."""
        if self.type != other.type:
            raise TypeError("Different types of operandi is not allowed.")
        if self.type == DDBType.NUMBER:
            self_value = float(self.value) if "." in self.value else int(self.value)
            other_value = float(other.value) if "." in other.value else int(other.value)
            return DynamoType(
                {DDBType.NUMBER: "{v}".format(v=self_value - other_value)}
            )
        else:
            raise TypeError("Sum only supported for Numbers.")

    def __getitem__(self, item):
        # Maps are subscriptable by string key, lists by integer index.
        if isinstance(item, str):
            # If our DynamoType is a map it should be subscriptable with a key
            if self.type == DDBType.MAP:
                return self.value[item]
        elif isinstance(item, int):
            # If our DynamoType is a list is should be subscriptable with an index
            if self.type == DDBType.LIST:
                return self.value[item]
        raise TypeError(
            "This DynamoType {dt} is not subscriptable by a {it}".format(
                dt=self.type, it=type(item)
            )
        )

    def __setitem__(self, key, value):
        if isinstance(key, int):
            if self.is_list():
                if key >= len(self.value):
                    # DynamoDB doesn't care you are out of box just add it to the end.
                    self.value.append(value)
                else:
                    self.value[key] = value
        elif isinstance(key, str):
            if self.is_map():
                self.value[key] = value
        else:
            raise NotImplementedError("No set_item for {t}".format(t=type(key)))

    @property
    def cast_value(self):
        """This value as a plain Python object (int/float/set/list/dict/...)."""
        if self.is_number():
            try:
                return int(self.value)
            except ValueError:
                return float(self.value)
        elif self.is_set():
            # First character of the set type ("SS"/"NS"/"BS") is the
            # element type abbreviation.
            sub_type = self.type[0]
            return set([DynamoType({sub_type: v}).cast_value for v in self.value])
        elif self.is_list():
            return [DynamoType(v).cast_value for v in self.value]
        elif self.is_map():
            return dict([(k, DynamoType(v).cast_value) for k, v in self.value.items()])
        else:
            return self.value

    def child_attr(self, key):
        """
        Get Map or List children by key. str for Map, int for List.

        Returns DynamoType or None.
        """
        if isinstance(key, str) and self.is_map():
            if key in self.value:
                return DynamoType(self.value[key])

        if isinstance(key, int) and self.is_list():
            idx = key
            if 0 <= idx < len(self.value):
                return DynamoType(self.value[idx])

        return None

    def size(self):
        """Approximate byte size of this value (containers summed recursively)."""
        if self.is_number():
            value_size = len(str(self.value))
        elif self.is_set():
            sub_type = self.type[0]
            value_size = sum([DynamoType({sub_type: v}).size() for v in self.value])
        elif self.is_list():
            value_size = sum([v.size() for v in self.value])
        elif self.is_map():
            # Map size includes the byte size of the keys themselves.
            value_size = sum(
                [bytesize(k) + DynamoType(v).size() for k, v in self.value.items()]
            )
        elif type(self.value) == bool:
            value_size = 1
        else:
            value_size = bytesize(self.value)
        return value_size

    def to_json(self):
        # Back to the raw {"type": value} wire form.
        return {self.type: self.value}

    def compare(self, range_comparison, range_objs):
        """
        Compares this type against comparison filters
        """
        range_values = [obj.cast_value for obj in range_objs]
        comparison_func = get_comparison_func(range_comparison)
        return comparison_func(self.cast_value, *range_values)

    # Type predicates for the DynamoDB attribute kinds.
    def is_number(self):
        return self.type == DDBType.NUMBER

    def is_set(self):
        return self.type in (DDBType.STRING_SET, DDBType.NUMBER_SET, DDBType.BINARY_SET)

    def is_list(self):
        return self.type == DDBType.LIST

    def is_map(self):
        return self.type == DDBType.MAP

    def same_type(self, other):
        return self.type == other.type

    def pop(self, key, *args, **kwargs):
        # Only containers support removal; key is a map key or list index.
        if self.is_map() or self.is_list():
            self.value.pop(key, *args, **kwargs)
        else:
            raise TypeError("pop not supported for DynamoType {t}".format(t=self.type))
| spulec/moto | moto/dynamodb2/models/dynamo_type.py | Python | apache-2.0 | 8,167 |
import sys
def solve(B):
    """Run one round of the interactive protocol against the judge.

    Queries positions 1..10 (assumes B == 10 for this test set — the loop
    count is fixed, as in the original), records each returned bit, then
    submits the assembled answer and exits on a "N" verdict.
    """
    digits = [0] * B
    for position in range(1, 11):
        print(position)
        sys.stdout.flush()
        digits[position - 1] = int(input().strip())
    print("".join(map(str, digits)))
    sys.stdout.flush()
    verdict = input()
    if verdict == "N":
        sys.exit()
    return
# Read the number of test cases T and the answer length B from the judge,
# then run the interactive protocol once per case.
T, B = map(int, input().split())
for case in range(1, T+1):
    solve(B)
| zuun77/givemegoogletshirts | codejam/2020/qual/q4.py | Python | apache-2.0 | 411 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import packaging.version
from elasticsearch_dsl import Date, Document, Float, Keyword, Text, analyzer
from warehouse.search.utils import doc_type
# Analyzer for e-mail fields: tokenizes whole addresses/URLs, then
# lowercases, drops stop words and applies snowball stemming.
EmailAnalyzer = analyzer(
    "email",
    tokenizer="uax_url_email",
    filter=["lowercase", "stop", "snowball"],
)

# Analyzer for normalized project names: lowercase tokenization plus
# word delimiting so punctuated/camel-cased names match their parts.
NameAnalyzer = analyzer(
    "normalized_name",
    tokenizer="lowercase",
    filter=["lowercase", "word_delimiter"],
)
@doc_type
class Project(Document):
    """Elasticsearch document describing one project's release metadata."""

    # Core identity and version fields.
    name = Text()
    normalized_name = Text(analyzer=NameAnalyzer)
    version = Keyword(multi=True)
    latest_version = Keyword()
    # Free-text fields searched with snowball stemming.
    summary = Text(analyzer="snowball")
    description = Text(analyzer="snowball")
    author = Text()
    author_email = Text(analyzer=EmailAnalyzer)
    maintainer = Text()
    maintainer_email = Text(analyzer=EmailAnalyzer)
    license = Text()
    # Exact-match metadata.
    home_page = Keyword()
    download_url = Keyword()
    keywords = Text(analyzer="snowball")
    platform = Keyword()
    created = Date()
    classifiers = Keyword(multi=True)
    # Popularity-derived ranking score.
    zscore = Float()

    @classmethod
    def from_db(cls, release):
        """Build a search document from a database release record."""
        obj = cls(meta={"id": release.normalized_name})
        obj["name"] = release.name
        obj["normalized_name"] = release.normalized_name
        # Newest versions first, ordered by PEP 440 parsing (not lexically).
        obj["version"] = sorted(
            release.all_versions, key=lambda r: packaging.version.parse(r), reverse=True
        )
        obj["latest_version"] = release.latest_version
        obj["summary"] = release.summary
        obj["description"] = release.description
        obj["author"] = release.author
        obj["author_email"] = release.author_email
        obj["maintainer"] = release.maintainer
        obj["maintainer_email"] = release.maintainer_email
        obj["home_page"] = release.home_page
        obj["download_url"] = release.download_url
        obj["keywords"] = release.keywords
        obj["platform"] = release.platform
        obj["created"] = release.created
        obj["classifiers"] = release.classifiers
        obj["zscore"] = release.zscore

        return obj

    class Index:
        # make sure this class can match any index so it will always be used to
        # deserialize data coming from elasticsearch.
        name = "*"
| pypa/warehouse | warehouse/packaging/search.py | Python | apache-2.0 | 2,723 |
# Copyright (c) OpenMMLab. All rights reserved.
# flake8: noqa
import warnings

# Backwards-compatibility shim: re-export everything from the correctly
# spelled ``formatting`` module under this old misspelled module name,
# and warn anyone still importing it.
from .formatting import *

warnings.warn('DeprecationWarning: mmdet.datasets.pipelines.formating will be '
              'deprecated, please replace it with '
              'mmdet.datasets.pipelines.formatting.')
| open-mmlab/mmdetection | mmdet/datasets/pipelines/formating.py | Python | apache-2.0 | 293 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateLake
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dataplex
# [START dataplex_v1_generated_DataplexService_UpdateLake_async]
from google.cloud import dataplex_v1
async def sample_update_lake():
    """Update a Dataplex lake and wait for the long-running operation."""
    # Create a client
    client = dataplex_v1.DataplexServiceAsyncClient()

    # Initialize request argument(s)
    request = dataplex_v1.UpdateLakeRequest(
    )

    # Make the request. Bug fix: the async client's method is a coroutine,
    # so it must be awaited to obtain the operation handle — previously
    # ``operation.result()`` was invoked on the un-awaited coroutine object.
    operation = await client.update_lake(request=request)

    print("Waiting for operation to complete...")

    # AsyncOperation.result() is itself awaitable.
    response = await operation.result()

    # Handle the response
    print(response)
# [END dataplex_v1_generated_DataplexService_UpdateLake_async]
| googleapis/python-dataplex | samples/generated_samples/dataplex_v1_generated_dataplex_service_update_lake_async.py | Python | apache-2.0 | 1,513 |
import logging
import sys
from typing import Any, Callable
from django.conf import settings
from django.core.management.base import BaseCommand, \
CommandError, CommandParser
from tornado import ioloop
from tornado.log import app_log
# We must call zerver.tornado.ioloop_logging.instrument_tornado_ioloop
# before we import anything else from our project in order for our
# Tornado load logging to work; otherwise we might accidentally import
# zerver.lib.queue (which will instantiate the Tornado ioloop) before
# this.
from zerver.tornado.ioloop_logging import instrument_tornado_ioloop
settings.RUNNING_INSIDE_TORNADO = True
instrument_tornado_ioloop()
from zerver.lib.debug import interactive_debug_listen
from zerver.tornado.application import create_tornado_application, \
setup_tornado_rabbitmq
from zerver.tornado.autoreload import start as zulip_autoreload_start
from zerver.tornado.event_queue import add_client_gc_hook, \
missedmessage_hook, process_notification, setup_event_queue
from zerver.tornado.sharding import notify_tornado_queue_name, tornado_return_queue_name
from zerver.tornado.socket import respond_send_message
if settings.USING_RABBITMQ:
from zerver.lib.queue import get_queue_client
def handle_callback_exception(callback: Callable[..., Any]) -> None:
    """Log an unexpected exception raised from a Tornado ioloop callback.

    Installed as the ioloop's ``handle_callback_exception`` hook below, so
    it runs inside the exception handler and ``logging.exception`` /
    ``exc_info=True`` can pick up the active traceback.
    """
    logging.exception("Exception in callback")
    app_log.error("Exception in callback %r", callback, exc_info=True)
class Command(BaseCommand):
    help = "Starts a Tornado Web server wrapping Django."

    def add_arguments(self, parser: CommandParser) -> None:
        """Register the optional addrport positional and Tornado flags."""
        parser.add_argument('addrport', nargs="?", type=str,
                            help='[optional port number or ipaddr:port]\n '
                            '(use multiple ports to start multiple servers)')
        parser.add_argument('--nokeepalive', action='store_true',
                            dest='no_keep_alive', default=False,
                            help="Tells Tornado to NOT keep alive http connections.")
        parser.add_argument('--noxheaders', action='store_false',
                            dest='xheaders', default=True,
                            help="Tells Tornado to NOT override remote IP with X-Real-IP.")

    def handle(self, addrport: str, **options: bool) -> None:
        """Parse addrport, then configure and run the Tornado server."""
        interactive_debug_listen()

        import django
        from tornado import httpserver

        # Split "ipaddr:port" into its parts; a bare "port" has no colon.
        try:
            addr, port = addrport.split(':')
        except ValueError:
            addr, port = '', addrport
        if not addr:
            addr = '127.0.0.1'

        if not port.isdigit():
            raise CommandError("%r is not a valid port number." % (port,))

        xheaders = options.get('xheaders', True)
        no_keep_alive = options.get('no_keep_alive', False)
        quit_command = 'CTRL-C'

        if settings.DEBUG:
            logging.basicConfig(level=logging.INFO,
                                format='%(asctime)s %(levelname)-8s %(message)s')

        def inner_run() -> None:
            # Activate translations, print startup banner, wire up the
            # RabbitMQ consumers and start the Tornado ioloop.
            from django.conf import settings
            from django.utils import translation
            translation.activate(settings.LANGUAGE_CODE)

            print("Validating Django models.py...")
            self.check(display_num_errors=True)
            print("\nDjango version %s" % (django.get_version(),))
            print("Tornado server is running at http://%s:%s/" % (addr, port))
            print("Quit the server with %s." % (quit_command,))

            if settings.USING_RABBITMQ:
                queue_client = get_queue_client()
                # Process notifications received via RabbitMQ
                queue_client.register_json_consumer(notify_tornado_queue_name(int(port)),
                                                    process_notification)
                queue_client.register_json_consumer(tornado_return_queue_name(int(port)),
                                                    respond_send_message)

            try:
                # Application is an instance of Django's standard wsgi handler.
                application = create_tornado_application(int(port))
                if settings.AUTORELOAD:
                    zulip_autoreload_start()

                # start tornado web server in single-threaded mode
                http_server = httpserver.HTTPServer(application,
                                                    xheaders=xheaders,
                                                    no_keep_alive=no_keep_alive)
                http_server.listen(int(port), address=addr)

                setup_event_queue(int(port))
                add_client_gc_hook(missedmessage_hook)
                setup_tornado_rabbitmq()

                # Record the port for the ioloop load-logging instrumentation.
                from zerver.tornado.ioloop_logging import logging_data
                logging_data['port'] = port
                instance = ioloop.IOLoop.instance()

                if django.conf.settings.DEBUG:
                    instance.set_blocking_log_threshold(5)
                    instance.handle_callback_exception = handle_callback_exception
                instance.start()
            except KeyboardInterrupt:
                sys.exit(0)

        inner_run()
| rishig/zulip | zerver/management/commands/runtornado.py | Python | apache-2.0 | 5,167 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import testtools
from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.scenario import manager
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestVolumeBootPattern(manager.EncryptionScenarioTest):
    """Scenario tests around booting Nova servers from Cinder volumes."""

    # Boot from volume scenario is quite slow, and needs extra
    # breathing room to get through deletes in the time allotted.
    TIMEOUT_SCALING_FACTOR = 2

    @classmethod
    def skip_checks(cls):
        """Skip the whole class when volume snapshots are disabled."""
        super(TestVolumeBootPattern, cls).skip_checks()
        if not CONF.volume_feature_enabled.snapshot:
            raise cls.skipException("Cinder volume snapshots are disabled")

    def _create_volume_from_image(self):
        """Create a bootable volume from the configured compute image."""
        img_uuid = CONF.compute.image_ref
        vol_name = data_utils.rand_name(
            self.__class__.__name__ + '-volume-origin')
        return self.create_volume(name=vol_name, imageRef=img_uuid)

    def _get_bdm(self, source_id, source_type, delete_on_termination=False):
        """Build a v2 block-device-mapping dict booting from *source_id*."""
        bd_map_v2 = [{
            'uuid': source_id,
            'source_type': source_type,
            'destination_type': 'volume',
            'boot_index': 0,
            'delete_on_termination': delete_on_termination}]
        return {'block_device_mapping_v2': bd_map_v2}

    def _boot_instance_from_resource(self, source_id,
                                     source_type,
                                     keypair=None,
                                     security_group=None,
                                     delete_on_termination=False):
        """Boot a server from a volume/snapshot via block device mapping."""
        create_kwargs = dict()
        if keypair:
            create_kwargs['key_name'] = keypair['name']
        if security_group:
            create_kwargs['security_groups'] = [
                {'name': security_group['name']}]
        create_kwargs.update(self._get_bdm(
            source_id,
            source_type,
            delete_on_termination=delete_on_termination))

        # image_id='' since the root disk comes from the mapping, not Glance.
        return self.create_server(image_id='', **create_kwargs)

    def _delete_server(self, server):
        """Delete *server* and wait until it is fully gone."""
        self.servers_client.delete_server(server['id'])
        waiters.wait_for_server_termination(self.servers_client, server['id'])

    @decorators.idempotent_id('557cd2c2-4eb8-4dce-98be-f86765ff311b')
    @testtools.skipUnless(CONF.network.public_network_id,
                          'The public_network_id option must be specified.')
    @utils.services('compute', 'volume', 'image')
    def test_volume_boot_pattern(self):
        """This test case attempts to reproduce the following steps:

        * Create in Cinder some bootable volume importing a Glance image
        * Boot an instance from the bootable volume
        * Write content to the volume
        * Delete an instance and Boot a new instance from the volume
        * Check written content in the instance
        * Create a volume snapshot while the instance is running
        * Boot an additional instance from the new snapshot based volume
        * Check written content in the instance booted from snapshot
        """

        LOG.info("Creating keypair and security group")
        keypair = self.create_keypair()
        security_group = self._create_security_group()

        # create an instance from volume
        LOG.info("Booting instance 1 from volume")
        volume_origin = self._create_volume_from_image()
        instance_1st = self._boot_instance_from_resource(
            source_id=volume_origin['id'],
            source_type='volume',
            keypair=keypair,
            security_group=security_group)
        LOG.info("Booted first instance: %s", instance_1st)

        # write content to volume on instance
        LOG.info("Setting timestamp in instance %s", instance_1st)
        ip_instance_1st = self.get_server_ip(instance_1st)
        timestamp = self.create_timestamp(ip_instance_1st,
                                          private_key=keypair['private_key'])

        # delete instance
        LOG.info("Deleting first instance: %s", instance_1st)
        self._delete_server(instance_1st)

        # create a 2nd instance from volume
        instance_2nd = self._boot_instance_from_resource(
            source_id=volume_origin['id'],
            source_type='volume',
            keypair=keypair,
            security_group=security_group)
        LOG.info("Booted second instance %s", instance_2nd)

        # check the content of written file
        LOG.info("Getting timestamp in instance %s", instance_2nd)
        ip_instance_2nd = self.get_server_ip(instance_2nd)
        timestamp2 = self.get_timestamp(ip_instance_2nd,
                                        private_key=keypair['private_key'])
        self.assertEqual(timestamp, timestamp2)

        # snapshot a volume
        LOG.info("Creating snapshot from volume: %s", volume_origin['id'])
        snapshot = self.create_volume_snapshot(volume_origin['id'], force=True)

        # create a 3rd instance from snapshot
        LOG.info("Creating third instance from snapshot: %s", snapshot['id'])
        volume = self.create_volume(snapshot_id=snapshot['id'],
                                    size=snapshot['size'])
        LOG.info("Booting third instance from snapshot")
        server_from_snapshot = (
            self._boot_instance_from_resource(source_id=volume['id'],
                                              source_type='volume',
                                              keypair=keypair,
                                              security_group=security_group))
        LOG.info("Booted third instance %s", server_from_snapshot)

        # check the content of written file
        LOG.info("Logging into third instance to get timestamp: %s",
                 server_from_snapshot)
        server_from_snapshot_ip = self.get_server_ip(server_from_snapshot)
        timestamp3 = self.get_timestamp(server_from_snapshot_ip,
                                        private_key=keypair['private_key'])
        self.assertEqual(timestamp, timestamp3)

    @decorators.idempotent_id('05795fb2-b2a7-4c9f-8fac-ff25aedb1489')
    @decorators.attr(type='slow')
    @utils.services('compute', 'image', 'volume')
    def test_create_server_from_volume_snapshot(self):
        """Boot a server from a volume snapshot and verify its lineage."""
        # Create a volume from an image
        boot_volume = self._create_volume_from_image()

        # Create a snapshot
        boot_snapshot = self.create_volume_snapshot(boot_volume['id'])

        # Create a server from a volume snapshot
        server = self._boot_instance_from_resource(
            source_id=boot_snapshot['id'],
            source_type='snapshot',
            delete_on_termination=True)

        server_info = self.servers_client.show_server(server['id'])['server']

        # The created volume when creating a server from a snapshot
        created_volume = server_info['os-extended-volumes:volumes_attached']

        self.assertNotEmpty(created_volume, "No volume attachment found.")

        created_volume_info = self.volumes_client.show_volume(
            created_volume[0]['id'])['volume']

        # Verify the server was created from the snapshot
        self.assertEqual(
            boot_volume['volume_image_metadata']['image_id'],
            created_volume_info['volume_image_metadata']['image_id'])
        self.assertEqual(boot_snapshot['id'],
                         created_volume_info['snapshot_id'])
        self.assertEqual(server['id'],
                         created_volume_info['attachments'][0]['server_id'])
        self.assertEqual(created_volume[0]['id'],
                         created_volume_info['attachments'][0]['volume_id'])

    @decorators.idempotent_id('36c34c67-7b54-4b59-b188-02a2f458a63b')
    @utils.services('compute', 'volume', 'image')
    def test_create_ebs_image_and_check_boot(self):
        """Snapshot a volume-backed server into an image and boot from it."""
        # create an instance from volume
        volume_origin = self._create_volume_from_image()
        instance = self._boot_instance_from_resource(
            source_id=volume_origin['id'],
            source_type='volume',
            delete_on_termination=True)

        # create EBS image
        image = self.create_server_snapshot(instance)

        # delete instance
        self._delete_server(instance)

        # boot instance from EBS image
        instance = self.create_server(image_id=image['id'])

        # just ensure that instance booted

        # delete instance
        self._delete_server(instance)

    @decorators.idempotent_id('cb78919a-e553-4bab-b73b-10cf4d2eb125')
    @testtools.skipUnless(CONF.compute_feature_enabled.attach_encrypted_volume,
                          'Encrypted volume attach is not supported')
    @utils.services('compute', 'volume')
    def test_boot_server_from_encrypted_volume_luks(self):
        """Boot a server from a LUKS-encrypted volume."""
        # Create an encrypted volume
        volume = self.create_encrypted_volume('nova.volume.encryptors.'
                                              'luks.LuksEncryptor',
                                              volume_type='luks')

        self.volumes_client.set_bootable_volume(volume['id'], bootable=True)

        # Boot a server from the encrypted volume
        server = self._boot_instance_from_resource(
            source_id=volume['id'],
            source_type='volume',
            delete_on_termination=False)

        server_info = self.servers_client.show_server(server['id'])['server']
        created_volume = server_info['os-extended-volumes:volumes_attached']
        self.assertEqual(volume['id'], created_volume[0]['id'])
| Juniper/tempest | tempest/scenario/test_volume_boot_pattern.py | Python | apache-2.0 | 10,161 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate a series of TensorFlow graphs that become tflite test cases.
Usage:
generate_examples <output directory>
bazel run //tensorflow/lite/testing:generate_examples
To more easily debug failures use (or override) the --save_graphdefs flag to
place text proto graphdefs into the generated zip files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import operator
import os
import random
import re
import string
import tempfile
import traceback
import zipfile
import numpy as np
from six import StringIO
from six.moves import xrange
# TODO(aselle): Disable GPU for now
# Set before the TensorFlow import below so no GPUs are visible.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# pylint: disable=g-import-not-at-top
import tensorflow as tf
from google.protobuf import text_format
# TODO(aselle): switch to TensorFlow's resource_loader
from tensorflow.lite.testing import generate_examples_report as report_lib
from tensorflow.lite.testing import string_util_wrapper
from tensorflow.python.framework import test_util
from tensorflow.python.framework import graph_util as tf_graph_util
from tensorflow.python.ops import rnn
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import spectral_ops_test_util
# Seed used to make example generation deterministic.
RANDOM_SEED = 342
TEST_INPUT_DEPTH = 3

# A map from regular expression to bug number. Any test failure with label
# matching the expression will be considered due to the corresponding bug.
KNOWN_BUGS = {
    # TOCO doesn't support scalars as input.
    # Concat doesn't work with a single input tensor
    r"concat.*num_tensors=1": "67378344",
    # Transposition in MatMul is not fully supported.
    "fully_connected.*transpose_a=True": "67586970",
    # Softmax graphs are too complex.
    r"softmax.*dim=0": "67749831",
    # BatchToSpaceND only supports 4D tensors.
    r"batch_to_space_nd.*input_shape=\[8,2,2,2,1,1\]": "70594733",
    # Div will use floordiv.
    r"div.*int32": "72051395",
}
class Options(object):
  """All options for example generation."""

  def __init__(self):
    # Directory where the outputs will go.
    self.output_path = None
    # Particular zip to output.
    self.zip_to_output = None
    # Path to toco tool.
    self.toco = None
    # If a particular model is affected by a known bug count it as a Toco
    # error.
    self.known_bugs_are_errors = False
    # Raise an exception if any converter error is encountered.
    self.ignore_converter_errors = False
    # Include intermediate graphdefs in the output zip files.
    self.save_graphdefs = False
    # Whether the TFLite Flex converter is being used.
    self.run_with_flex = False
    # The function to convert a TensorFLow model to TFLite model.
    # See the document for `toco_convert` function for its required signature.
    # TODO(ycling): Decouple `toco_convert` function from this module, and
    # remove the `toco` attribute in this class.
    self.tflite_convert_function = toco_convert
    # A map from regular expression to bug number. Any test failure with label
    # matching the expression will be considered due to the corresponding bug.
    self.known_bugs = KNOWN_BUGS
# A map from names to functions which make test cases.
_MAKE_TEST_FUNCTIONS_MAP = {}
# A decorator to register the make test functions.
# Usage:
# All the make_*_test should be registered. Example:
# @register_make_test_function()
# def make_conv_tests(options):
# # ...
# If a function is decorated by other decorators, it's required to specify the
# name explicitly. Example:
# @register_make_test_function(name="make_unidirectional_sequence_lstm_tests")
# @test_util.enable_control_flow_v2
# def make_unidirectional_sequence_lstm_tests(options):
# # ...
def register_make_test_function(name=None):
def decorate(function, name=name):
if name is None:
name = function.__name__
_MAKE_TEST_FUNCTIONS_MAP[name] = function
return decorate
class ExtraTocoOptions(object):
  """Extra TOCO knobs beyond the input/output/shape essentials.

  `toco_options` turns these attributes into command-line flags.
  """

  def __init__(self):
    # Drop control dependency nodes from the graph before conversion.
    self.drop_control_dependency = False
    # Permit custom (non-builtin) ops during the toco conversion.
    self.allow_custom_ops = False
    # RNN state specification string used to support rnn / lstm cells.
    self.rnn_states = None
    # Tri-state flag: True/False to request splitting the 5 LSTM inputs
    # into 18 TFLite inputs (or not); None leaves the flag unset.
    self.split_tflite_lstm_inputs = None
def toco_options(data_types,
                 input_arrays,
                 output_arrays,
                 shapes,
                 extra_toco_options=None):
  """Create TOCO options to process a model.

  Args:
    data_types: input and inference types used by TOCO.
    input_arrays: names of the input tensors
    output_arrays: name of the output tensors
    shapes: shapes of the input tensors
    extra_toco_options: additional toco options (an `ExtraTocoOptions`
      instance). Defaults to a freshly constructed `ExtraTocoOptions`.

  Returns:
    the options in a string.
  """
  # BUGFIX: the default used to be `extra_toco_options=ExtraTocoOptions()`,
  # a mutable default argument evaluated once and shared between all calls.
  # Construct a fresh instance per call instead.
  if extra_toco_options is None:
    extra_toco_options = ExtraTocoOptions()

  # Scalar/empty shapes are skipped from the --input_shapes flag.
  shape_str = ":".join([",".join(str(y) for y in x) for x in shapes if x])
  inference_type = "FLOAT"
  # TODO(ahentz): if we get multi-input quantization to work we need this
  # to change
  if data_types[0] == "QUANTIZED_UINT8":
    inference_type = "QUANTIZED_UINT8"
  s = (" --input_data_types=%s" % ",".join(data_types) +
       " --inference_type=%s" % inference_type +
       " --input_format=TENSORFLOW_GRAPHDEF" + " --output_format=TFLITE" +
       " --input_arrays=%s" % ",".join(input_arrays) +
       " --output_arrays=%s" % ",".join(output_arrays))
  if shape_str:
    s += (" --input_shapes=%s" % shape_str)
  if extra_toco_options.drop_control_dependency:
    s += " --drop_control_dependency"
  if extra_toco_options.allow_custom_ops:
    s += " --allow_custom_ops"
  if extra_toco_options.rnn_states:
    s += (" --rnn_states='" + extra_toco_options.rnn_states + "'")
  if extra_toco_options.split_tflite_lstm_inputs is not None:
    if extra_toco_options.split_tflite_lstm_inputs:
      s += " --split_tflite_lstm_inputs=true"
    else:
      s += " --split_tflite_lstm_inputs=false"
  return s
def format_result(t):
  """Serialize a numpy tensor into the textual form used by test specs."""
  string_kinds = (np.dtype(np.string_).kind, np.dtype(np.object_).kind)
  if t.dtype.kind in string_kinds:
    # String tensors are hex-serialized via the TFLite string util.
    return string_util_wrapper.SerializeAsHexString(t.flatten())
  # Numeric tensors: print 9 digits after the decimal point so the precision
  # is good enough for comparisons.
  return ",".join("{:.9f}".format(value) for value in t.flatten())
def write_examples(fp, examples):
  """Given a list `examples`, write a text format representation.

  The file format is csv like with a simple repeated pattern. We would like
  to use proto here, but we can't yet due to interfacing with the Android
  team using this format.

  Args:
    fp: File-like object to write to.
    examples: Example dictionary consisting of keys "inputs" and "outputs"
  """

  def dump_tensor(fp, x):
    """Write one tensor (dtype, shape, values) in TFLITE example format."""
    fp.write("dtype,%s\n" % x.dtype)
    fp.write("shape," + ",".join(map(str, x.shape)) + "\n")
    fp.write("values," + format_result(x) + "\n")

  fp.write("test_cases,%d\n" % len(examples))
  for example in examples:
    fp.write("inputs,%d\n" % len(example["inputs"]))
    for tensor in example["inputs"]:
      dump_tensor(fp, tensor)
    fp.write("outputs,%d\n" % len(example["outputs"]))
    for tensor in example["outputs"]:
      dump_tensor(fp, tensor)
def write_test_cases(fp, model_name, examples):
  """Given a dictionary of `examples`, write a text format representation.

  The file format is protocol-buffer-like, even though we don't use proto due
  to the needs of the Android team.

  Args:
    fp: File-like object to write to.
    model_name: Filename where the model was written to, relative to filename.
    examples: Example dictionary consisting of keys "inputs" and "outputs"
  """
  fp.write("load_model: %s\n" % os.path.basename(model_name))
  for example in examples:
    # "reshape" block: one line per input with its shape.
    fp.write("reshape {\n")
    for tensor in example["inputs"]:
      shape_csv = ",".join(map(str, tensor.shape))
      fp.write(" input: \"" + shape_csv + "\"\n")
    fp.write("}\n")
    # "invoke" block: serialized input values, then expected outputs.
    fp.write("invoke {\n")
    for tensor in example["inputs"]:
      fp.write(" input: \"" + format_result(tensor) + "\"\n")
    for tensor in example["outputs"]:
      fp.write(" output: \"" + format_result(tensor) + "\"\n")
      shape_csv = ",".join(str(dim) for dim in tensor.shape)
      fp.write(" output_shape: \"" + shape_csv + "\"\n")
    fp.write("}\n")
# Map from TF dtype to (numpy dtype, TOCO data type name).
# Note tf.float16 maps to inference type "FLOAT" (no 16-bit float TOCO type
# is used here).
_TF_TYPE_INFO = {
    tf.float32: (np.float32, "FLOAT"),
    tf.float16: (np.float16, "FLOAT"),
    tf.int32: (np.int32, "INT32"),
    tf.uint8: (np.uint8, "QUANTIZED_UINT8"),
    tf.int16: (np.int16, "QUANTIZED_INT16"),
    tf.int64: (np.int64, "INT64"),
    tf.bool: (np.bool, "BOOL"),
    tf.string: (np.string_, "STRING"),
}
def create_tensor_data(dtype, shape, min_value=-100, max_value=100):
  """Build tensor data spreading the range [min_value, max_value)."""
  # Map a TF dtype to its numpy counterpart; numpy dtypes pass through.
  if dtype in _TF_TYPE_INFO:
    dtype = _TF_TYPE_INFO[dtype][0]

  # NOTE(review): after the mapping above `dtype` is a numpy type, yet the
  # comparisons below are against tf dtypes. This appears to rely on
  # tf.DType equality accepting numpy types (tf.float32 == np.float32) --
  # confirm before restructuring.
  if dtype in (tf.float32, tf.float16):
    value = (max_value-min_value)*np.random.random_sample(shape)+min_value
  elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):
    # randint's upper bound is exclusive, so +1 makes max_value reachable.
    value = np.random.randint(min_value, max_value+1, shape)
  elif dtype == tf.bool:
    value = np.random.choice([True, False], size=shape)
  elif dtype == np.string_:
    # Not the best strings, but they will do for some basic testing.
    letters = list(string.ascii_uppercase)
    return np.random.choice(letters, size=shape).astype(dtype)
  # Scalars keep a 0-d numpy value of the target dtype; arrays are cast.
  return np.dtype(dtype).type(value) if np.isscalar(value) else value.astype(
      dtype)
def create_scalar_data(dtype, min_value=-100, max_value=100):
  """Build scalar tensor data range from min_value to max_value exclusively."""
  # Map a TF dtype to its numpy counterpart; numpy dtypes pass through.
  if dtype in _TF_TYPE_INFO:
    dtype = _TF_TYPE_INFO[dtype][0]

  # NOTE(review): as in create_tensor_data, these comparisons of the mapped
  # numpy type against tf dtypes rely on tf.DType equality accepting numpy
  # types -- confirm before restructuring.
  if dtype in (tf.float32, tf.float16):
    value = (max_value - min_value) * np.random.random() + min_value
  elif dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):
    # randint's upper bound is exclusive, so +1 makes max_value reachable.
    value = np.random.randint(min_value, max_value + 1)
  return np.array(value, dtype=dtype)
def freeze_graph(session, outputs):
  """Freeze the current graph.

  Args:
    session: Tensorflow sessions containing the graph
    outputs: List of output tensors

  Returns:
    The frozen graph_def.
  """
  output_node_names = [tensor.op.name for tensor in outputs]
  graph_def = session.graph.as_graph_def()
  return tf_graph_util.convert_variables_to_constants(
      session, graph_def, output_node_names)
@register_make_test_function()
def make_control_dep_tests(options):
  """Make a set of tests that use control dependencies."""

  test_parameters = [{
      "input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],
  }]

  def build_graph(parameters):
    """Conv2d guarded by a trivially-true assert as a control dependency."""
    inp = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    conv_filter = tf.zeros((3, 3, TEST_INPUT_DEPTH, 8), tf.float32)
    assert_op = tf.assert_greater_equal(inp, inp - 1)
    with tf.control_dependencies([assert_op]):
      out = tf.nn.conv2d(
          inp, conv_filter, strides=(1, 1, 1, 1), padding="SAME")
    return [inp], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_values = create_tensor_data(tf.float32, parameters["input_shape"])
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  extra_toco_options = ExtraTocoOptions()
  # The converter is expected to drop the assert's control dependency.
  extra_toco_options.drop_control_dependency = True
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      extra_toco_options,
      expected_tf_failures=3)
def toco_convert(options, graph_def, input_tensors, output_tensors, **kwargs):
  """Convert a model's graph def into a tflite model.

  NOTE: this currently shells out to the toco binary, but we would like
  convert to Python API tooling in the future.

  Args:
    options: An Options instance.
    graph_def: A GraphDef object.
    input_tensors: List of input tensor tuples `(name, shape, type)`.
    output_tensors: List of output tensors (names).
    **kwargs: Extra options to be passed.

  Returns:
    output tflite model, log_txt from conversion
    or None, log_txt if it did not convert properly.
  """
  # Convert ophint ops if presented.
  graph_def = tf.lite.experimental.convert_op_hints_to_stubs(
      graph_def=graph_def)
  graph_def_str = graph_def.SerializeToString()
  extra_toco_options = kwargs.get("extra_toco_options", ExtraTocoOptions())
  test_params = kwargs.get("test_params", {})
  input_arrays = [x[0] for x in input_tensors]
  data_types = [_TF_TYPE_INFO[x[2]][1] for x in input_tensors]

  if test_params.get("fully_quantize", False):
    # Full-integer post-training quantization path: use the Python converter
    # with a random calibration dataset.
    with tempfile.NamedTemporaryFile() as graphdef_file:
      graphdef_file.write(graph_def_str)
      graphdef_file.flush()
      input_shapes = get_input_shapes_map(input_tensors)
      converter = tf.lite.TocoConverter.from_frozen_graph(
          graphdef_file.name, input_arrays, output_tensors, input_shapes)

      def representative_dataset(input_tensors):
        """One random calibration sample per (shaped) input tensor."""
        calibration_inputs = []
        for _, shape, _ in input_tensors:
          if shape:
            dims = [dim.value for dim in shape.dims]
            calibration_inputs.append(
                np.random.uniform(-1, 1, tuple(dims)).astype(np.float32))
        return calibration_inputs

      def representative_dataset_gen():
        for _ in range(100):
          yield representative_dataset(input_tensors)

      converter.target_spec.supported_ops = [
          tf.lite.OpsSet.TFLITE_BUILTINS_INT8
      ]
      converter.representative_dataset = representative_dataset_gen
      try:
        tflite_model = converter.convert()
        return tflite_model, ""
      except Exception as e:
        log = "{0}\n{1}".format(str(e), traceback.format_exc())
        return None, log
  else:
    opts = toco_options(
        data_types=data_types,
        input_arrays=input_arrays,
        shapes=[x[1] for x in input_tensors],
        output_arrays=output_tensors,
        extra_toco_options=extra_toco_options)
    # BUGFIX: `bin_path` was never assigned, so this branch always raised
    # NameError. The converter binary path comes from the options (see
    # `Options.toco`).
    bin_path = options.toco
    with tempfile.NamedTemporaryFile() as graphdef_file, \
         tempfile.NamedTemporaryFile() as output_file, \
         tempfile.NamedTemporaryFile("w+") as stdout_file:
      graphdef_file.write(graph_def_str)
      graphdef_file.flush()

      # TODO(aselle): Switch this to subprocess at some point.
      if "pb2lite" in bin_path and options.run_with_flex:
        opts = ("--input_arrays={0} --output_arrays={1}".format(
            ",".join(input_arrays), ",".join(output_tensors)))
      elif options.run_with_flex:
        opts += " --enable_select_tf_ops --force_select_tf_ops"
      cmd = ("%s --input_file=%s --output_file=%s %s > %s 2>&1" %
             (bin_path, graphdef_file.name, output_file.name, opts,
              stdout_file.name))
      exit_code = os.system(cmd)
      log = (
          cmd + "exited with code %d" % exit_code + "\n------------------\n" +
          stdout_file.read())
      return (None if exit_code != 0 else output_file.read()), log
def get_input_shapes_map(input_tensors):
  """Gets a map of input names to shapes.

  Args:
    input_tensors: List of input tensor tuples `(name, shape, type)`.

  Returns:
    {string : list of integers}.
  """
  input_shapes = {}
  for name, shape, _ in input_tensors:
    dims = [dim.value for dim in shape.dims] if shape else None
    # Entries without a usable dimension list are omitted from the map.
    if dims:
      input_shapes[name] = dims
  return input_shapes
def normalize_output_name(output_name):
  """Remove :0 suffix from tensor names."""
  if output_name.endswith(":0"):
    # "op_name:0" designates the op's first output; keep just the op name.
    return output_name.split(":")[0]
  return output_name
# How many test cases we may have in a zip file. Too many test cases will
# slow down the test data generation process. make_zip_of_tests raises a
# RuntimeError when a parameter cartesian product exceeds this limit.
_MAX_TESTS_PER_ZIP = 500
def make_zip_of_tests(options,
                      test_parameters,
                      make_graph,
                      make_test_inputs,
                      extra_toco_options=None,
                      use_frozen_graph=False,
                      expected_tf_failures=0):
  """Helper to make a zip file of a bunch of TensorFlow models.

  This does a cartesian product of the dictionary of test_parameters and
  calls make_graph() for each item in the cartesian product set.
  If the graph is built successfully, then make_test_inputs() is called to
  build expected input/output value pairs. The model is then converted to
  tflite with toco, and the examples are serialized with the tflite model into
  a zip file (2 files per item in the cartesian product set).

  Args:
    options: An Options instance.
    test_parameters: Dictionary mapping to lists for each parameter.
      e.g. `{"strides": [[1,3,3,1], [1,2,2,1]], "foo": [1.2, 1.3]}`
    make_graph: function that takes current parameters and returns tuple
      `[input1, input2, ...], [output1, output2, ...]`
    make_test_inputs: function taking `curr_params`, `session`, `input_tensors`,
      `output_tensors` and returns tuple `(input_values, output_values)`.
    extra_toco_options: Additional toco options (an `ExtraTocoOptions`
      instance), or None for defaults.
    use_frozen_graph: Whether or not freeze graph before toco converter.
    expected_tf_failures: Number of times tensorflow is expected to fail in
      executing the input graphs. In some cases it is OK for TensorFlow to
      fail because the one or more combination of parameters is invalid.

  Raises:
    RuntimeError: if there are converter errors that can't be ignored.
  """
  # BUGFIX: this used to default to a shared mutable `ExtraTocoOptions()`
  # instance (mutable default argument). It is mutated below when a test
  # declares "split_tflite_lstm_inputs", so the setting leaked into every
  # subsequent call. Create a fresh instance per call instead.
  if extra_toco_options is None:
    extra_toco_options = ExtraTocoOptions()

  zip_path = os.path.join(options.output_path, options.zip_to_output)
  parameter_count = 0
  for parameters in test_parameters:
    parameter_count += functools.reduce(
        operator.mul, [len(values) for values in parameters.values()])

  if parameter_count > _MAX_TESTS_PER_ZIP:
    raise RuntimeError(
        "Too many parameter combinations for generating '%s'.\n"
        "There are %d combinations while the upper limit is %d.\n"
        "Having too many combinations will slow down the tests.\n"
        "Please consider splitting the test into multiple functions.\n"
        % (zip_path, parameter_count, _MAX_TESTS_PER_ZIP))

  # TODO(aselle): Make this allow multiple inputs outputs.
  archive = zipfile.PyZipFile(zip_path, "w")
  zip_manifest = []
  convert_report = []
  toco_errors = 0

  processed_labels = set()
  for parameters in test_parameters:
    keys = parameters.keys()
    for curr in itertools.product(*parameters.values()):
      # Label = zip name + sorted "key=value" pairs; it doubles as the
      # base filename inside the archive.
      label = zip_path.replace(".zip", "_") + (",".join(
          "%s=%r" % z for z in sorted(zip(keys, curr))).replace(" ", ""))
      if label[0] == "/":
        label = label[1:]
      if label in processed_labels:
        # Do not populate data for the same label more than once. It will
        # cause errors when unzipping.
        continue
      processed_labels.add(label)

      param_dict = dict(zip(keys, curr))

      def build_example(label, param_dict_real):
        """Build the model with parameter values set in param_dict_real.

        Args:
          label: Label of the model (i.e. the filename in the zip).
          param_dict_real: Parameter dictionary (arguments to the factories
            make_graph and make_test_inputs)

        Returns:
          (tflite_model_binary, report) where tflite_model_binary is the
          serialized flatbuffer as a string and report is a dictionary with
          keys `toco_log` (log of toco conversion), `tf_log` (log of tf
          conversion), `toco` (a string of success status of the conversion),
          `tf` (a string success status of the conversion).
        """
        np.random.seed(RANDOM_SEED)
        report = {"toco": report_lib.NOTRUN, "tf": report_lib.FAILED}

        # Build graph
        report["tf_log"] = ""
        report["toco_log"] = ""
        tf.reset_default_graph()

        with tf.device("/cpu:0"):
          try:
            inputs, outputs = make_graph(param_dict_real)
          except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
                  ValueError):
            report["tf_log"] += traceback.format_exc()
            return None, report

        sess = tf.Session()
        try:
          baseline_inputs, baseline_outputs = (make_test_inputs(
              param_dict_real, sess, inputs, outputs))
        except (tf.errors.UnimplementedError, tf.errors.InvalidArgumentError,
                ValueError):
          report["tf_log"] += traceback.format_exc()
          return None, report
        report["toco"] = report_lib.FAILED
        report["tf"] = report_lib.SUCCESS

        # Convert graph to toco
        input_tensors = [(input_tensor.name.split(":")[0], input_tensor.shape,
                          input_tensor.dtype) for input_tensor in inputs]
        output_tensors = [normalize_output_name(out.name) for out in outputs]
        graph_def = freeze_graph(
            sess,
            tf.global_variables() + inputs +
            outputs) if use_frozen_graph else sess.graph_def

        if "split_tflite_lstm_inputs" in param_dict_real:
          extra_toco_options.split_tflite_lstm_inputs = param_dict_real[
              "split_tflite_lstm_inputs"]

        tflite_model_binary, toco_log = options.tflite_convert_function(
            options,
            graph_def,
            input_tensors,
            output_tensors,
            extra_toco_options=extra_toco_options,
            test_params=param_dict_real)
        report["toco"] = (report_lib.SUCCESS if tflite_model_binary is not None
                          else report_lib.FAILED)
        report["toco_log"] = toco_log

        # BUGFIX: this was `if True or options.save_graphdefs:`, a debugging
        # leftover that made the flag a no-op and always bundled the pbtxt.
        if options.save_graphdefs:
          archive.writestr(label + ".pbtxt",
                           text_format.MessageToString(graph_def),
                           zipfile.ZIP_DEFLATED)

        if tflite_model_binary:
          archive.writestr(label + ".bin", tflite_model_binary,
                           zipfile.ZIP_DEFLATED)
          example = {"inputs": baseline_inputs, "outputs": baseline_outputs}

          example_fp = StringIO()
          write_examples(example_fp, [example])
          archive.writestr(label + ".inputs",
                           example_fp.getvalue(), zipfile.ZIP_DEFLATED)

          example_fp2 = StringIO()
          write_test_cases(example_fp2, label + ".bin", [example])
          archive.writestr(label + "_tests.txt",
                           example_fp2.getvalue(), zipfile.ZIP_DEFLATED)

          zip_manifest.append(label + "\n")

        return tflite_model_binary, report

      _, report = build_example(label, param_dict)

      if report["toco"] == report_lib.FAILED:
        ignore_error = False
        if not options.known_bugs_are_errors:
          for pattern, bug_number in options.known_bugs.items():
            if re.search(pattern, label):
              print("Ignored converter error due to bug %s" % bug_number)
              ignore_error = True
        if not ignore_error:
          toco_errors += 1
          print("-----------------\nconverter error!\n%s\n-----------------\n"
                % report["toco_log"])

      convert_report.append((param_dict, report))

  report_io = StringIO()
  report_lib.make_report_table(report_io, zip_path, convert_report)
  archive.writestr("report.html", report_io.getvalue())

  archive.writestr("manifest.txt", "".join(zip_manifest), zipfile.ZIP_DEFLATED)

  # Log statistics of what succeeded
  total_conversions = len(convert_report)
  tf_success = sum(1 for x in convert_report
                   if x[1]["tf"] == report_lib.SUCCESS)
  toco_success = sum(1 for x in convert_report
                     if x[1]["toco"] == report_lib.SUCCESS)
  percent = 0
  if tf_success > 0:
    percent = float(toco_success) / float(tf_success) * 100.
  tf.logging.info(("Archive %s Considered %d graphs, %d TF evaluated graphs "
                   " and %d TOCO converted graphs (%.1f%%"), zip_path,
                  total_conversions, tf_success, toco_success, percent)

  tf_failures = parameter_count - tf_success

  if tf_failures / parameter_count > 0.8:
    raise RuntimeError(("Test for '%s' is not very useful. "
                        "TensorFlow fails in %d percent of the cases.") %
                       (zip_path, int(100 * tf_failures / parameter_count)))

  if tf_failures != expected_tf_failures:
    raise RuntimeError(("Expected TF to fail %d times while generating '%s', "
                        "but that happened %d times") % (expected_tf_failures,
                                                         zip_path, tf_failures))

  if not options.ignore_converter_errors and toco_errors > 0:
    raise RuntimeError(
        "Found %d errors while generating toco models" % toco_errors)
def make_pool_tests(pool_op_in):
  """Make a set of tests to do average pooling.

  Args:
    pool_op_in: TensorFlow pooling operation to test i.e. `tf.nn.avg_pool2d`.

  Returns:
    A function representing the true generator (after curried pool_op_in).
  """

  def generate(options, expected_tf_failures=0):
    """Actual function that generates examples.

    Args:
      options: An Options instance.
      expected_tf_failures: number of expected tensorflow failures.
    """
    # Parameter space: window/stride sizes crossed with shapes and padding.
    test_parameters = [{
        "ksize": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],
        "strides": [[2, 1, 1, 2], [1, 1, 1, 1], [1, 1, 2, 1], [1, 10, 11, 1]],
        # TODO(aselle): should add in a degenerate shape (e.g. [1, 0, 1, 1]).
        "input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],
        "padding": ["SAME", "VALID"],
        "data_format": ["NHWC"],  # TODO(aselle): NCHW would be good
    }]

    def build_graph(parameters):
      inp = tf.placeholder(
          dtype=tf.float32, name="input", shape=parameters["input_shape"])
      out = pool_op_in(
          inp,
          ksize=parameters["ksize"],
          strides=parameters["strides"],
          data_format=parameters["data_format"],
          padding=parameters["padding"])
      return [inp], [out]

    def build_inputs(parameters, sess, inputs, outputs):
      input_values = create_tensor_data(tf.float32, parameters["input_shape"])
      return [input_values], sess.run(
          outputs, feed_dict=dict(zip(inputs, [input_values])))

    make_zip_of_tests(
        options,
        test_parameters,
        build_graph,
        build_inputs,
        expected_tf_failures=expected_tf_failures)

  return generate
@register_make_test_function()
def make_l2_pool_tests(options):
  """Generate L2-pooling tests via the shared pooling test factory."""
  # `make_l2_pool` is a helper defined elsewhere in this file (not a TF op).
  make_pool_tests(make_l2_pool)(options, expected_tf_failures=80)
@register_make_test_function()
def make_avg_pool_tests(options):
  """Generate average-pooling tests via the shared pooling test factory."""
  make_pool_tests(tf.nn.avg_pool)(options, expected_tf_failures=80)
@register_make_test_function()
def make_max_pool_tests(options):
  """Generate max-pooling tests via the shared pooling test factory."""
  make_pool_tests(tf.nn.max_pool)(options, expected_tf_failures=80)
@register_make_test_function()
def make_abs_tests(options):
  """Make a set of tests to do abs."""
  # (The docstring previously said "relu"; this test builds tf.abs.)
  # Chose a set of parameters
  test_parameters = [{
      "input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
                      [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    out = tf.abs(input_tensor)
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    # Symmetric range so both negative and positive inputs are exercised.
    input_values = create_tensor_data(
        np.float32, parameters["input_shape"], min_value=-10, max_value=10)
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_elu_tests(options):
  """Make a set of tests to do (float) tf.nn.elu."""
  # Scalar through 6-D input shapes.
  test_parameters = [{
      "input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
                      [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    """Build the graph for the test case."""
    inp = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    return [inp], [tf.nn.elu(inp)]

  def build_inputs(parameters, sess, inputs, outputs):
    """Build the inputs for the test case."""
    values = create_tensor_data(
        np.float32, parameters["input_shape"], min_value=-4, max_value=10)
    return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_identity_tests(options):
  """Make a set of tests to do identity."""
  # Chose a set of parameters
  test_parameters = [{
      "input_shape": [[], [1], [3, 3]],
      # Exercise both tf.identity and its Snapshot variant.
      "use_snapshot": [False, True],
  }]

  def build_graph(parameters):
    """Multiply-then-identity graph (see walk-around note below)."""
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    # We add the Multiply before Identity just as a walk-around to make the test
    # pass when input_shape is scalar.
    # During graph transformation, TOCO will replace the Identity op with
    # Reshape when input has shape. However, currently TOCO can't distinguish
    # between missing shape and scalar shape. As a result, when input has scalar
    # shape, this conversion still fails.
    # TODO(b/129197312), remove the walk-around code once the bug is fixed.
    input_doubled = input_tensor * 2.0
    if parameters["use_snapshot"]:
      identity_output = array_ops.snapshot(input_doubled)
    else:
      identity_output = tf.identity(input_doubled)
    return [input_tensor], [identity_output]

  def build_inputs(parameters, sess, inputs, outputs):
    input_values = create_tensor_data(
        np.float32, parameters["input_shape"], min_value=-4, max_value=10)
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_relu_tests(options):
  """Make a set of tests to do relu."""
  # Scalar through 6-D input shapes.
  test_parameters = [{
      "input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
                      [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    """Single placeholder feeding tf.nn.relu."""
    inp = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    return [inp], [tf.nn.relu(inp)]

  def build_inputs(parameters, sess, inputs, outputs):
    """Data straddles zero so both sides of the relu kink are exercised."""
    values = create_tensor_data(
        np.float32, parameters["input_shape"], min_value=-4, max_value=10)
    return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_relu1_tests(options):
  """Make a set of tests to do relu1 (clamp values to [-1, 1])."""
  # Chose a set of parameters
  test_parameters = [{
      "input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
                      [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    # Note that the following is not supported:
    #   out = tf.maximum(-1.0, tf.minimum(input_tensor, 1.0))
    out = tf.minimum(1.0, tf.maximum(input_tensor, -1.0))
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    # Range straddles both clamp boundaries (-1 and 1).
    input_values = create_tensor_data(
        np.float32, parameters["input_shape"], min_value=-3, max_value=10)
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_relu6_tests(options):
  """Make a set of tests to do relu6."""
  # Chose a set of parameters
  test_parameters = [{
      "input_shape": [[], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
                      [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    # BUGFIX: this previously built `tf.nn.relu`, so the generated examples
    # exercised RELU rather than the RELU6 kernel this test is named for.
    out = tf.nn.relu6(input_tensor)
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    # Range straddles both relu6 cutoffs (0 and 6).
    input_values = create_tensor_data(
        np.float32, parameters["input_shape"], min_value=-3, max_value=10)
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_prelu_tests(options):
  """Make a set of tests to do PReLU."""
  test_parameters = [
      {
          # The canonical case for image processing is having a 4D `input`
          # (NHWC)and `shared_axes`=[1, 2], so the alpha parameter is per
          # channel.
          "input_shape": [[1, 10, 10, 3], [3, 3, 3, 3]],
          "shared_axes": [[1, 2], [1]],
      },
      {
          # 2D-3D example. Share the 2nd axis.
          "input_shape": [[20, 20], [20, 20, 20]],
          "shared_axes": [[1]],
      }
  ]

  def build_graph(parameters):
    """Build the graph for the test case."""
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    prelu = tf.keras.layers.PReLU(shared_axes=parameters["shared_axes"])
    out = prelu(input_tensor)
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Build the inputs for the test case."""
    input_shape = parameters["input_shape"]
    input_values = create_tensor_data(
        np.float32, input_shape, min_value=-10, max_value=10)
    shared_axes = parameters["shared_axes"]

    # The alpha tensor has one entry per non-shared, non-batch dimension
    # and 1 along every shared axis.
    alpha_shape = []
    for dim in range(1, len(input_shape)):
      alpha_shape.append(1 if dim in shared_axes else input_shape[dim])

    alpha_values = create_tensor_data(np.float32, alpha_shape)

    # There should be only 1 trainable variable tensor (the PReLU alpha).
    variables = tf.all_variables()
    assert len(variables) == 1
    sess.run(variables[0].assign(alpha_values))

    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  # Frozen graph is required so the assigned alpha variable is baked in.
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      use_frozen_graph=True)
@register_make_test_function()
def make_leaky_relu_tests(options):
  """Make a set of tests to do LeakyRelu."""
  test_parameters = [{
      "input_shape": [[], [1], [5], [1, 10, 10, 3], [3, 3, 3, 3]],
      # Includes negative and >1 slopes to cover non-canonical alphas.
      "alpha": [0.1, 1.0, 2.0, -0.1, -1.0, -2.0],
  }]

  def build_graph(parameters):
    """Build the graph for the test case."""
    inp = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    return [inp], [tf.nn.leaky_relu(inp, alpha=parameters["alpha"])]

  def build_inputs(parameters, sess, inputs, outputs):
    """Build the inputs for the test case."""
    values = create_tensor_data(
        np.float32, parameters["input_shape"], min_value=-3, max_value=10)
    return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# This function tests various TensorFLow functions that generates Const op,
# including `tf.ones`, `tf.zeros` and random functions.
@register_make_test_function()
def make_constant_tests(options):
  """Make a set of tests to do constant ops."""
  test_parameters = [{
      "dtype": [tf.float32, tf.int32],
      "input_shape": [[], [1], [2], [1, 1, 1, 1], [2, 2, 2, 2]],
      # Also expose the constant itself as a graph output.
      "constant_is_also_output": [True, False],
      # This is a regression test for a bug where Toco rejects models with
      # unread inputs.
      "has_unread_input": [True, False],
  }]

  def build_graph(parameters):
    """Graph with a constant combined with a dummy placeholder input."""
    dummy_input = tf.placeholder(
        dtype=parameters["dtype"],
        name="input1",
        shape=parameters["input_shape"])
    constant = tf.constant(
        create_tensor_data(parameters["dtype"], parameters["input_shape"]))
    outputs = [tf.maximum(dummy_input, constant)]
    if parameters["constant_is_also_output"]:
      outputs.append(constant)
    inputs = [dummy_input]
    if parameters["has_unread_input"]:
      # Deliberately never consumed -- see regression-test note above.
      unread_input = tf.placeholder(
          dtype=parameters["dtype"],
          name="unread_input",
          shape=parameters["input_shape"])
      inputs.append(unread_input)

    return inputs, outputs

  def build_inputs(parameters, sess, inputs, outputs):
    # All-zero dummy input; only the first (read) input is fed.
    dummy_input = np.zeros(
        parameters["input_shape"], dtype=_TF_TYPE_INFO[parameters["dtype"]][0])
    return [dummy_input], sess.run(outputs, feed_dict={inputs[0]: dummy_input})

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
def make_binary_op_tests(options, binary_operator, expected_tf_failures=0):
  """Make a set of tests to do binary ops with and without broadcast.

  Args:
    options: An Options instance.
    binary_operator: binary TF function to test, e.g. `tf.add`.
    expected_tf_failures: number of expected TensorFlow failures.
  """

  test_parameters = [
      # Avoid creating all combinations to keep the test size small.
      {
          "dtype": [tf.float32, tf.int32],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [True],
      },
      {
          "dtype": [tf.float32],
          "input_shape_1": [[5]],
          "input_shape_2": [[5]],
          "activation": [False, True],
      },
      {
          # Broadcast second operand over the first.
          "dtype": [tf.float32, tf.int32, tf.int64],
          "input_shape_1": [[1, 3, 4, 3]],
          "input_shape_2": [[3]],
          "activation": [True, False],
      },
      {
          # Broadcast first operand over the second.
          "dtype": [tf.float32, tf.int32],
          "input_shape_1": [[3]],
          "input_shape_2": [[1, 3, 4, 3]],
          "activation": [True, False],
      },
      {
          # Scalar x scalar.
          "dtype": [tf.float32],
          "input_shape_1": [[]],
          "input_shape_2": [[]],
          "activation": [False],
      },
      {
          # Zero-sized operand.
          "dtype": [tf.float32],
          "input_shape_1": [[0]],
          "input_shape_2": [[1]],
          "activation": [False],
      }
  ]

  def build_graph(parameters):
    """Builds the graph given the current parameters."""
    input1 = tf.placeholder(
        dtype=parameters["dtype"],
        name="input1",
        shape=parameters["input_shape_1"])
    input2 = tf.placeholder(
        dtype=parameters["dtype"],
        name="input2",
        shape=parameters["input_shape_2"])
    out = binary_operator(input1, input2)
    # Optionally fuse an activation after the binary op.
    if parameters["activation"]:
      out = tf.nn.relu(out)
    return [input1, input2], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Builds operand inputs for op."""
    input1 = create_tensor_data(parameters["dtype"],
                                parameters["input_shape_1"])
    input2 = create_tensor_data(parameters["dtype"],
                                parameters["input_shape_2"])
    return [input1, input2], sess.run(
        outputs, feed_dict={
            inputs[0]: input1,
            inputs[1]: input2
        })

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=expected_tf_failures)
def make_reduce_tests(reduce_op,
                      min_value=-10,
                      max_value=10,
                      boolean_tensor_only=False):
  """Make a set of tests to do reduce operation.

  Args:
    reduce_op: TensorFlow reduce operation to test, i.e. `tf.reduce_mean`.
    min_value: min value for created tensor data.
    max_value: max value for created tensor data.
    boolean_tensor_only: If true, will only generate tensor with boolean value.

  Returns:
    a function representing the true generator with `reduce_op_in` curried.
  """

  def f(options):
    """Actual function that generates examples."""
    test_parameters = [
        {
            "input_dtype": [tf.float32, tf.int32, tf.int64],
            "input_shape": [[3, 3, 2, 4]],
            # Includes negative axes, repeated axes and permuted axis lists.
            "axis": [
                0, 1, 2, [0, 1], [0, 2], [1, 2], [0, 1, 2], [1, 0], [2, 0],
                [2, 1], [2, 1, 0], [2, 0, 1], -1, -2, -3, [1, -1], [0, -1],
                [-1, 0], [-1, -2, -3], [0, 0, 0], [2, 2, 0], [1, 0, -3, -3]
            ],
            "const_axis": [True, False],
            "keepdims": [True, False],
        },
        {
            "input_dtype": [tf.float32],
            "input_shape": [[1, 8, 8, 3]],
            "axis": [
                0, 1, 2, 3, [1, 2], [0, 3], [1, 2, 3], [0, 1, 2, 3],
                [3, 2, 1, 0], [3, 1, 0, 2], [2, 0], [3, 0], [3, 1], [1, 0],
                -1, -2, -3, -4,
                [0, -2], [2, 3, -1, 0], [3, 1, 2, -3], [3, -4], [2, 2, 2],
                [2, 2, 3], [-3, -3, -4], [-3, 2, 1]
            ],
            "const_axis": [True, False],
            "keepdims": [True, False],
        },
        {
            "input_dtype": [tf.float32],
            "input_shape": [[], [1, 8, 8, 3], [3, 2, 4]],
            "axis": [[]],  # shape is: [0]
            "const_axis": [False],
            "keepdims": [True, False],
        },
        {
            "input_dtype": [tf.float32],
            "input_shape": [[], [1, 8, 8, 3], [3, 2, 4]],
            "axis": [None],  # shape is: []
            "const_axis": [True],
            "keepdims": [True, False],
        }
    ]

    def build_graph(parameters):
      """Build the mean op testing graph."""
      dtype = parameters["input_dtype"]
      if boolean_tensor_only:
        dtype = tf.bool
      input_tensor = tf.placeholder(
          dtype=dtype, name="input", shape=parameters["input_shape"])

      # Get axis as either a placeholder or constants.
      if parameters["const_axis"]:
        axis = parameters["axis"]
        input_tensors = [input_tensor]
      else:
        # A list axis becomes a rank-1 placeholder; an int or None becomes a
        # scalar placeholder.
        if isinstance(parameters["axis"], list):
          shape = [len(parameters["axis"])]
        else:
          shape = []  # shape for None or integers.
        axis = tf.placeholder(dtype=tf.int32, name="axis", shape=shape)
        input_tensors = [input_tensor, axis]

      out = reduce_op(
          input_tensor, axis=axis, keepdims=parameters["keepdims"])
      return input_tensors, [out]

    def build_inputs(parameters, sess, inputs, outputs):
      dtype = parameters["input_dtype"]
      if boolean_tensor_only:
        dtype = tf.bool
      values = [
          create_tensor_data(
              dtype,
              parameters["input_shape"],
              min_value=min_value,
              max_value=max_value)
      ]
      # Feed the axis placeholder only when axis is not a graph constant.
      if not parameters["const_axis"]:
        values.append(np.array(parameters["axis"]))
      return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

    make_zip_of_tests(options, test_parameters, build_graph, build_inputs)

  return f
@register_make_test_function()
def make_mean_tests(options):
  """Generate test cases for the mean reduction op."""
  generator = make_reduce_tests(tf.reduce_mean)
  return generator(options)
@register_make_test_function()
def make_sum_tests(options):
  """Generate test cases for the sum reduction op."""
  generator = make_reduce_tests(tf.reduce_sum)
  return generator(options)
@register_make_test_function()
def make_reduce_prod_tests(options):
  """Generate test cases for the prod reduction op."""
  # Restrict input values to [-2, 2] so the product cannot overflow.
  generator = make_reduce_tests(tf.reduce_prod, -2, 2)
  return generator(options)
@register_make_test_function()
def make_reduce_max_tests(options):
  """Generate test cases for the max reduction op."""
  generator = make_reduce_tests(tf.reduce_max)
  return generator(options)
@register_make_test_function()
def make_reduce_min_tests(options):
  """Generate test cases for the min reduction op."""
  generator = make_reduce_tests(tf.reduce_min)
  return generator(options)
@register_make_test_function()
def make_reduce_any_tests(options):
  """Generate test cases for the any reduction op (boolean inputs only)."""
  generator = make_reduce_tests(tf.reduce_any, boolean_tensor_only=True)
  return generator(options)
@register_make_test_function()
def make_exp_tests(options):
  """Generate test cases for the exp op."""
  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
  }]

  def build_graph(parameters):
    """Create a graph applying exp to a single placeholder."""
    inp = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    return [inp], [tf.exp(inp)]

  def build_inputs(parameters, sess, inputs, outputs):
    # Cap values at 9 so exp() stays well within float32 range.
    data = create_tensor_data(
        parameters["input_dtype"],
        parameters["input_shape"],
        min_value=-100,
        max_value=9)
    return [data], sess.run(outputs, feed_dict={inputs[0]: data})

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_cos_tests(options):
  """Generate test cases for the cos op."""
  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
  }]

  def build_graph(parameters):
    """Create a graph applying cos to a single placeholder."""
    inp = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    return [inp], [tf.cos(inp)]

  def build_inputs(parameters, sess, inputs, outputs):
    # Sample inputs from one full period, [-pi, pi].
    data = create_tensor_data(
        parameters["input_dtype"],
        parameters["input_shape"],
        min_value=-np.pi,
        max_value=np.pi)
    return [data], sess.run(outputs, feed_dict={inputs[0]: data})

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_log_softmax_tests(options):
  """Generate test cases for the log_softmax op."""
  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape": [[1, 100], [4, 2], [5, 224]],
  }]

  def build_graph(parameters):
    """Create a graph applying log_softmax to a single placeholder."""
    inp = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    return [inp], [tf.nn.log_softmax(inp)]

  def build_inputs(parameters, sess, inputs, outputs):
    # Keep logits modest so the intermediate exponentials stay finite.
    data = create_tensor_data(
        parameters["input_dtype"],
        parameters["input_shape"],
        min_value=-100,
        max_value=9)
    return [data], sess.run(outputs, feed_dict={inputs[0]: data})

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_maximum_tests(options):
  """Generate test cases for the maximum op."""
  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape_1": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
      "input_shape_2": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
  }]

  def build_graph(parameters):
    """Create a graph taking the elementwise maximum of two placeholders."""
    lhs = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input_1",
        shape=parameters["input_shape_1"])
    rhs = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input_2",
        shape=parameters["input_shape_2"])
    return [lhs, rhs], [tf.maximum(lhs, rhs)]

  def build_inputs(parameters, sess, inputs, outputs):
    feed_values = [
        create_tensor_data(parameters["input_dtype"],
                           parameters["input_shape_1"]),
        create_tensor_data(parameters["input_dtype"],
                           parameters["input_shape_2"]),
    ]
    return feed_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, feed_values)))

  # Shape pairs that do not broadcast are expected to fail under TF.
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=8)
@register_make_test_function()
def make_minimum_tests(options):
  """Generate test cases for the minimum op."""
  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape_1": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
      "input_shape_2": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
  }]

  def build_graph(parameters):
    """Create a graph taking the elementwise minimum of two placeholders."""
    lhs = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input_1",
        shape=parameters["input_shape_1"])
    rhs = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input_2",
        shape=parameters["input_shape_2"])
    return [lhs, rhs], [tf.minimum(lhs, rhs)]

  def build_inputs(parameters, sess, inputs, outputs):
    feed_values = [
        create_tensor_data(parameters["input_dtype"],
                           parameters["input_shape_1"]),
        create_tensor_data(parameters["input_dtype"],
                           parameters["input_shape_2"]),
    ]
    return feed_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, feed_values)))

  # Shape pairs that do not broadcast are expected to fail under TF.
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=8)
def make_binary_op_tests_func(binary_operator):
  """Return a function that does a test on a binary operator."""

  def generator(options):
    # Curry `binary_operator` into the generic binary-op test generator.
    return make_binary_op_tests(options, binary_operator)

  return generator
@register_make_test_function()
def make_add_tests(options):
  """Make a set of tests for the add op."""
  make_binary_op_tests(options, tf.add)
@register_make_test_function()
def make_add_n_tests(options):
  """Generate test cases for the AddN op."""
  test_parameters = [
      {
          "dtype": [tf.float32, tf.int32],
          "input_shape": [[2, 5, 3, 1]],
          "num_inputs": [2, 3, 4, 5],
      },
      {
          "dtype": [tf.float32, tf.int32],
          "input_shape": [[5]],
          "num_inputs": [2, 3, 4, 5],
      },
      {
          "dtype": [tf.float32, tf.int32],
          "input_shape": [[]],
          "num_inputs": [2, 3, 4, 5],
      },
  ]

  def build_graph(parameters):
    """Create `num_inputs` placeholders and sum them with tf.add_n."""
    placeholders = [
        tf.placeholder(
            dtype=parameters["dtype"],
            name="input_{}".format(idx),
            shape=parameters["input_shape"])
        for idx in range(parameters["num_inputs"])
    ]
    return placeholders, [tf.add_n(placeholders)]

  def build_inputs(parameters, sess, inputs, outputs):
    """Create one random tensor per graph input and run the graph."""
    feed_values = [
        create_tensor_data(parameters["dtype"], parameters["input_shape"])
        for _ in range(parameters["num_inputs"])
    ]
    return feed_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, feed_values)))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_div_tests(options):
  """Make a set of tests for the div op."""
  make_binary_op_tests(options, tf.div)
@register_make_test_function()
def make_sub_tests(options):
  """Make a set of tests for the sub op."""
  make_binary_op_tests(options, tf.subtract)
@register_make_test_function()
def make_mul_tests(options):
  """Make a set of tests for the mul op."""
  make_binary_op_tests(options, tf.multiply)
@register_make_test_function()
def make_pow_tests(options):
  """Make a set of tests for the pow op."""
  # Some combinations (e.g. negative base with fractional exponent) are
  # expected to fail under plain TensorFlow.
  make_binary_op_tests(options, tf.pow, expected_tf_failures=7)
@register_make_test_function()
def make_floor_div_tests(options):
  """Make a set of tests for the floor_div op."""
  make_binary_op_tests(options, tf.floor_div)
@register_make_test_function()
def make_floor_mod_tests(options):
  """Make a set of tests for the floor_mod op."""
  make_binary_op_tests(options, tf.floormod)
@register_make_test_function()
def make_squared_difference_tests(options):
  """Make a set of tests for the squared_difference op."""
  make_binary_op_tests(options, tf.squared_difference)
@register_make_test_function()
def make_gather_tests(options):
  """Make a set of tests to do gather."""
  test_parameters = [
      {
          "params_dtype": [tf.float32, tf.int32, tf.int64],
          "params_shape": [[10], [1, 2, 20]],
          "indices_dtype": [tf.int32, tf.int64],
          "indices_shape": [[3], [5]],
          "axis": [-1, 0, 1],
      },
      {
          # TODO(b/123895910): add Nd support for strings.
          "params_dtype": [tf.string],
          "params_shape": [[8]],
          "indices_dtype": [tf.int32],
          "indices_shape": [[3]],
          "axis": [0],
      }
  ]

  def build_graph(parameters):
    """Build the gather op testing graph."""
    params = tf.placeholder(
        dtype=parameters["params_dtype"],
        name="params",
        shape=parameters["params_shape"])
    indices = tf.placeholder(
        dtype=parameters["indices_dtype"],
        name="indices",
        shape=parameters["indices_shape"])
    # Cap the requested axis at the rank of `params`; combinations that are
    # still out of range are counted as expected TF failures below.
    axis = min(len(parameters["params_shape"]), parameters["axis"])
    out = tf.gather(params, indices, axis=axis)
    return [params, indices], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    params = create_tensor_data(parameters["params_dtype"],
                                parameters["params_shape"])
    # Indices are bounded by the size of dimension 0, so they are always
    # valid when gathering along axis 0.
    indices = create_tensor_data(parameters["indices_dtype"],
                                 parameters["indices_shape"], 0,
                                 parameters["params_shape"][0] - 1)
    return [params, indices], sess.run(
        outputs, feed_dict=dict(zip(inputs, [params, indices])))

  # Note that TF can't execute with index=1 and params_shape=[10].
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=12)
@register_make_test_function()
def make_gather_nd_tests(options):
  """Make a set of tests to do gather_nd."""
  test_parameters = [
      {
          "params_dtype": [tf.float32, tf.int32, tf.int64],
          "params_shape": [[5, 1]],
          "indices_dtype": [tf.int32, tf.int64],
          "indices_shape": [[1, 1]],
      },
      {
          "params_dtype": [tf.float32, tf.int32, tf.int64],
          "params_shape": [[5, 5]],
          "indices_dtype": [tf.int32, tf.int64],
          "indices_shape": [[2, 1], [2, 2]],
      },
      {
          "params_dtype": [tf.float32, tf.int32, tf.int64],
          "params_shape": [[5, 5, 10]],
          "indices_dtype": [tf.int32, tf.int64],
          "indices_shape": [[3, 1], [2, 2], [2, 3], [2, 1, 3]],
      },
  ]

  def build_graph(parameters):
    """Build the gather_nd op testing graph."""
    params = tf.placeholder(
        dtype=parameters["params_dtype"],
        name="params",
        shape=parameters["params_shape"])
    indices = tf.placeholder(
        dtype=parameters["indices_dtype"],
        name="indices",
        shape=parameters["indices_shape"])
    out = tf.gather_nd(params, indices)
    return [params, indices], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    params = create_tensor_data(parameters["params_dtype"],
                                parameters["params_shape"])
    # Bound all index components by the size of dimension 0. This is
    # conservative but valid because, in every params_shape above, dimension 0
    # is never larger than the other dimensions being indexed.
    indices = create_tensor_data(parameters["indices_dtype"],
                                 parameters["indices_shape"], 0,
                                 parameters["params_shape"][0] - 1)
    return [params, indices], sess.run(
        outputs, feed_dict=dict(zip(inputs, [params, indices])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_gather_with_constant_tests(options):
  """Generate tests that feed constant params/indices into Gather."""
  test_parameters = [{
      "input_shape": [[3]],
      "reference_shape": [[2]],
  }, {
      "input_shape": [[2, 3]],
      "reference_shape": [[2, 3]],
  }]

  def build_graph(parameters):
    """Build a graph where the inputs to Gather are constants."""
    ref_tensor = tf.placeholder(
        dtype=tf.int32, shape=parameters["reference_shape"])
    const_params = tf.constant(
        create_tensor_data(tf.int32, parameters["input_shape"]))
    const_indices = tf.constant([0, 1], tf.int32)
    gathered = tf.gather(const_params, const_indices)
    # Compare against the placeholder so the Gather result is consumed.
    return [ref_tensor], [tf.equal(ref_tensor, gathered)]

  def build_inputs(parameters, sess, inputs, outputs):
    ref_values = np.zeros(parameters["reference_shape"], dtype=np.int32)
    return [ref_values], sess.run(
        outputs, feed_dict={inputs[0]: ref_values})

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_embedding_lookup_tests(options):
  """Make a set of tests to do embedding_lookup."""
  test_parameters = [
      {
          "params_dtype": [tf.float32],
          "params_shape": [[10], [10, 10]],
          "ids_dtype": [tf.int32],
          "ids_shape": [[3], [5]],
      },
  ]

  def build_graph(parameters):
    """Build the embedding_lookup op testing graph."""
    params = tf.placeholder(
        dtype=parameters["params_dtype"],
        name="params",
        shape=parameters["params_shape"])
    ids = tf.placeholder(
        dtype=parameters["ids_dtype"],
        name="ids",
        shape=parameters["ids_shape"])
    out = tf.nn.embedding_lookup(params, ids)
    return [params, ids], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    params = create_tensor_data(parameters["params_dtype"],
                                parameters["params_shape"])
    # Ids index rows of `params`, so bound them by the size of dimension 0.
    ids = create_tensor_data(parameters["ids_dtype"],
                             parameters["ids_shape"], 0,
                             parameters["params_shape"][0] - 1)
    return [params, ids], sess.run(
        outputs, feed_dict=dict(zip(inputs, [params, ids])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs)
@register_make_test_function()
def make_global_batch_norm_tests(options):
  """Make a set of tests to do batch_norm_with_global_normalization."""
  test_parameters = [{
      "dtype": [tf.float32],
      "input_shape": [[1, 1, 6, 2], [3, 4, 5, 4]],
      "epsilon": [0.1, 0.0001],
      "scale_after": [True, False],
  }]

  def build_graph(parameters):
    """Build the global batch norm testing graph."""
    input_shape = parameters["input_shape"]
    # Per-channel parameters: sized by the last (channel) dimension.
    scale_shape = input_shape[3]

    scale = create_tensor_data(parameters["dtype"], scale_shape)
    offset = create_tensor_data(parameters["dtype"], scale_shape)
    mean = create_tensor_data(parameters["dtype"], scale_shape)
    variance = create_tensor_data(parameters["dtype"], scale_shape)

    # `x` is baked into the graph as constant data; the batch-norm result is
    # added to the placeholder so the op has a variable input downstream.
    x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
    x_norm = tf.nn.batch_norm_with_global_normalization(
        x, mean, variance, scale, offset,
        parameters["epsilon"], parameters["scale_after"])

    input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
                                  shape=parameters["input_shape"])
    out = tf.add(input_tensor, x_norm)
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value = create_tensor_data(parameters["dtype"],
                                     parameters["input_shape"])
    return [input_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_fused_batch_norm_tests(options):
  """Make a set of tests to do fused_batch_norm."""
  test_parameters = [{
      "dtype": [tf.float32],
      "input_shape": [[1, 1, 6, 2]],
      "epsilon": [0.001, 0.1],
  }]

  def build_graph(parameters):
    """Build the testing graph for fused batch normalization."""
    input_shape = parameters["input_shape"]
    # Per-channel parameters: sized by the last (channel) dimension (NHWC).
    scale_shape = input_shape[3]

    scale = create_tensor_data(parameters["dtype"], scale_shape)
    offset = create_tensor_data(parameters["dtype"], scale_shape)
    mean = create_tensor_data(parameters["dtype"], scale_shape)
    variance = create_tensor_data(parameters["dtype"], scale_shape)

    # `x` is constant data; inference mode (is_training=False) uses the given
    # mean/variance rather than batch statistics.
    x = create_tensor_data(parameters["dtype"], parameters["input_shape"])
    [x_norm, _, _] = tf.nn.fused_batch_norm(
        x, scale, offset, mean, variance,
        parameters["epsilon"], data_format="NHWC", is_training=False)

    input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
                                  shape=parameters["input_shape"])
    out = tf.add(input_tensor, x_norm)
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value = create_tensor_data(parameters["dtype"],
                                     parameters["input_shape"])
    return [input_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_conv_tests(options):
  """Make a set of tests to do convolution."""
  test_parameters = [
      {
          "input_shape": [[1, 3, 4, 3], [4, 6, 6, 1]],
          "filter_shape": [[1, 1], [2, 3], [3, 3]],
          "strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
          "dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
          "padding": ["SAME", "VALID"],
          "data_format": ["NHWC"],  # TODO(aselle): NCHW would be good
          "constant_filter": [True, False],
          "channel_multiplier": [1, 2],
          "fully_quantize": [False],
      },
      # TODO(b/134702301): The fully_quantize param is just ignored by the MLIR
      # testing path now, resulting in duplicate tests. Either ignore these
      # tests or handle it properly in the mlir_convert() function.
      {
          "input_shape": [[1, 3, 4, 3], [4, 6, 6, 1]],
          "filter_shape": [[1, 1], [2, 3], [3, 3]],
          "strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
          "dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
          "padding": ["SAME", "VALID"],
          "data_format": ["NHWC"],  # TODO(aselle): NCHW would be good
          "constant_filter": [True],
          "channel_multiplier": [1, 2],
          "fully_quantize": [True],
      }
  ]

  def get_tensor_shapes(parameters):
    # Filter shape is [height, width, in_channels, out_channel_multiplier].
    input_shape = parameters["input_shape"]
    filter_size = parameters["filter_shape"]
    filter_shape = filter_size + [
        input_shape[3], parameters["channel_multiplier"]
    ]
    return [input_shape, filter_shape]

  def build_graph(parameters):
    """Build a conv graph given `parameters`."""
    input_shape, filter_shape = get_tensor_shapes(parameters)
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=input_shape)

    # Get filter input either as a placeholder or constants. Also get a list of
    # the input tensors that are represented as placeholders.
    if parameters["constant_filter"]:
      filter_input = create_tensor_data(
          np.float32, filter_shape, min_value=-10, max_value=10)
      input_tensors = [input_tensor]
    else:
      filter_input = tf.placeholder(
          dtype=tf.float32, name="filter", shape=filter_shape)
      input_tensors = [input_tensor, filter_input]

    out = tf.nn.conv2d(
        input_tensor,
        filter_input,
        strides=parameters["strides"],
        dilations=parameters["dilations"],
        padding=parameters["padding"],
        data_format=parameters["data_format"])
    return input_tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    # Build list of input values either containing 1 tensor (input) or 2 tensors
    # (input, filter) based on whether filter is constant or variable input.
    input_shape, filter_shape = get_tensor_shapes(parameters)
    values = [
        create_tensor_data(np.float32, input_shape, min_value=-1, max_value=1)
    ]
    if not parameters["constant_filter"]:
      values.append(create_tensor_data(np.float32, filter_shape))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  # TF rejects combinations where strides and dilations are both > 1, among
  # others; those count toward the expected failures.
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=60)
# Note: This is a regression test for a bug (b/122651451) that Toco incorrectly
# erases the reduction indices array while it's shared with other ops.
@register_make_test_function()
def make_l2norm_shared_epsilon_tests(options):
  """Regression test for a bug (b/122651451)."""
  # Chose a set of parameters
  test_parameters = [{
      "input_shape": [[5, 7]],
      "dim": [1],
      "epsilon": [1e-8],
  }]

  def build_graph(parameters):
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    # The epsilon constant is deliberately shared between both l2_normalize
    # ops; the bug was Toco erasing a reduction-indices array shared with
    # other ops.
    epsilon = tf.constant(parameters["epsilon"])
    out1 = tf.nn.l2_normalize(input_tensor, parameters["dim"], epsilon=epsilon)
    out2 = tf.nn.l2_normalize(input_tensor, parameters["dim"], epsilon=epsilon)
    out = out1 + out2
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_values = create_tensor_data(
        np.float32, parameters["input_shape"], min_value=-4, max_value=10)
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# Note: This is a regression test for a bug (b/112436267) that Toco incorrectly
# fuses weights when multiple Conv2D/FULLY_CONNECTED ops share the same constant
# weight tensor.
@register_make_test_function()
def make_conv_with_shared_weights_tests(options):
  """Make a test where 2 Conv ops shared the same constant weight tensor."""
  test_parameters = [{
      "input_shape": [[1, 10, 10, 3]],
      "filter_shape": [[3, 3]],
      "strides": [[1, 1, 1, 1]],
      "dilations": [[1, 1, 1, 1]],
      "padding": ["SAME"],
      "data_format": ["NHWC"],
      "channel_multiplier": [1],
  }]

  def get_tensor_shapes(parameters):
    # Filter shape is [height, width, in_channels, out_channel_multiplier].
    input_shape = parameters["input_shape"]
    filter_size = parameters["filter_shape"]
    filter_shape = filter_size + [
        input_shape[3], parameters["channel_multiplier"]
    ]
    return [input_shape, filter_shape]

  def build_graph(parameters):
    """Build a conv graph given `parameters`."""
    input_shape, filter_shape = get_tensor_shapes(parameters)
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=input_shape)
    input_tensors = [input_tensor]

    # Construct a constant weights tensor which will be used by both Conv2D.
    filter_tensor = tf.constant(
        create_tensor_data(np.float32, filter_shape), dtype=tf.float32)

    # Ensure that FuseBinaryIntoFollowingAffine works with an input which
    # is shared by multiple affine ops.
    conv_input = input_tensor + 0.1

    # Construct 2 Conv2D operations which use exactly the same input and
    # weights.
    result1 = tf.nn.conv2d(
        conv_input,
        filter_tensor,
        strides=parameters["strides"],
        dilations=parameters["dilations"],
        padding=parameters["padding"],
        data_format=parameters["data_format"])
    result2 = tf.nn.conv2d(
        conv_input,
        filter_tensor,
        strides=parameters["strides"],
        dilations=parameters["dilations"],
        padding=parameters["padding"],
        data_format=parameters["data_format"])

    # Add MUL ops after Conv2D ops. These MUL ops should be fused into the
    # weights of Conv2D.
    result1 = result1 * 2
    result2 = result2 * 3

    # Add the 2 results up.
    out = result1 + result2
    return input_tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    # Build list of input values either containing 1 tensor (input) or 2 tensors
    # (input, filter) based on whether filter is constant or variable input.
    input_shape, unused_filter_shape = get_tensor_shapes(parameters)
    values = [create_tensor_data(np.float32, input_shape)]
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# Note: This is a regression test for a bug (b/112303004) that Toco incorrectly
# transforms Conv into DepthwiseConv when two Conv ops share the same constant
# weight tensor.
@register_make_test_function()
def make_conv_to_depthwiseconv_with_shared_weights_tests(options):
  """Make a test where 2 Conv ops shared the same constant weight tensor."""
  # A single input channel with channel_multiplier > 1 is the shape Toco
  # would (incorrectly, per b/112303004) rewrite as DepthwiseConv.
  test_parameters = [{
      "input_shape": [[1, 10, 10, 1]],
      "filter_shape": [[3, 3]],
      "strides": [[1, 1, 1, 1]],
      "dilations": [[1, 1, 1, 1]],
      "padding": ["SAME"],
      "data_format": ["NHWC"],
      "channel_multiplier": [3],
  }]

  def get_tensor_shapes(parameters):
    # Filter shape is [height, width, in_channels, out_channel_multiplier].
    input_shape = parameters["input_shape"]
    filter_size = parameters["filter_shape"]
    filter_shape = filter_size + [
        input_shape[3], parameters["channel_multiplier"]
    ]
    return [input_shape, filter_shape]

  def build_graph(parameters):
    """Build a conv graph given `parameters`."""
    input_shape, filter_shape = get_tensor_shapes(parameters)
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=input_shape)

    # Construct a constant weights tensor which will be used by both Conv2D.
    filter_tensor = tf.constant(
        create_tensor_data(np.float32, filter_shape), dtype=tf.float32)
    input_tensors = [input_tensor]

    # Construct 2 Conv2D operations which use exactly the same input and
    # weights.
    result1 = tf.nn.conv2d(
        input_tensor,
        filter_tensor,
        strides=parameters["strides"],
        dilations=parameters["dilations"],
        padding=parameters["padding"],
        data_format=parameters["data_format"])
    result2 = tf.nn.conv2d(
        input_tensor,
        filter_tensor,
        strides=parameters["strides"],
        dilations=parameters["dilations"],
        padding=parameters["padding"],
        data_format=parameters["data_format"])

    # Add the 2 results up.
    out = result1 + result2
    return input_tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    # Build list of input values either containing 1 tensor (input) or 2 tensors
    # (input, filter) based on whether filter is constant or variable input.
    input_shape, unused_filter_shape = get_tensor_shapes(parameters)
    values = [create_tensor_data(np.float32, input_shape)]
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_depthwiseconv_tests(options):
  """Make a set of tests to do convolution."""
  # Tensorflow only supports equal strides
  test_parameters = [
      {
          "input_shape": [[1, 3, 4, 3], [1, 10, 10, 3]],
          "filter_size": [[1, 1], [1, 2], [3, 3]],
          "strides": [[1, 1, 1, 1], [1, 3, 3, 1]],
          "dilations": [[1, 1, 1, 1], [1, 3, 2, 1], [1, 2, 2, 1]],
          "channel_multiplier": [1, 2],
          "rate": [[1, 1]],
          "padding": ["SAME", "VALID"],
          "data_format": ["NHWC"],
          "constant_filter": [True, False],
      },
      # These combinations use stride/rate values TF rejects, and count
      # toward expected_tf_failures below.
      {
          "input_shape": [[1, 3, 4, 3]],
          "filter_size": [[1, 1]],
          "strides": [[1, 1, 2, 1]],  # TF needs [1, x, x, 1]
          "dilations": [[1, 1, 1, 1], [1, 2, 2, 1]],
          "channel_multiplier": [2],
          "rate": [[2, 2]],  # Only [1, 1] is supported
          "padding": ["SAME"],
          "data_format": ["NHWC"],
          "constant_filter": [True, False],
      }
  ]

  def get_tensor_shapes(parameters):
    # Filter shape is [height, width, in_channels, channel_multiplier].
    input_shape = parameters["input_shape"]
    filter_size = parameters["filter_size"]
    filter_shape = filter_size + [
        input_shape[3], parameters["channel_multiplier"]
    ]
    return [input_shape, filter_shape]

  def build_graph(parameters):
    """Build a depthwise conv graph given `parameters`."""
    input_shape, filter_shape = get_tensor_shapes(parameters)
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=input_shape)

    # Get filter input either as a placeholder or constants. Also get a list of
    # the input tensors that are represented as placeholders.
    if parameters["constant_filter"]:
      filter_input = create_tensor_data(np.float32, filter_shape)
      input_tensors = [input_tensor]
    else:
      filter_input = tf.placeholder(
          dtype=tf.float32, name="filter", shape=filter_shape)
      input_tensors = [input_tensor, filter_input]

    out = tf.nn.depthwise_conv2d(
        input_tensor,
        filter_input,
        strides=parameters["strides"],
        rate=parameters["rate"],
        padding=parameters["padding"],
        data_format=parameters["data_format"])
    return input_tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    # Build list of input values either containing 1 tensor (input) or 2 tensors
    # (input, filter) based on whether filter is constant or variable input.
    input_shape, filter_shape = get_tensor_shapes(parameters)
    values = [create_tensor_data(np.float32, input_shape)]
    if not parameters["constant_filter"]:
      values.append(create_tensor_data(np.float32, filter_shape))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=4)
@register_make_test_function()
def make_split_tests(options):
  """Generate test cases for tf.split."""
  test_parameters = [{
      "input_shape": [[1, 3, 4, 6], [2, 4, 1], [6, 4], [8]],
      "num_or_size_splits": [1, 2, 3, 4, 5],
      "axis": [0, 1, 2, 3, -4, -3, -2, -1],
  }]

  def build_graph(parameters):
    """Split a placeholder and expose only the first piece as output."""
    inp = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    pieces = tf.split(
        inp, parameters["num_or_size_splits"], parameters["axis"])
    return [inp], [pieces[0]]

  def build_inputs(parameters, sess, inputs, outputs):
    data = create_tensor_data(np.float32, parameters["input_shape"])
    return [data], sess.run(outputs, feed_dict={inputs[0]: data})

  # Many (shape, splits, axis) combinations are invalid under TF.
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=112)
@register_make_test_function()
def make_splitv_tests(options):
  """Generate test cases for tf.split with explicit size_splits (SplitV)."""
  test_parameters = [{
      "input_shape": [[1, 3, 4, 6], [2, 4, 1], [6, 4], [8]],
      "size_splits": [[2, 2], [1, 3], [4, 2], [5, 3],
                      [-1, 1], [-1, 2], [-1, 4]],
      "axis": [0, 1, 2, 3, -4, -3, -2, -1],
  }]

  def build_graph(parameters):
    """Split a placeholder by sizes and expose only the first piece."""
    inp = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    pieces = tf.split(inp, parameters["size_splits"], parameters["axis"])
    return [inp], [pieces[0]]

  def build_inputs(parameters, sess, inputs, outputs):
    data = create_tensor_data(np.float32, parameters["input_shape"])
    return [data], sess.run(outputs, feed_dict={inputs[0]: data})

  # Many (shape, size_splits, axis) combinations are invalid under TF.
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=158)
@register_make_test_function()
def make_concat_tests(options):
  """Make a set of tests to do concatenation."""
  test_parameters = [{
      "base_shape": [[1, 3, 4, 3], [3, 4]],
      "num_tensors": [1, 2, 3, 4, 5, 6],
      "axis": [0, 1, 2, 3, -3, -2, -1],
      "type": [tf.float32, tf.uint8, tf.int32, tf.int64],
  }]

  def get_shape(parameters, delta):
    """Return a tweaked version of 'base_shape'."""
    # Each successive tensor grows the concat axis by `delta` so the inputs
    # have distinct sizes along that axis.
    axis = parameters["axis"]
    shape = parameters["base_shape"][:]
    if axis < 0:
      axis += len(shape)
    if axis < len(shape):
      shape[axis] += delta
    return shape

  def build_graph(parameters):
    all_tensors = []
    for n in range(0, parameters["num_tensors"]):
      input_tensor = tf.placeholder(dtype=parameters["type"],
                                    name=("input%d" % n),
                                    shape=get_shape(parameters, n))
      all_tensors.append(input_tensor)
    out = tf.concat(all_tensors, parameters["axis"])
    return all_tensors, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    all_values = []
    for n in range(0, parameters["num_tensors"]):
      input_values = create_tensor_data(
          parameters["type"], get_shape(parameters, n))
      all_values.append(input_values)
    return all_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, all_values)))

  # Combinations with an out-of-range axis count as expected TF failures.
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=60)
@register_make_test_function()
def make_fully_connected_tests(options):
  """Make a set of tests to do fully_connected.

  The graph is a plain tf.matmul; the second operand ("filter") is either a
  constant baked into the graph or a second placeholder input, controlled by
  `constant_filter`.
  """
  test_parameters = [{
      "shape1": [[3, 3]],
      "shape2": [[3, 3]],
      "transpose_a": [True, False],
      "transpose_b": [True, False],
      "constant_filter": [True, False],
  }, {
      "shape1": [[4, 4], [1, 4], [4]],
      "shape2": [[4, 4], [4, 1], [4]],
      "transpose_a": [False],
      "transpose_b": [False],
      "constant_filter": [True, False],
  }, {
      "shape1": [[40, 37]],
      "shape2": [[37, 40]],
      "transpose_a": [False],
      "transpose_b": [False],
      "constant_filter": [True, False],
  }, {
      "shape1": [[40, 37]],
      "shape2": [[40, 37]],
      "transpose_a": [False],
      "transpose_b": [True],
      "constant_filter": [True, False],
  }]
  def build_graph(parameters):
    """Build a matmul graph given `parameters`."""
    input_tensor1 = tf.placeholder(dtype=tf.float32, name="input1",
                                   shape=parameters["shape1"])
    # Get input_tensor2 either as a placeholder or constants. Also get a list of
    # the input tensors that are represented as placeholders.
    if parameters["constant_filter"]:
      input_tensor2 = create_tensor_data(np.float32, parameters["shape2"])
      input_tensors = [input_tensor1]
    else:
      input_tensor2 = tf.placeholder(
          dtype=tf.float32, name="input2", shape=parameters["shape2"])
      input_tensors = [input_tensor1, input_tensor2]
    out = tf.matmul(input_tensor1, input_tensor2,
                    transpose_a=parameters["transpose_a"],
                    transpose_b=parameters["transpose_b"])
    return input_tensors, [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed data for the placeholder operand(s)."""
    # Build list of input values either containing 1 tensor (input_values1) or 2
    # tensors (input_values1, input_values2) based on whether the second input
    # is a constant or variable input.
    values = [create_tensor_data(np.float32, shape=parameters["shape1"])]
    if not parameters["constant_filter"]:
      values.append(create_tensor_data(np.float32, parameters["shape2"]))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=10)
@register_make_test_function()
def make_l2norm_tests(options):
  """Make a set of tests to do l2norm."""
  # Chosen parameter combinations to sweep.
  test_parameters = [{
      "input_shape": [[5, 7], [1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3],
                      [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
      "dim": [0, 1, 2, 3, [2, 3], -2],
      "epsilon": [None, 1e-12, 1e-3],
  }]

  def build_graph(parameters):
    """Normalize a placeholder along `dim`, optionally overriding epsilon."""
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    # A falsy epsilon (None) means "let TF use its default".
    if not parameters["epsilon"]:
      out = tf.nn.l2_normalize(input_tensor, parameters["dim"])
    else:
      out = tf.nn.l2_normalize(
          input_tensor, parameters["dim"], epsilon=parameters["epsilon"])
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data in [-4, 10] matching the input shape."""
    data = create_tensor_data(
        np.float32, parameters["input_shape"], min_value=-4, max_value=10)
    return [data], sess.run(outputs, feed_dict=dict(zip(inputs, [data])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=9)
@register_make_test_function()
def make_local_response_norm_tests(options):
  """Make a set of tests to do local_response_norm."""
  # Choose a set of parameters; None means "use the TF default" for that
  # keyword argument.
  test_parameters = [{
      "input_shape": [[1, 1, 1, 1], [1, 3, 4, 3], [3, 15, 14, 3]],
      "depth_radius": [None, 0, 1, 3, 5],
      "bias": [None, 0.3, -0.1],
      "alpha": [None, 2, -3],
      "beta": [None, 0.25, 2],
  }]
  def build_graph(parameters):
    # Single LRN op over a 4-D placeholder.
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    out = tf.nn.local_response_normalization(
        input_tensor, depth_radius=parameters["depth_radius"],
        bias=parameters["bias"], alpha=parameters["alpha"],
        beta=parameters["beta"])
    return [input_tensor], [out]
  def build_inputs(parameters, sess, inputs, outputs):
    # Feed random data in [-4, 10].
    input_values = create_tensor_data(
        np.float32, parameters["input_shape"], min_value=-4, max_value=10)
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_pad_tests(options):
  """Make a set of tests to do pad.

  `paddings` is supplied either as a compile-time constant or as a second
  placeholder input, controlled by `constant_paddings`.
  """
  # TODO(nupurgarg): Add test for tf.uint8.
  test_parameters = [
      # 4D:
      {
          "dtype": [tf.int32, tf.int64, tf.float32],
          "input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
          "paddings": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0],
                                                          [0, 0], [2, 3]]],
          "constant_paddings": [True, False],
      },
      # 2D:
      {
          "dtype": [tf.int32, tf.int64, tf.float32],
          "input_shape": [[1, 2]],
          "paddings": [[[0, 1], [2, 3]]],
          "constant_paddings": [True, False],
      },
      # 1D:
      {
          "dtype": [tf.int32],
          "input_shape": [[1]],
          "paddings": [[[1, 2]]],
          "constant_paddings": [False],
      },
  ]
  def build_graph(parameters):
    """Build a pad graph given `parameters`."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    # Get paddings as either a placeholder or constants.
    if parameters["constant_paddings"]:
      paddings = parameters["paddings"]
      input_tensors = [input_tensor]
    else:
      # Paddings placeholder is rank-2: one [before, after] pair per dim.
      shape = [len(parameters["paddings"]), 2]
      paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=shape)
      input_tensors = [input_tensor, paddings]
    out = tf.pad(input_tensor, paddings=paddings)
    return input_tensors, [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed the input tensor, plus the paddings when not constant."""
    values = [
        create_tensor_data(parameters["dtype"], parameters["input_shape"])
    ]
    if not parameters["constant_paddings"]:
      values.append(np.array(parameters["paddings"]))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_padv2_tests(options):
  """Make a set of tests to do padv2.

  Same structure as make_pad_tests, but additionally exercises the
  `constant_values` fill argument of tf.pad (PadV2 kernel).
  """
  # TODO(nupurgarg): Add test for tf.uint8.
  test_parameters = [
      # 4D:
      {
          "dtype": [tf.int32, tf.int64, tf.float32],
          "input_shape": [[1, 1, 2, 1], [2, 1, 1, 1]],
          "paddings": [[[0, 0], [0, 1], [2, 3], [0, 0]], [[0, 1], [0, 0],
                                                          [0, 0], [2, 3]]],
          "constant_paddings": [True, False],
          "constant_values": [0, 2],
      },
      # 2D:
      {
          "dtype": [tf.int32, tf.int64, tf.float32],
          "input_shape": [[1, 2]],
          "paddings": [[[0, 1], [2, 3]]],
          "constant_paddings": [True, False],
          "constant_values": [0, 2],
      },
      # 1D:
      {
          "dtype": [tf.int32],
          "input_shape": [[1]],
          "paddings": [[[0, 1]]],
          "constant_paddings": [False],
          "constant_values": [0, 2],
      },
  ]
  def build_graph(parameters):
    """Build a pad graph given `parameters`."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    # Get paddings as either a placeholder or constants.
    if parameters["constant_paddings"]:
      paddings = parameters["paddings"]
      input_tensors = [input_tensor]
    else:
      # Paddings placeholder is rank-2: one [before, after] pair per dim.
      shape = [len(parameters["paddings"]), 2]
      paddings = tf.placeholder(dtype=tf.int32, name="padding", shape=shape)
      input_tensors = [input_tensor, paddings]
    out = tf.pad(input_tensor, paddings=paddings,
                 constant_values=parameters["constant_values"])
    return input_tensors, [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed the input tensor, plus the paddings when not constant."""
    values = [
        create_tensor_data(parameters["dtype"], parameters["input_shape"])
    ]
    if not parameters["constant_paddings"]:
      values.append(np.array(parameters["paddings"]))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_reshape_tests(options):
  """Make a set of tests to do reshape.

  The target shape is either a graph constant or a placeholder, controlled
  by `constant_shape`; a -1 entry lets TF infer that dimension.
  """
  # All shapes below are suitable for tensors with 420 elements.
  test_parameters = [{
      "dtype": [tf.float32, tf.int32],
      "input_shape": [[3, 4, 5, 7], [4, 105], [21, 5, 2, 2], [420]],
      "output_shape": [[15, 28], [420], [1, -1, 5, 7], [-1]],
      "constant_shape": [True, False],
  }, {
      "dtype": [tf.float32],
      "input_shape": [[1]],
      "output_shape": [[]],
      "constant_shape": [True, False],
  }]
  def build_graph(parameters):
    """Build a reshape graph given `parameters`."""
    input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
                                  shape=parameters["input_shape"])
    # Get shape as either a placeholder or constants.
    if parameters["constant_shape"]:
      output_shape = parameters["output_shape"]
      input_tensors = [input_tensor]
    else:
      # The shape of the shape tensor.
      shape_tensor_shape = [len(parameters["output_shape"])]
      output_shape = tf.placeholder(
          dtype=tf.int32, name="output_shape", shape=shape_tensor_shape)
      input_tensors = [input_tensor, output_shape]
    out = tf.reshape(input_tensor, shape=output_shape)
    return input_tensors, [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed the input tensor, plus the target shape when not constant."""
    values = [
        create_tensor_data(parameters["dtype"], parameters["input_shape"])
    ]
    if not parameters["constant_shape"]:
      values.append(np.array(parameters["output_shape"]))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_shape_tests(options):
  """Make a set of tests to do shape."""
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32],
      "input_shape": [[], [0], [1, 1, 1, 3], [2, 3, 4, 5], [5, 5], [10]],
      "out_type": [tf.int32, tf.int64],
  }]

  def build_graph(parameters):
    """Build the shape op testing graph."""
    # The placeholder is deliberately created without a static shape so the
    # Shape op cannot be constant-folded away during conversion.
    input_value = tf.placeholder(dtype=parameters["input_dtype"], name="input")
    shape_out = tf.shape(input_value, out_type=parameters["out_type"])
    return [input_value], [shape_out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data of the requested shape."""
    data = create_tensor_data(parameters["input_dtype"],
                              parameters["input_shape"])
    return [data], sess.run(outputs, feed_dict=dict(zip(inputs, [data])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_rank_tests(options):
  """Make a set of tests to do rank."""
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32],
      "input_shape": [[], [0], [1, 1, 1, 3], [2, 3, 4, 5], [5, 5], [10]],
  }]

  def build_graph(parameters):
    """Build the rank op testing graph."""
    # No static shape on the placeholder: the rank must be computed at
    # runtime rather than folded to a constant.
    input_value = tf.placeholder(dtype=parameters["input_dtype"], name="input")
    rank_out = tf.rank(input_value)
    return [input_value], [rank_out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data of the requested shape."""
    data = create_tensor_data(parameters["input_dtype"],
                              parameters["input_shape"])
    return [data], sess.run(outputs, feed_dict=dict(zip(inputs, [data])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_one_hot_tests(options):
  """Make a set of tests to do one_hot.

  With `provide_optional_inputs` False only indices/depth are placeholders;
  otherwise on_value/off_value are also fed, along with explicit axis/dtype.
  """
  test_parameters = [{
      "indices_type": [tf.int32, tf.int64],
      "indices_shape": [[3], [4, 4], [1, 5], [5, 1]],
      "axis": [0, 1],
      "dtype": [tf.int32, tf.int64, tf.float32],
      "provide_optional_inputs": [True, False],
  }]
  def build_graph(parameters):
    # Indices may include -1, which one_hot maps to an all-off row.
    indices = tf.placeholder(
        dtype=parameters["indices_type"],
        name="indices",
        shape=parameters["indices_shape"])
    depth = tf.placeholder(dtype=tf.int32, name="depth", shape=())
    if not parameters["provide_optional_inputs"]:
      out = tf.one_hot(indices=indices, depth=depth)
      return [indices, depth], [out]
    on_value = tf.placeholder(
        dtype=parameters["dtype"], name="on_value", shape=())
    off_value = tf.placeholder(
        dtype=parameters["dtype"], name="off_value", shape=())
    out = tf.one_hot(
        indices=indices,
        depth=depth,
        on_value=on_value,
        off_value=off_value,
        axis=parameters["axis"],
        dtype=parameters["dtype"])
    return [indices, depth, on_value, off_value], [out]
  def build_inputs(parameters, sess, inputs, outputs):
    # Feed order must match build_graph's returned placeholder order:
    # indices, depth[, on_value, off_value].
    input_values = [
        create_tensor_data(
            parameters["indices_type"],
            shape=parameters["indices_shape"],
            min_value=-1,
            max_value=10),
        create_tensor_data(tf.int32, shape=None, min_value=1, max_value=10),
    ]
    if parameters["provide_optional_inputs"]:
      input_values.append(
          create_tensor_data(
              parameters["dtype"], shape=None, min_value=1, max_value=10))
      input_values.append(
          create_tensor_data(
              parameters["dtype"], shape=None, min_value=-1, max_value=0))
    return input_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, input_values)))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_resize_bilinear_tests(options):
  """Make a set of tests to do resize_bilinear."""
  test_parameters = [{
      "dtype": [tf.float32, tf.int32],
      "input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]],
      "size": [[1, 1], [4, 3], [2, 2], [5, 6]],
      "align_corners": [None, True, False],
  }]

  def build_graph(parameters):
    """Resize a placeholder image bilinearly to `size`."""
    image = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    resized = tf.image.resize_bilinear(
        image,
        size=parameters["size"],
        align_corners=parameters["align_corners"])
    return [image], [resized]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data of the input shape."""
    data = create_tensor_data(parameters["dtype"], parameters["input_shape"])
    return [data], sess.run(outputs, feed_dict=dict(zip(inputs, [data])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_resize_nearest_neighbor_tests(options):
  """Make a set of tests to do resize_nearest_neighbor."""
  test_parameters = [{
      "dtype": [tf.float32, tf.int32],
      "input_shape": [[1, 3, 4, 3], [1, 10, 2, 1]],
      "size": [[1, 1], [4, 3], [2, 2], [5, 6]],
      # Only align_corners=False here, unlike the bilinear tests above.
      "align_corners": [False],
  }]
  def build_graph(parameters):
    # Single resize op over a 4-D image placeholder.
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    out = tf.image.resize_nearest_neighbor(
        input_tensor,
        size=parameters["size"],
        align_corners=parameters["align_corners"])
    return [input_tensor], [out]
  def build_inputs(parameters, sess, inputs, outputs):
    # Feed random data of the input shape.
    input_values = create_tensor_data(parameters["dtype"],
                                      parameters["input_shape"])
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_sigmoid_tests(options):
  """Make a set of tests to do sigmoid."""
  test_parameters = [{
      "dtype": [tf.float32],
      "input_shape": [[1, 3, 4, 3], [4], [], [1, 2, 3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    """Apply sigmoid element-wise to a single placeholder."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    return [input_tensor], [tf.sigmoid(input_tensor)]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data of the input shape."""
    data = create_tensor_data(parameters["dtype"], parameters["input_shape"])
    return [data], sess.run(outputs, feed_dict=dict(zip(inputs, [data])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_softmax_tests(options):
  """Make a set of tests to do softmax."""
  test_parameters = [{
      "dtype": [tf.float32],
      "input_shape": [[1, 3, 4, 3], [2, 3]],
      "dim": [-1, 0],
  }, {
      "dtype": [tf.float32],
      "input_shape": [[4, 7]],
      "dim": [-1, 1],
  }]
  def build_graph(parameters):
    # NOTE: `dim` is the TF 1.x-era alias for `axis` on tf.nn.softmax.
    input_tensor = tf.placeholder(dtype=parameters["dtype"], name="input",
                                  shape=parameters["input_shape"])
    out = tf.nn.softmax(input_tensor, dim=parameters["dim"])
    return [input_tensor], [out]
  def build_inputs(parameters, sess, inputs, outputs):
    # Feed random data of the input shape.
    input_values = create_tensor_data(parameters["dtype"],
                                      parameters["input_shape"])
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_space_to_depth_tests(options):
  """Make a set of tests to do space_to_depth."""
  test_parameters = [{
      "dtype": [tf.float32, tf.int32, tf.uint8, tf.int64],
      "input_shape": [[2, 12, 24, 1]],
      "block_size": [2, 3, 4],
  }]

  def build_graph(parameters):
    """Apply space_to_depth with the configured block size."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    result = tf.space_to_depth(input_tensor,
                               block_size=parameters["block_size"])
    return [input_tensor], [result]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data of the input shape."""
    data = create_tensor_data(parameters["dtype"], parameters["input_shape"])
    return [data], sess.run(outputs, feed_dict=dict(zip(inputs, [data])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_space_to_batch_nd_tests(options):
  """Make a set of tests to do space_to_batch_nd.

  Both `block_shape` and `paddings` can independently be graph constants or
  fed placeholders.
  """
  # TODO(nupurgarg): Add test for uint8.
  test_parameters = [
      {
          "dtype": [tf.int32, tf.int64, tf.float32],
          "input_shape": [[1, 2, 2, 3], [2, 2, 4, 1]],
          "block_shape": [[1, 3], [2, 2]],
          "paddings": [[[0, 0], [0, 0]], [[0, 0], [2, 0]], [[1, 1], [1, 1]]],
          "constant_block_shape": [True, False],
          "constant_paddings": [True, False],
      },
      {
          "dtype": [tf.float32],
          "input_shape": [[2, 3, 7, 3]],
          "block_shape": [[1, 3], [2, 2]],
          "paddings": [[[0, 0], [2, 0]], [[1, 0], [1, 0]]],
          "constant_block_shape": [True, False],
          "constant_paddings": [True, False],
      },
      # Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
      {
          "dtype": [tf.float32],
          "input_shape": [[1, 4, 4, 4, 1, 1]],
          "block_shape": [[2, 2, 2]],
          "paddings": [[[0, 0], [0, 0], [0, 0]]],
          "constant_block_shape": [True, False],
          "constant_paddings": [True, False],
      },
  ]
  def build_graph(parameters):
    """Build a space_to_batch graph given `parameters`."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    input_tensors = [input_tensor]
    # Get block_shape either as a const or as a placeholder (tensor).
    if parameters["constant_block_shape"]:
      block_shape = parameters["block_shape"]
    else:
      shape = [len(parameters["block_shape"])]
      block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape)
      input_tensors.append(block_shape)
    # Get paddings either as a const or as a placeholder (tensor).
    if parameters["constant_paddings"]:
      paddings = parameters["paddings"]
    else:
      shape = [len(parameters["paddings"]), 2]
      paddings = tf.placeholder(dtype=tf.int32, name="paddings", shape=shape)
      input_tensors.append(paddings)
    out = tf.space_to_batch_nd(input_tensor, block_shape, paddings)
    return input_tensors, [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed values in the same order the placeholders were appended above."""
    values = [
        create_tensor_data(parameters["dtype"], parameters["input_shape"])
    ]
    if not parameters["constant_block_shape"]:
      values.append(np.array(parameters["block_shape"]))
    if not parameters["constant_paddings"]:
      values.append(np.array(parameters["paddings"]))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=56)
@register_make_test_function()
def make_batch_to_space_nd_tests(options):
  """Make a set of tests to do batch_to_space_nd.

  Mirrors make_space_to_batch_nd_tests: `block_shape` and `crops` can each
  be graph constants or fed placeholders.
  """
  test_parameters = [
      {
          "dtype": [tf.float32, tf.int64, tf.int32],
          "input_shape": [[12, 3, 3, 1]],
          "block_shape": [[1, 4], [2, 2], [3, 4]],
          "crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],
          "constant_block_shape": [True, False],
          "constant_crops": [True, False],
      },
      # Single batch (no-op)
      {
          "dtype": [tf.float32],
          "input_shape": [[1, 3, 3, 1]],
          "block_shape": [[1, 1]],
          "crops": [[[0, 0], [0, 0]], [[1, 1], [1, 1]]],
          "constant_block_shape": [True],
          "constant_crops": [True],
      },
      # Non-4D use case: 1 batch dimension, 3 spatial dimensions, 2 others.
      {
          "dtype": [tf.float32],
          "input_shape": [[8, 2, 2, 2, 1, 1]],
          "block_shape": [[2, 2, 2]],
          "crops": [[[0, 0], [0, 0], [0, 0]]],
          "constant_block_shape": [True, False],
          "constant_crops": [True, False],
      },
  ]
  def build_graph(parameters):
    """Build a batch_to_space graph given `parameters`."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    input_tensors = [input_tensor]
    # Get block_shape either as a const or as a placeholder (tensor).
    if parameters["constant_block_shape"]:
      block_shape = parameters["block_shape"]
    else:
      shape = [len(parameters["block_shape"])]
      block_shape = tf.placeholder(dtype=tf.int32, name="shape", shape=shape)
      input_tensors.append(block_shape)
    # Get crops either as a const or as a placeholder (tensor).
    if parameters["constant_crops"]:
      crops = parameters["crops"]
    else:
      shape = [len(parameters["crops"]), 2]
      crops = tf.placeholder(dtype=tf.int32, name="crops", shape=shape)
      input_tensors.append(crops)
    out = tf.batch_to_space_nd(input_tensor, block_shape, crops)
    return input_tensors, [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed values in the same order the placeholders were appended above."""
    values = [
        create_tensor_data(parameters["dtype"], parameters["input_shape"])
    ]
    if not parameters["constant_block_shape"]:
      values.append(np.array(parameters["block_shape"]))
    if not parameters["constant_crops"]:
      values.append(np.array(parameters["crops"]))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_transpose_tests(options):
  """Make a set of tests to do transpose."""
  # TODO(nupurgarg): Add test for uint8.
  test_parameters = [{
      "dtype": [tf.int32, tf.int64, tf.float32],
      "input_shape": [[2, 2, 3]],
      "perm": [[0, 1, 2], [0, 2, 1]],
      "constant_perm": [True, False],
  }, {
      "dtype": [tf.float32],
      "input_shape": [[1, 2, 3, 4]],
      "perm": [[0, 1, 2, 3], [3, 0, 1, 2]],
      "constant_perm": [True, False],
  }, {
      "dtype": [tf.float32],
      "input_shape": [[1, 2, 3, 4, 5]],
      "perm": [[4, 3, 2, 1, 0]],
      "constant_perm": [True, False],
  }]
  def build_graph(parameters):
    """Build a transpose graph given `parameters`."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    if parameters["constant_perm"]:
      perm = parameters["perm"]
      input_tensors = [input_tensor]
    else:
      # NOTE(review): this placeholder shape [n, 2] looks copy-pasted from
      # the pad tests — build_inputs feeds a rank-1 perm array of shape [n].
      # The 9 non-constant-perm combinations therefore fail on the TF side,
      # which appears to be what expected_tf_failures=9 below accounts for.
      # Confirm before "fixing": changing the shape would change the
      # expected-failure bookkeeping.
      shape = [len(parameters["perm"]), 2]
      perm = tf.placeholder(dtype=tf.int32, name="perm", shape=shape)
      input_tensors = [input_tensor, perm]
    out = tf.transpose(input_tensor, perm=perm)
    return input_tensors, [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed the input tensor, plus the rank-1 perm when not constant."""
    values = [
        create_tensor_data(parameters["dtype"], parameters["input_shape"])
    ]
    if not parameters["constant_perm"]:
      values.append(np.array(parameters["perm"]))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=9)
@register_make_test_function()
def make_squeeze_tests(options):
  """Make a set of tests to do squeeze.

  Axis lists include duplicates, negatives, and non-unit dims; invalid
  combinations are counted via expected_tf_failures.
  """
  test_parameters = [{
      "dtype": [tf.int32, tf.float32, tf.int64],
      "input_shape": [[1, 2, 1, 3, 1, 4, 1, 1]],
      "axis": [
          None, [], [0, 2], [4, 7], [-1, 0, 2, 0, 7, -6], [1], [2, 3, 2],
          [-1, -2, -4, -6, -8], [0, 2, 4, 6, 7], [7, 6, 4, 2, 0], [6, 6],
          [0, 1, 2, 3, 4, 5, 6, 7], [-2, -3, 1, 0, 7, -5]
      ],
  }, {
      "dtype": [tf.int32, tf.float32, tf.int64],
      "input_shape": [[1]],
      "axis": [None, [], [0], [-1]],
  }, {
      "dtype": [tf.int32, tf.float32, tf.int64],
      "input_shape": [[1, 1, 1, 1, 1]],
      "axis": [None, [], [0], [3, 0], [-2, 0, 3, 2]],
  }]
  def build_graph(parameters):
    # Single squeeze op over a placeholder.
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    out = tf.squeeze(input_tensor, axis=parameters["axis"])
    return [input_tensor], [out]
  def build_inputs(parameters, sess, inputs, outputs):
    # Feed random data of the input shape.
    input_values = create_tensor_data(parameters["dtype"],
                                      parameters["input_shape"])
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=12)
@register_make_test_function()
def make_squeeze_transpose_tests(options):
  """Make a set of tests to do squeeze followed by transpose."""
  test_parameters = [{
      "dtype": [tf.int32, tf.float32, tf.int64],
      "input_shape": [[1, 4, 10, 1]],
      "axis": [[-1], [3]],
  }]

  def build_graph(parameters):
    """Squeeze away the trailing unit axis, then transpose with perm=[1, 2]."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    squeezed = tf.squeeze(input_tensor, axis=parameters["axis"])
    transposed = tf.transpose(squeezed, perm=[1, 2])
    return [input_tensor], [transposed]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data of the input shape."""
    data = create_tensor_data(parameters["dtype"], parameters["input_shape"])
    return [data], sess.run(outputs, feed_dict=dict(zip(inputs, [data])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=0)
def _make_strided_slice_tests(options, test_parameters,
                              expected_tf_failures=0):
  """Utility function to make strided_slice_tests based on parameters.

  Shared by the concrete strided_slice test makers below. With
  `constant_indices` False, begin/end (and strides, when present) become
  extra placeholder inputs fed in that same order.
  """
  def build_graph(parameters):
    """Build graph for stride_slice test."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    if parameters["constant_indices"]:
      begin = parameters["begin"]
      end = parameters["end"]
      strides = parameters["strides"]
      tensors = [input_tensor]
    else:
      begin = tf.placeholder(
          dtype=parameters["index_type"],
          name="begin",
          shape=[len(parameters["input_shape"])])
      end = tf.placeholder(
          dtype=parameters["index_type"],
          name="end",
          shape=[len(parameters["input_shape"])])
      # Strides are optional: None means "default stride of 1 per dim".
      strides = (
          tf.placeholder(
              dtype=parameters["index_type"],
              name="strides",
              shape=[len(parameters["input_shape"])])
          if parameters["strides"] is not None else None)
      tensors = [input_tensor, begin, end]
      if strides is not None:
        tensors.append(strides)
    out = tf.strided_slice(
        input_tensor,
        begin,
        end,
        strides,
        begin_mask=parameters["begin_mask"],
        end_mask=parameters["end_mask"])
    return tensors, [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Build inputs for stride_slice test."""
    input_values = create_tensor_data(parameters["dtype"],
                                      parameters["input_shape"])
    # Map the TF index dtype to its numpy equivalent for the fed arrays.
    index_type = _TF_TYPE_INFO[parameters["index_type"]][0]
    values = [input_values]
    if not parameters["constant_indices"]:
      begin_values = np.array(parameters["begin"]).astype(index_type)
      end_values = np.array(parameters["end"]).astype(index_type)
      stride_values = (
          np.array(parameters["strides"]).astype(index_type)
          if parameters["strides"] is not None else None)
      values.append(begin_values)
      values.append(end_values)
      if stride_values is not None:
        values.append(stride_values)
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=expected_tf_failures)
@register_make_test_function()
def make_strided_slice_tests(options):
  """Make a set of tests to do strided_slice.

  Delegates graph/input construction to _make_strided_slice_tests; this
  function only defines the parameter sweep.
  """
  # TODO(soroosh): add test/support for uint8.
  test_parameters = [
      # 4-D (basic cases with const/non-const indices).
      {
          "dtype": [tf.float32, tf.int32, tf.int64],
          "index_type": [tf.int32],
          "input_shape": [[12, 2, 2, 5]],
          "strides": [None, [2, 1, 3, 1]],
          "begin": [[0, 0, 0, 0]],
          "end": [[12, 2, 2, 5]],
          "begin_mask": [None],
          "end_mask": [None],
          "shrink_axis_mask": [None],
          "constant_indices": [False, True],
      },
      # 4-D with non-trivial begin & end.
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[12, 2, 2, 5]],
          "begin": [[0, 0, 0, 0], [1, 0, 1, 0]],
          "end": [[8, 2, 2, 3], [12, 2, 2, 5]],
          "strides": [None, [2, 1, 3, 1]],
          "begin_mask": [None, 8],
          "end_mask": [None, 3],
          "shrink_axis_mask": [None, 15, -1],
          "constant_indices": [True],
      },
      # Begin, end, strides dim are different from input shape
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[12, 2, 2, 5]],
          "begin": [[0]],
          "end": [[1]],
          "strides": [None, [1]],
          "begin_mask": [0],
          "end_mask": [0],
          "shrink_axis_mask": [1],
          "constant_indices": [True],
      },
      # 2-D
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[2, 3]],
          "begin": [[0, 0]],
          "end": [[2, 2]],
          "strides": [None, [2, 2]],
          "begin_mask": [None, 1, 2],
          "end_mask": [None, 1, 2],
          "shrink_axis_mask": [None, 1, 2, 3, -1],
          "constant_indices": [False, True],
      },
      # Negative strides
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[2, 3]],
          "begin": [[0, -1]],
          "end": [[2, -3]],
          "strides": [[1, -1]],
          "begin_mask": [None, 1, 2],
          "end_mask": [None, 1, 2],
          "shrink_axis_mask": [None, 1, 2, 3, -1],
          "constant_indices": [False],
      },
  ]
  _make_strided_slice_tests(options, test_parameters, expected_tf_failures=2)
@register_make_test_function()
def make_strided_slice_1d_exhaustive_tests(options):
  """Make a set of exhaustive tests for 1D strided_slice.

  Exercises every combination of begin/end in [-2, 2] and strides in
  {-2, -1, 1, 2} over a length-3 vector, with non-constant indices.
  """
  test_parameters = [
      # 1-D Exhaustive
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[3]],
          "begin": [[-2], [-1], [0], [1], [2]],
          "end": [[-2], [-1], [0], [1], [2]],
          "strides": [[-2], [-1], [1], [2]],
          "begin_mask": [0, 1],
          "end_mask": [0, 1],
          "shrink_axis_mask": [0],
          "constant_indices": [False],
      },
  ]
  _make_strided_slice_tests(options, test_parameters)
# For verifying https://github.com/tensorflow/tensorflow/issues/23599
# TODO(chaomei): refactor the test to cover more cases, like negative stride,
# negative array index etc.
@register_make_test_function()
def make_resolve_constant_strided_slice_tests(options):
  """Make a set of tests to show strided_slice yields incorrect results.

  The graph slices a *constant* tensor (data[:, :2]) so the converter's
  constant-resolution path for strided_slice is exercised; the placeholder
  input only exists to keep the graph from folding entirely.
  """
  test_parameters = [{
      # Dummy parameter so make_zip_of_tests produces exactly one test case.
      "unused_iteration_counter": [1],
  }]
  def build_graph(parameters):
    """Build the strided_slice op testing graph."""
    del parameters
    input_values = tf.placeholder(dtype=tf.float32, shape=[4, 2])
    data = tf.constant([[0, 1, 2, 3],
                        [4, 5, 6, 7],
                        [8, 9, 10, 11],
                        [12, 13, 14, 15]], tf.float32)
    return [input_values], [input_values + data[:, :2]]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed zeros so the output equals the sliced constant."""
    del parameters
    input_values = np.zeros([4, 2], dtype=np.float32)
    return [input_values], sess.run(
        outputs, feed_dict={inputs[0]: input_values})
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_lstm_tests(options):
  """Make a set of tests to do basic Lstm cell.

  NOTE: this test depends on exact graph node names ("rnn/basic_lstm_cell/
  kernel", the rnn_states strings below), so the graph construction must not
  be reordered or renamed.
  """
  test_parameters = [
      {
          "dtype": [tf.float32],
          "num_batchs": [1],
          "time_step_size": [1],
          "input_vec_size": [3],
          "num_cells": [4],
          "split_tflite_lstm_inputs": [False],
      },
  ]
  def build_graph(parameters):
    """Build a simple graph with BasicLSTMCell."""
    num_batchs = parameters["num_batchs"]
    time_step_size = parameters["time_step_size"]
    input_vec_size = parameters["input_vec_size"]
    num_cells = parameters["num_cells"]
    inputs_after_split = []
    # One placeholder per time step; static_rnn consumes the list directly.
    for i in xrange(time_step_size):
      one_timestamp_input = tf.placeholder(
          dtype=parameters["dtype"],
          name="split_{}".format(i),
          shape=[num_batchs, input_vec_size])
      inputs_after_split.append(one_timestamp_input)
    # Currently lstm identifier has a few limitations: only supports
    # forget_bias == 0, inner state activation == tanh.
    # TODO(zhixianyan): Add another test with forget_bias == 1.
    # TODO(zhixianyan): Add another test with relu as activation.
    lstm_cell = tf.contrib.rnn.BasicLSTMCell(
        num_cells, forget_bias=0.0, state_is_tuple=True)
    cell_outputs, _ = rnn.static_rnn(
        lstm_cell, inputs_after_split, dtype=tf.float32)
    # Only the final time step's output is exported.
    out = cell_outputs[-1]
    return inputs_after_split, [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed inputs, assign variables, and freeze graph."""
    with tf.variable_scope("", reuse=True):
      # Variable names are fixed by BasicLSTMCell's internal scoping.
      kernel = tf.get_variable("rnn/basic_lstm_cell/kernel")
      bias = tf.get_variable("rnn/basic_lstm_cell/bias")
    kernel_values = create_tensor_data(
        parameters["dtype"], [kernel.shape[0], kernel.shape[1]], -1, 1)
    bias_values = create_tensor_data(parameters["dtype"], [bias.shape[0]], 0,
                                     1)
    sess.run(tf.group(kernel.assign(kernel_values), bias.assign(bias_values)))
    num_batchs = parameters["num_batchs"]
    time_step_size = parameters["time_step_size"]
    input_vec_size = parameters["input_vec_size"]
    input_values = []
    for _ in xrange(time_step_size):
      tensor_data = create_tensor_data(parameters["dtype"],
                                       [num_batchs, input_vec_size], 0, 1)
      input_values.append(tensor_data)
    out = sess.run(outputs, feed_dict=dict(zip(inputs, input_values)))
    return input_values, out
  # TODO(zhixianyan): Automatically generate rnn_states for lstm cell.
  extra_toco_options = ExtraTocoOptions()
  extra_toco_options.rnn_states = (
      "{state_array:rnn/BasicLSTMCellZeroState/zeros,"
      "back_edge_source_array:rnn/basic_lstm_cell/Add_1,size:4},"
      "{state_array:rnn/BasicLSTMCellZeroState/zeros_1,"
      "back_edge_source_array:rnn/basic_lstm_cell/Mul_2,size:4}")
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      extra_toco_options,
      use_frozen_graph=True)
def make_l2_pool(input_tensor, ksize, strides, padding, data_format):
  """Given an input perform a sequence of TensorFlow ops to produce l2pool."""
  # L2 pooling = sqrt(average-pool of squared values).
  squared = tf.square(input_tensor)
  pooled = tf.nn.avg_pool(
      squared, ksize=ksize, strides=strides, padding=padding,
      data_format=data_format)
  return tf.sqrt(pooled)
@register_make_test_function()
def make_topk_tests(options):
  """Make a set of tests to do topk."""
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32],
      "input_shape": [[10], [5, 20]],
      "input_k": [None, 1, 3],
  }]

  def build_graph(parameters):
    """Build the topk op testing graph."""
    input_value = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    if parameters["input_k"] is None:
      # k baked into the graph as a constant; only the values are fed.
      k = tf.constant(3, name="k")
      inputs = [input_value]
    else:
      k = tf.placeholder(dtype=tf.int32, name="input_k", shape=[])
      inputs = [input_value, k]
    out = tf.nn.top_k(input_value, k)
    # Only the indices output (out[1]) is exported.
    return inputs, [out[1]]

  def build_inputs(parameters, sess, inputs, outputs):
    feed = [
        create_tensor_data(parameters["input_dtype"], parameters["input_shape"])
    ]
    if parameters["input_k"] is not None:
      feed.append(np.array(parameters["input_k"], dtype=np.int32))
    return feed, sess.run(outputs, feed_dict=dict(zip(inputs, feed)))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_arg_min_max_tests(options):
  """Make a set of tests to do arg_max."""
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32],
      "input_shape": [[], [1, 1, 1, 3], [2, 3, 4, 5], [2, 3, 3], [5, 5], [10]],
      "output_type": [tf.int32, tf.int64],
      "is_arg_max": [True],
  }]
  def build_graph(parameters):
    """Build the arg_min/arg_max op testing graph."""
    input_value = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    # Pick a random valid axis at graph-construction time. For the scalar
    # input shape ([]) this still yields axis 0, which TF rejects --
    # presumably one source of the expected_tf_failures below; confirm.
    axis = random.randint(0, max(len(parameters["input_shape"]) - 1, 0))
    if parameters["is_arg_max"]:
      out = tf.arg_max(input_value, axis, output_type=parameters["output_type"])
    else:
      out = tf.arg_min(input_value, axis, output_type=parameters["output_type"])
    return [input_value], [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data for the single input tensor and run the graph."""
    input_value = create_tensor_data(parameters["input_dtype"],
                                     parameters["input_shape"])
    return [input_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value])))
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=4)
@register_make_test_function()
def make_equal_tests(options):
  """Make a set of tests to do equal."""
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64],
      "input_shape_pair": [([], []),
                           ([1, 1, 1, 3], [1, 1, 1, 3]),
                           ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
                           ([5, 5], [1]), ([10], [2, 4, 10])],
  }]

  def build_graph(parameters):
    """Build the equal op testing graph."""
    dtype = parameters["input_dtype"]
    lhs_shape, rhs_shape = parameters["input_shape_pair"]
    input_value1 = tf.placeholder(dtype=dtype, name="input1", shape=lhs_shape)
    input_value2 = tf.placeholder(dtype=dtype, name="input2", shape=rhs_shape)
    return [input_value1, input_value2], [tf.equal(input_value1, input_value2)]

  def build_inputs(parameters, sess, inputs, outputs):
    feed = [
        create_tensor_data(parameters["input_dtype"], shape)
        for shape in parameters["input_shape_pair"]
    ]
    return feed, sess.run(outputs, feed_dict=dict(zip(inputs, feed)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=3)
@register_make_test_function()
def make_not_equal_tests(options):
  """Make a set of tests to do not equal."""
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64],
      "input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
                           ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
                           ([5, 5], [1]), ([10], [2, 4, 10])],
  }]

  def build_graph(parameters):
    """Build the not_equal op testing graph."""
    dtype = parameters["input_dtype"]
    lhs_shape, rhs_shape = parameters["input_shape_pair"]
    input_value1 = tf.placeholder(dtype=dtype, name="input1", shape=lhs_shape)
    input_value2 = tf.placeholder(dtype=dtype, name="input2", shape=rhs_shape)
    return [input_value1, input_value2], [
        tf.not_equal(input_value1, input_value2)
    ]

  def build_inputs(parameters, sess, inputs, outputs):
    feed = [
        create_tensor_data(parameters["input_dtype"], shape)
        for shape in parameters["input_shape_pair"]
    ]
    return feed, sess.run(outputs, feed_dict=dict(zip(inputs, feed)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=3)
@register_make_test_function()
def make_greater_tests(options):
  """Make a set of tests to do greater."""
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64],
      "input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
                           ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
                           ([5, 5], [1]), ([10], [2, 4, 10])],
  }]

  def build_graph(parameters):
    """Build the greater op testing graph."""
    dtype = parameters["input_dtype"]
    lhs_shape, rhs_shape = parameters["input_shape_pair"]
    input_value1 = tf.placeholder(dtype=dtype, name="input1", shape=lhs_shape)
    input_value2 = tf.placeholder(dtype=dtype, name="input2", shape=rhs_shape)
    return [input_value1, input_value2], [
        tf.greater(input_value1, input_value2)
    ]

  def build_inputs(parameters, sess, inputs, outputs):
    feed = [
        create_tensor_data(parameters["input_dtype"], shape)
        for shape in parameters["input_shape_pair"]
    ]
    return feed, sess.run(outputs, feed_dict=dict(zip(inputs, feed)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=3)
@register_make_test_function()
def make_greater_equal_tests(options):
  """Make a set of tests to do greater_equal."""
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64],
      "input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
                           ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
                           ([5, 5], [1]), ([10], [2, 4, 10])],
  }]

  def build_graph(parameters):
    """Build the greater_equal op testing graph."""
    dtype = parameters["input_dtype"]
    lhs_shape, rhs_shape = parameters["input_shape_pair"]
    input_value1 = tf.placeholder(dtype=dtype, name="input1", shape=lhs_shape)
    input_value2 = tf.placeholder(dtype=dtype, name="input2", shape=rhs_shape)
    return [input_value1, input_value2], [
        tf.greater_equal(input_value1, input_value2)
    ]

  def build_inputs(parameters, sess, inputs, outputs):
    feed = [
        create_tensor_data(parameters["input_dtype"], shape)
        for shape in parameters["input_shape_pair"]
    ]
    return feed, sess.run(outputs, feed_dict=dict(zip(inputs, feed)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=3)
@register_make_test_function()
def make_less_tests(options):
  """Make a set of tests to do less."""
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64],
      "input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
                           ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
                           ([5, 5], [1]), ([10], [2, 4, 10])],
  }]

  def build_graph(parameters):
    """Build the less op testing graph."""
    dtype = parameters["input_dtype"]
    lhs_shape, rhs_shape = parameters["input_shape_pair"]
    input_value1 = tf.placeholder(dtype=dtype, name="input1", shape=lhs_shape)
    input_value2 = tf.placeholder(dtype=dtype, name="input2", shape=rhs_shape)
    return [input_value1, input_value2], [tf.less(input_value1, input_value2)]

  def build_inputs(parameters, sess, inputs, outputs):
    feed = [
        create_tensor_data(parameters["input_dtype"], shape)
        for shape in parameters["input_shape_pair"]
    ]
    return feed, sess.run(outputs, feed_dict=dict(zip(inputs, feed)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=3)
@register_make_test_function()
def make_less_equal_tests(options):
  """Make a set of tests to do less_equal."""
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64],
      "input_shape_pair": [([1, 1, 1, 3], [1, 1, 1, 3]),
                           ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
                           ([5, 5], [1]), ([10], [2, 4, 10])],
  }]

  def build_graph(parameters):
    """Build the less_equal op testing graph."""
    dtype = parameters["input_dtype"]
    lhs_shape, rhs_shape = parameters["input_shape_pair"]
    input_value1 = tf.placeholder(dtype=dtype, name="input1", shape=lhs_shape)
    input_value2 = tf.placeholder(dtype=dtype, name="input2", shape=rhs_shape)
    return [input_value1, input_value2], [
        tf.less_equal(input_value1, input_value2)
    ]

  def build_inputs(parameters, sess, inputs, outputs):
    feed = [
        create_tensor_data(parameters["input_dtype"], shape)
        for shape in parameters["input_shape_pair"]
    ]
    return feed, sess.run(outputs, feed_dict=dict(zip(inputs, feed)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=3)
@register_make_test_function()
def make_floor_tests(options):
  """Make a set of tests to do floor."""
  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    """Build the floor op testing graph."""
    input_value = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input1",
        shape=parameters["input_shape"])
    return [input_value], [tf.floor(input_value)]

  def build_inputs(parameters, sess, inputs, outputs):
    value = create_tensor_data(parameters["input_dtype"],
                               parameters["input_shape"])
    return [value], sess.run(outputs, feed_dict={inputs[0]: value})

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_ceil_tests(options):
  """Make a set of tests to do ceil."""
  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    """Build the ceil op testing graph."""
    input_value = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input1",
        shape=parameters["input_shape"])
    return [input_value], [tf.ceil(input_value)]

  def build_inputs(parameters, sess, inputs, outputs):
    value = create_tensor_data(parameters["input_dtype"],
                               parameters["input_shape"])
    return [value], sess.run(outputs, feed_dict={inputs[0]: value})

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_round_tests(options):
  """Make a set of tests to do round."""
  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    """Build the round op testing graph."""
    input_value = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input1",
        shape=parameters["input_shape"])
    return [input_value], [tf.round(input_value)]

  def build_inputs(parameters, sess, inputs, outputs):
    value = create_tensor_data(parameters["input_dtype"],
                               parameters["input_shape"])
    return [value], sess.run(outputs, feed_dict={inputs[0]: value})

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_neg_tests(options):
  """Make a set of tests to do neg."""
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32],
      "input_shape": [[1, 3, 4, 3], [5], []],
  }]

  def build_graph(parameters):
    """Build the neg op testing graph."""
    input_tensor = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    return [input_tensor], [tf.negative(input_tensor)]

  def build_inputs(parameters, sess, inputs, outputs):
    value = create_tensor_data(parameters["input_dtype"],
                               parameters["input_shape"])
    return [value], sess.run(outputs, feed_dict=dict(zip(inputs, [value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_zeros_like_tests(options):
  """Make a set of tests to do zeros_like."""
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64],
      "input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
  }]
  def build_graph(parameters):
    """Build the zeros_like op testing graph."""
    input_tensor = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    zeros = tf.zeros_like(input_tensor)
    # This maximum node is so that toco can perform the constants-propagation
    # through the above zeros_like, which it can't do if the output of the
    # zeros_like is an output of the whole graph (graph outputs can't be
    # constants). If toco does not perform such constants-propagation then
    # the resulting tflite graph retains the zeros_like as a Fill op, which
    # is unsupported by TFLite, even as a custom op.
    out = tf.maximum(zeros, input_tensor)
    return [input_tensor], [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data for the single input tensor and run the graph."""
    values = create_tensor_data(parameters["input_dtype"],
                                parameters["input_shape"])
    return [values], sess.run(outputs, feed_dict=dict(zip(inputs, [values])))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
def _make_elementwise_tests(op):
  """Make a set of tests to do element-wise operations."""

  def f(options):
    """Actual function that generates examples."""
    test_parameters = [{
        "input_dtype": [tf.float32],
        "input_shape": [[], [1], [1, 2], [5, 6, 7, 8], [3, 4, 5, 6]],
    }]

    def build_graph(parameters):
      """Build the unary op testing graph."""
      input_value = tf.placeholder(
          dtype=parameters["input_dtype"],
          name="input1",
          shape=parameters["input_shape"])
      return [input_value], [op(input_value)]

    def build_inputs(parameters, sess, inputs, outputs):
      value = create_tensor_data(parameters["input_dtype"],
                                 parameters["input_shape"])
      return [value], sess.run(outputs, feed_dict={inputs[0]: value})

    make_zip_of_tests(options, test_parameters, build_graph, build_inputs)

  return f
@register_make_test_function()
def make_sin_tests(options):
  """Make a set of tests to do sin."""
  make_tests = _make_elementwise_tests(tf.sin)
  return make_tests(options)
@register_make_test_function()
def make_log_tests(options):
  """Make a set of tests to do log."""
  make_tests = _make_elementwise_tests(tf.log)
  return make_tests(options)
@register_make_test_function()
def make_sqrt_tests(options):
  """Make a set of tests to do sqrt."""
  make_tests = _make_elementwise_tests(tf.sqrt)
  return make_tests(options)
@register_make_test_function()
def make_rsqrt_tests(options):
  """Make a set of tests to do 1/sqrt."""
  make_tests = _make_elementwise_tests(tf.rsqrt)
  return make_tests(options)
@register_make_test_function()
def make_square_tests(options):
  """Make a set of tests to do square."""
  make_tests = _make_elementwise_tests(tf.square)
  return make_tests(options)
@register_make_test_function()
def make_where_tests(options):
  """Make a set of tests to do where."""
  test_parameters = [
      {
          "input_dtype": [tf.float32, tf.int32],
          "input_shape_set": [([1, 2, 3, 4], [1, 2, 3, 4]),],
          "use_where_v2": [False, True],
      },
      {
          # Mismatched (broadcast) shapes are only exercised with where_v2.
          "input_dtype": [tf.float32, tf.int32],
          "input_shape_set": [([1, 2, 3, 4], [1, 2, 3, 1]),],
          "use_where_v2": [True],
      },
  ]

  def build_graph(parameters):
    """Build the where op testing graph."""
    shape1, shape2 = parameters["input_shape_set"]
    input_value1 = tf.placeholder(
        dtype=parameters["input_dtype"], name="input2", shape=shape1)
    input_value2 = tf.placeholder(
        dtype=parameters["input_dtype"], name="input3", shape=shape2)
    condition = tf.less(input_value1, input_value2)
    where = tf.where_v2 if parameters["use_where_v2"] else tf.where
    return [input_value1, input_value2], [
        where(condition, input_value1, input_value2)
    ]

  def build_inputs(parameters, sess, inputs, outputs):
    feed = [
        create_tensor_data(parameters["input_dtype"], shape)
        for shape in parameters["input_shape_set"]
    ]
    return feed, sess.run(outputs, feed_dict=dict(zip(inputs, feed)))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_slice_tests(options):
  """Make a set of tests to do slice."""
  # TODO(renjieliu): add test/support for uint8.
  test_parameters = [
      # 4-D
      {
          "dtype": [tf.float32, tf.int32, tf.int64, tf.string],
          "index_type": [tf.int32, tf.int64],
          "input_shape": [[12, 2, 2, 5]],
          "begin": [[0, 0, 0, 0], [1, 0, 1, 0]],
          "size": [[8, 2, 2, 3], [11, 2, 1, 5]],
      },
      # 2-D
      {
          "dtype": [tf.float32, tf.int32, tf.int64, tf.string],
          "index_type": [tf.int32, tf.int64],
          "input_shape": [[2, 3]],
          "begin": [[0, 0], [1, 0]],
          "size": [[2, 3], [2, 2]],
      },
      # 4-D with size -1
      {
          "dtype": [tf.float32],
          "index_type": [tf.int32],
          "input_shape": [[4, 4, 4, 4]],
          "begin": [[0, 0, 0, 0], [1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0],
                    [0, 0, 0, 1]],
          "size": [[-1, 1, 1, 1], [1, -1, 1, 1], [1, 1, -1, 1], [1, 1, 1, -1]],
      },
  ]
  def build_graph(parameters):
    """Build graph for slice test."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    # begin and size are runtime inputs (one element per input dimension).
    begin = tf.placeholder(
        dtype=parameters["index_type"],
        name="begin",
        shape=[len(parameters["input_shape"])])
    size = tf.placeholder(
        dtype=parameters["index_type"],
        name="size",
        shape=[len(parameters["input_shape"])])
    tensors = [input_tensor, begin, size]
    out = tf.slice(input_tensor, begin, size)
    return tensors, [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Build inputs for slice test."""
    input_values = create_tensor_data(parameters["dtype"],
                                      parameters["input_shape"])
    # Convert begin/size to the numpy dtype matching the graph's index_type;
    # presumably _TF_TYPE_INFO maps tf dtype -> (numpy type, ...) -- it is
    # defined elsewhere in this file.
    index_type = _TF_TYPE_INFO[parameters["index_type"]][0]
    begin_values = np.array(parameters["begin"]).astype(index_type)
    size_values = np.array(parameters["size"]).astype(index_type)
    values = [input_values, begin_values, size_values]
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
  # Some begin/size combinations exceed the input bounds; those cases are
  # expected to fail on the TF side.
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=24)
@register_make_test_function()
def make_conv2d_transpose_tests(options):
  """Make a set of tests to do transpose_conv."""
  test_parameters = [{
      "input_shape": [[1, 50, 54, 3]],
      "filter_shape": [[1, 1, 8, 3], [1, 2, 8, 3], [1, 3, 8, 3], [1, 4, 8, 3]],
      "output_shape": [[1, 100, 108, 8]],
      "dynamic_output_shape": [True, False],
  }, {
      "input_shape": [[1, 16, 1, 512]],
      "filter_shape": [[4, 1, 512, 512]],
      "output_shape": [[1, 32, 1, 512]],
      "dynamic_output_shape": [True, False],
  }, {
      "input_shape": [[1, 128, 128, 1]],
      "filter_shape": [[4, 4, 1, 1]],
      "output_shape": [[1, 256, 256, 1]],
      "dynamic_output_shape": [True, False],
  }]
  def build_graph(parameters):
    """Build a transpose_conv graph given `parameters`."""
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    filter_tensor = tf.placeholder(
        dtype=tf.float32, name="filter", shape=parameters["filter_shape"])
    input_tensors = [input_tensor, filter_tensor]
    if parameters["dynamic_output_shape"]:
      # Output shape supplied at runtime as a rank-1 int32 tensor.
      output_shape = tf.placeholder(dtype=tf.int32, shape=[4])
      input_tensors.append(output_shape)
    else:
      # Output shape baked into the graph as a Python list.
      output_shape = parameters["output_shape"]
    # Strides and padding are fixed; only shapes vary across parameters.
    out = tf.nn.conv2d_transpose(
        input_tensor,
        filter_tensor,
        output_shape=output_shape,
        padding="SAME",
        strides=(1, 2, 2, 1))
    return input_tensors, [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random input/filter data (plus the shape when dynamic)."""
    values = [
        create_tensor_data(np.float32, parameters["input_shape"]),
        create_tensor_data(np.float32, parameters["filter_shape"])
    ]
    if parameters["dynamic_output_shape"]:
      values.append(np.array(parameters["output_shape"]))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
# Computing the output_shape (the input_sizes argument) for
# tf.nn.conv2d_transpose is fairly complicated, so we first perform a
# "conv2d" operation to obtain an output tensor, then feed that output into
# tf.nn.conv2d_backprop_input.
# This test therefore depends on the correctness of the "conv2d" operation.
@register_make_test_function()
def make_transpose_conv_tests(options):
  """Make a set of tests to do transpose_conv."""
  # Tensorflow only supports equal strides
  test_parameters = [{
      "input_shape": [[1, 3, 4, 1], [1, 10, 10, 3], [3, 20, 20, 1]],
      "filter_size": [[1, 1], [1, 2], [3, 3]],
      "strides": [[1, 1, 1, 1], [1, 3, 3, 1]],
      "padding": ["SAME", "VALID"],
      "data_format": ["NHWC"],
      "channel_multiplier": [1, 2],
  }]
  def get_tensor_shapes(parameters):
    """Derive the (input, filter) shapes for one parameter combination."""
    input_shape = parameters["input_shape"]
    filter_size = parameters["filter_size"]
    # Filter shape is [h, w, in_channels, channel_multiplier].
    filter_shape = filter_size + [
        input_shape[3], parameters["channel_multiplier"]
    ]
    return [input_shape, filter_shape]
  def build_graph(parameters):
    """Build a transpose_conv graph given `parameters`."""
    input_shape, filter_shape = get_tensor_shapes(parameters)
    input_tensor = tf.placeholder(
        dtype=tf.float32, name="input", shape=input_shape)
    filter_input = tf.placeholder(
        dtype=tf.float32, name="filter", shape=filter_shape)
    # Forward conv2d first; its output drives conv2d_backprop_input (see the
    # comment above this function).
    conv_outputs = tf.nn.conv2d(
        input_tensor,
        filter_input,
        strides=parameters["strides"],
        padding=parameters["padding"],
        data_format=parameters["data_format"])
    out = tf.nn.conv2d_backprop_input(
        input_shape,
        filter_input,
        conv_outputs,
        strides=parameters["strides"],
        padding=parameters["padding"],
        data_format=parameters["data_format"])
    input_tensors = [input_tensor, filter_input]
    return input_tensors, [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random input and filter data and run the graph."""
    input_shape, filter_shape = get_tensor_shapes(parameters)
    values = [
        create_tensor_data(np.float32, input_shape),
        create_tensor_data(np.float32, filter_shape)
    ]
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_tile_tests(options):
  """Make a set of tests to do tile."""
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.bool],
      "input_shape": [[3, 2, 1], [2, 2, 2]],
      "multiplier_dtype": [tf.int32, tf.int64],
      "multiplier_shape": [[3]]
  }]

  def build_graph(parameters):
    """Build the tile op testing graph."""
    input_value = tf.placeholder(
        dtype=parameters["input_dtype"],
        shape=parameters["input_shape"],
        name="input")
    multiplier_value = tf.placeholder(
        dtype=parameters["multiplier_dtype"],
        shape=parameters["multiplier_shape"],
        name="multiplier")
    return [input_value, multiplier_value], [
        tf.tile(input_value, multiplier_value)
    ]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value = create_tensor_data(parameters["input_dtype"],
                                     parameters["input_shape"])
    # Multipliers must be non-negative for tile.
    multipliers_value = create_tensor_data(
        parameters["multiplier_dtype"],
        parameters["multiplier_shape"],
        min_value=0)
    feed = dict(zip(inputs, [input_value, multipliers_value]))
    return [input_value, multipliers_value], sess.run(outputs, feed_dict=feed)

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_expand_dims_tests(options):
  """Make a set of tests to do expand_dims."""
  test_parameters = [{
      "input_type": [tf.float32, tf.int32],
      "input_shape": [[5, 4]],
      "axis_value": [0, 1, 2, -1, -2, -3],
      "constant_axis": [True, False],
  }]
  def build_graph(parameters):
    """Build the expand_dims op testing graph."""
    inputs = []
    input_value = tf.placeholder(
        dtype=parameters["input_type"],
        name="input",
        shape=parameters["input_shape"])
    inputs.append(input_value)
    if parameters["constant_axis"]:
      # Axis baked into the graph as a 1-element constant.
      axis_value = tf.constant(
          parameters["axis_value"], dtype=tf.int32, shape=[1])
    else:
      # Axis supplied at runtime; it becomes an additional graph input.
      axis_value = tf.placeholder(dtype=tf.int32, name="axis", shape=[1])
      inputs.append(axis_value)
    out = tf.expand_dims(input_value, axis=axis_value)
    return inputs, [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random input data (and the axis, when not constant)."""
    input_values = []
    input_values.append(
        create_tensor_data(parameters["input_type"], parameters["input_shape"]))
    if not parameters["constant_axis"]:
      input_values.append(np.array([parameters["axis_value"]], dtype=np.int32))
    return input_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, input_values)))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_sparse_to_dense_tests(options):
  """Make a set of tests to do sparse to dense."""
  test_parameters = [{
      "value_dtype": [tf.float32, tf.int32, tf.int64],
      "index_dtype": [tf.int32, tf.int64],
      "value_count": [1, 3, 6, 8],
      "dense_shape": [[15], [3, 10], [4, 4, 4, 4], [7, 10, 9]],
      "default_value": [0, -1],
      "value_is_scalar": [True, False],
  }]
  # Return a single value for 1-D dense shape, but a tuple for other shapes.
  def generate_index(dense_shape):
    if len(dense_shape) == 1:
      return np.random.randint(dense_shape[0])
    else:
      index = []
      for shape in dense_shape:
        index.append(np.random.randint(shape))
      return tuple(index)
  def build_graph(parameters):
    """Build the sparse_to_dense op testing graph."""
    dense_shape = parameters["dense_shape"]
    # Special handle for value_is_scalar case.
    # value_count must be 1.
    if parameters["value_is_scalar"] and parameters["value_count"] == 1:
      value = tf.placeholder(
          name="value", dtype=parameters["value_dtype"], shape=())
    else:
      value = tf.placeholder(
          name="value",
          dtype=parameters["value_dtype"],
          shape=[parameters["value_count"]])
    # Draw distinct random indices; a set guarantees uniqueness so
    # validate_indices can safely be False below.
    indices = set()
    while len(indices) < parameters["value_count"]:
      indices.add(generate_index(dense_shape))
    indices = tf.constant(tuple(indices), dtype=parameters["index_dtype"])
    # TODO(renjieliu): Add test for validate_indices case.
    out = tf.sparse_to_dense(
        indices,
        dense_shape,
        value,
        parameters["default_value"],
        validate_indices=False)
    return [value], [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed scalar or vector values matching the graph's value placeholder."""
    if parameters["value_is_scalar"] and parameters["value_count"] == 1:
      input_value = create_scalar_data(parameters["value_dtype"])
    else:
      input_value = create_tensor_data(parameters["value_dtype"],
                                       [parameters["value_count"]])
    return [input_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value])))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_pack_tests(options):
  """Make a set of tests to do stack."""
  test_parameters = [
      # Avoid creating all combinations to keep the test size small.
      {
          "dtype": [tf.float32],
          "base_shape": [[3, 4, 3], [3, 4], [5]],
          "num_tensors": [1, 2, 3, 4, 5, 6],
          "axis": [0, 1, 2, 3],
          "additional_shape": [1, 2, 3],
      },
      {
          "dtype": [tf.int32],
          "base_shape": [[3, 4, 3], [3, 4], [5]],
          "num_tensors": [6],
          "axis": [0, 1, 2, 3],
          "additional_shape": [1, 2, 3],
      },
      {
          "dtype": [tf.int64],
          "base_shape": [[3, 4, 3], [3, 4], [5]],
          "num_tensors": [5],
          "axis": [0, 1, 2, 3],
          "additional_shape": [1, 2, 3],
      }
  ]
  def get_shape(parameters):
    """Return a tweaked version of 'base_shape'."""
    axis = parameters["axis"]
    shape = parameters["base_shape"][:]
    # Grow the stacked dimension when the axis exists in the base shape.
    if axis < len(shape):
      shape[axis] += parameters["additional_shape"]
    return shape
  def build_graph(parameters):
    """Build the stack op testing graph with num_tensors inputs."""
    all_tensors = []
    for n in range(0, parameters["num_tensors"]):
      input_tensor = tf.placeholder(
          dtype=parameters["dtype"],
          name=("input%d" % n),
          shape=get_shape(parameters))
      all_tensors.append(input_tensor)
    out = tf.stack(all_tensors, parameters["axis"])
    return all_tensors, [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data for each of the num_tensors inputs."""
    all_values = []
    for _ in range(0, parameters["num_tensors"]):
      # NOTE(review): data is generated as np.float32 even when
      # parameters["dtype"] is int32/int64; feeding presumably coerces the
      # values to the placeholder dtype -- confirm this is intended.
      input_values = create_tensor_data(np.float32, get_shape(parameters))
      all_values.append(input_values)
    return all_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, all_values)))
  # Out-of-range axis values are expected to fail on the TF side.
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=72)
@register_make_test_function()
def make_unpack_tests(options):
  """Make a set of tests to do unstack."""
  test_parameters = [{
      "base_shape": [[3, 4, 3], [3, 4], [5, 6, 7, 8]],
      "axis": [0, 1, 2, 3],
  }]

  def get_valid_axis(parameters):
    """Clamp 'axis' down to the last valid dimension of 'base_shape'."""
    rank = len(parameters["base_shape"])
    return min(parameters["axis"], rank - 1)

  def build_graph(parameters):
    input_tensor = tf.placeholder(
        dtype=tf.float32, name=("input"), shape=parameters["base_shape"])
    outs = tf.unstack(input_tensor, axis=get_valid_axis(parameters))
    # Only the first unstacked slice is exported.
    return [input_tensor], [outs[0]]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value = create_tensor_data(np.float32, shape=parameters["base_shape"])
    return [input_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_range_tests(options):
  """Make a set of tests to do range."""
  test_parameters = [{
      "dtype": [tf.int32, tf.float32],
      "offset": [10, 100, 1000],
      "delta": [1, 2, 3, 4, -1, -2, -3, -4],
  }]

  def build_graph(parameters):
    """Build the range op testing graph."""
    input_tensor = tf.placeholder(
        dtype=parameters["dtype"], name=("start"), shape=[])
    delta = parameters["delta"]
    # Make the offset's sign follow the delta so the range is non-empty.
    offset = -parameters["offset"] if delta < 0 else parameters["offset"]
    limit_tensor = input_tensor + offset
    delta_tensor = tf.constant(delta, dtype=parameters["dtype"])
    out = tf.range(input_tensor, limit_tensor, delta_tensor)
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    input_value = create_scalar_data(parameters["dtype"])
    return [input_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_fill_tests(options):
  """Make a set of tests to do fill."""
  test_parameters = [{
      "dims_dtype": [tf.int32, tf.int64],
      "dims_shape": [[], [1], [3], [3, 3]],
      "value_dtype": [tf.int32, tf.int64, tf.float32],
  }]

  def build_graph(parameters):
    """Build the fill op testing graph."""
    input1 = tf.placeholder(
        dtype=parameters["dims_dtype"],
        name="dims",
        shape=parameters["dims_shape"])
    input2 = tf.placeholder(
        dtype=parameters["value_dtype"], name="value", shape=[])
    return [input1, input2], [tf.fill(input1, input2)]

  def build_inputs(parameters, sess, inputs, outputs):
    # Dimensions start at 1 so the filled output is non-degenerate.
    dims_value = create_tensor_data(parameters["dims_dtype"],
                                    parameters["dims_shape"], 1)
    fill_value = create_scalar_data(parameters["value_dtype"])
    return [dims_value, fill_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [dims_value, fill_value])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=12)
def _make_logical_tests(op):
  """Make a set of tests to do logical operations."""

  def logical(options, expected_tf_failures=0):
    """Generate examples."""
    test_parameters = [{
        "input_shape_pair": [([], []), ([1, 1, 1, 3], [1, 1, 1, 3]),
                             ([2, 3, 4, 5], [2, 3, 4, 5]), ([2, 3, 3], [2, 3]),
                             ([5, 5], [1]), ([10], [2, 4, 10])],
    }]

    def build_graph(parameters):
      """Build the logical testing graph."""
      lhs_shape, rhs_shape = parameters["input_shape_pair"]
      input_value1 = tf.placeholder(
          dtype=tf.bool, name="input1", shape=lhs_shape)
      input_value2 = tf.placeholder(
          dtype=tf.bool, name="input2", shape=rhs_shape)
      return [input_value1, input_value2], [op(input_value1, input_value2)]

    def build_inputs(parameters, sess, inputs, outputs):
      feed = [
          create_tensor_data(tf.bool, shape)
          for shape in parameters["input_shape_pair"]
      ]
      return feed, sess.run(outputs, feed_dict=dict(zip(inputs, feed)))

    make_zip_of_tests(
        options,
        test_parameters,
        build_graph,
        build_inputs,
        expected_tf_failures=expected_tf_failures)

  return logical
@register_make_test_function()
def make_logical_or_tests(options):
  """Make a set of tests to do logical_or."""
  run = _make_logical_tests(tf.logical_or)
  return run(options, expected_tf_failures=1)
@register_make_test_function()
def make_logical_and_tests(options):
  """Make a set of tests to do logical_and."""
  run = _make_logical_tests(tf.logical_and)
  return run(options, expected_tf_failures=1)
@register_make_test_function()
def make_logical_xor_tests(options):
  """Make a set of tests to do logical_xor.

  Test logical_not as well.
  """
  run = _make_logical_tests(tf.logical_xor)
  return run(options, expected_tf_failures=1)
@register_make_test_function()
def make_mirror_pad_tests(options):
  """Make a set of tests to do mirror_pad."""
  test_parameters = [
      {
          "input_shape": [[2, 3]],
          "padding_matrix": [[[1, 1], [2, 1]]],
          "mode": ["REFLECT"],
          "type": ["const"]
      },
      {
          "input_shape": [[2, 3]],
          "padding_matrix": [[[1, 1], [1, 1]]],
          "mode": ["REFLECT"],
          "type": ["const"]
      },
      {
          "input_shape": [[2, 3]],
          "padding_matrix": [[[1, 1], [2, 1]]],
          "mode": ["SYMMETRIC"],
          "type": ["placeholder"]
      },
      {
          "input_shape": [[2, 3]],
          "padding_matrix": [[[1, 1], [2, 1]]],
          "mode": ["REFLECT"],
          "type": ["placeholder"]
      },
      {
          "input_shape": [[3]],
          "padding_matrix": [[[0, 2]]],
          "mode": ["SYMMETRIC"],
          "type": ["placeholder"]
      },
      {
          "input_shape": [[3]],
          "padding_matrix": [[[0, 2]]],
          "mode": ["SYMMETRIC"],
          "type": ["const"]
      },
      {
          "input_shape": [[3]],
          "padding_matrix": [[[0, 2]]],
          "mode": ["REFLECT"],
          "type": ["const"]
      },
      {
          "input_shape": [[3, 2, 4, 5]],
          "padding_matrix": [[[1, 1], [2, 2], [1, 1], [1, 1]]],
          "mode": ["SYMMETRIC"],
          "type": ["placeholder"]
      },
  ]

  def build_graph(parameters):
    """Build a mirror-pad graph; paddings are either a constant or an input."""
    data = tf.placeholder(
        dtype=tf.int32, name="input", shape=parameters["input_shape"])
    if parameters["type"] == "const":
      paddings = tf.constant(np.array(parameters["padding_matrix"]))
      graph_inputs = [data]
    else:
      paddings = tf.placeholder(
          dtype=tf.int32,
          name="padding",
          shape=[len(parameters["input_shape"]), 2])
      graph_inputs = [data, paddings]
    padded = tf.pad(data, paddings=paddings, mode=parameters["mode"])
    return graph_inputs, [padded]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random input data, plus the padding matrix when not constant."""
    feed_values = [create_tensor_data(tf.int32, parameters["input_shape"])]
    if parameters["type"] != "const":
      feed_values.append(np.array(parameters["padding_matrix"]))
    return feed_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, feed_values)))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_unroll_batch_matmul_tests(options):
  """Make a set of tests to test unroll_batch_matmul."""
  # The test cases below require broadcasting support (BatchMatMulV2 semantic),
  # which isn't supported as of this change.
  broadcast_shape_params = [
      # Simple broadcast.
      [(1, 2, 3), (3, 5), False, False],
      # Empty batch broadcast.
      [(2, 5, 3), (3, 7), False, False],
      # Single batch with non-empty batch broadcast.
      [(1, 5, 3), (4, 3, 7), False, False],
      # Broadcast both operands
      [(3, 1, 5, 3), (1, 4, 3, 7), False, False],
  ]
  # Each "shape" entry is [lhs_shape, rhs_shape, transpose_a, transpose_b].
  test_parameters = [{
      "dtype": [tf.float32],
      "shape": [
          [(2, 2, 3), (2, 3, 2), False, False],
          [(2, 2, 3), (2, 3, 2), True, True],
          [(2, 2, 3), (2, 2, 3), False, True],
          [(2, 2, 3), (2, 2, 3), True, False],
          [(4, 2, 2, 3), (4, 2, 3, 2), False, False],
          [(4, 2, 2, 3), (4, 2, 3, 2), True, True],
          [(4, 2, 2, 3), (4, 2, 2, 3), False, True],
          [(4, 2, 2, 3), (4, 2, 2, 3), True, False]
      ] + broadcast_shape_params,
      # TODO(b/130887442): Improve the forward compatibility tests for every
      # ops.
      "forward_compatibility_test": [False, True],
  }]
  def build_graph(parameters):
    """Build the batch_matmul op testing graph."""
    def _build_graph():
      """Create both operands and the (possibly transposed) matmul product."""
      input_tensor1 = tf.placeholder(
          dtype=parameters["dtype"], shape=parameters["shape"][0])
      input_tensor2 = tf.placeholder(
          dtype=parameters["dtype"], shape=parameters["shape"][1])
      # Should be unrolled and replaced with fully_connected ops in the end.
      out = tf.matmul(
          input_tensor1,
          input_tensor2,
          transpose_a=parameters["shape"][2],
          transpose_b=parameters["shape"][3])
      return [input_tensor1, input_tensor2], [out]
    if parameters["forward_compatibility_test"]:
      # This is hardcoded to the date after MatMulV2 is activated.
      # TODO(b/130887442): Improve the forward compatibility tests for every
      # ops, and remove the hardcoded date.
      with tf.compat.forward_compatibility_horizon(2019, 4, 26):
        return _build_graph()
    else:
      return _build_graph()
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data for both matmul operands and run the graph."""
    input_value1 = create_tensor_data(
        parameters["dtype"], shape=parameters["shape"][0])
    input_value2 = create_tensor_data(
        parameters["dtype"], shape=parameters["shape"][1])
    return [input_value1, input_value2], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_value1, input_value2])))
  make_zip_of_tests(
      options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_placeholder_with_default_tests(options):
  """Make a set of tests to test placeholder_with_default."""
  test_parameters = [{
      "dtype": [tf.float32, tf.int32, tf.int64],
  }]

  def build_graph(parameters):
    """Build the placeholder_with_default testing graph."""
    default_value = tf.constant(
        [1, 2, 2, 0], shape=[2, 2], dtype=parameters["dtype"])
    placeholder = tf.placeholder_with_default(
        default_value, shape=[2, 2], name="input")
    comparison = tf.equal(placeholder, default_value, name="output")
    return [placeholder], [comparison]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed a fixed 2x2 value converted to the parameterized numpy dtype."""
    numpy_type = _TF_TYPE_INFO[parameters["dtype"]][0]
    feed_value = np.array([[1, 0], [2, 1]], numpy_type)
    return [feed_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [feed_value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_unique_tests(options):
  """Make a set of tests for Unique op."""
  test_parameters = [
      {
          "input_shape": [[1]],
          "index_type": [tf.int32, tf.int64, None],
          "input_values": [3]
      },
      {
          "input_shape": [[5]],
          "index_type": [tf.int32, tf.int64],
          "input_values": [[3, 2, 1, 2, 3]]
      },
      {
          "input_shape": [[7]],
          "index_type": [tf.int32, tf.int64],
          "input_values": [[1, 1, 1, 1, 1, 1, 1]]
      },
      {
          "input_shape": [[5]],
          "index_type": [tf.int32, tf.int64],
          "input_values": [[3, 2, 1, 0, -1]]
      }]

  def build_graph(parameters):
    """Build the graph for the test case."""
    data = tf.placeholder(
        dtype=tf.int32, name="input", shape=parameters["input_shape"])
    index_type = parameters["index_type"]
    # tf.unique returns a (values, indices) pair; both tensors are outputs.
    if index_type is None:
      output = tf.unique(data)
    else:
      output = tf.unique(data, index_type)
    return [data], output

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random int32 data of the requested shape."""
    feed_values = [create_tensor_data(tf.int32, parameters["input_shape"])]
    return feed_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, feed_values)))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_reverse_v2_tests(options):
  """Make a set of tests to do reverse_v2."""
  test_parameters = [{
      "base_shape": [[3, 4, 3], [3, 4], [5, 6, 7, 8]],
      "axis": [0, 1, 2, 3],
  }]

  def get_valid_axis(parameters):
    """Return 'axis' clamped to the last valid dimension of the shape."""
    return min(parameters["axis"], len(parameters["base_shape"]) - 1)

  def build_graph(parameters):
    """Build a single-axis reverse op."""
    data = tf.placeholder(
        dtype=tf.float32, name=("input"), shape=parameters["base_shape"])
    reversed_out = tf.reverse(data, axis=[get_valid_axis(parameters)])
    return [data], [reversed_out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random float32 data of the base shape."""
    data_value = create_tensor_data(np.float32, shape=parameters["base_shape"])
    return [data_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [data_value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_reverse_sequence_tests(options):
  """Make a set of tests to do reverse_sequence."""
  test_parameters = [
      {
          "input_dtype": [tf.float32, tf.int32, tf.int64],
          "input_shape": [[8, 4, 5, 5, 6], [4, 4, 3, 5]],
          "seq_lengths": [[2, 2, 2, 2], [2, 1, 1, 0]],
          "seq_axis": [0, 3],
          "batch_axis": [1]
      },
      {
          "input_dtype": [tf.float32],
          "input_shape": [[2, 4, 5, 5, 6]],
          "seq_lengths": [[2, 1]],
          "seq_axis": [2],
          "batch_axis": [0]
      },
      {
          "input_dtype": [tf.float32],
          "input_shape": [[4, 2]],
          "seq_lengths": [[3, 1]],
          "seq_axis": [0],
          "batch_axis": [1]
      }]

  def build_graph(parameters):
    """Build a reverse_sequence op with constant sequence lengths."""
    sequence = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    reversed_out = tf.reverse_sequence(
        sequence,
        seq_lengths=parameters["seq_lengths"],
        batch_axis=parameters["batch_axis"],
        seq_axis=parameters["seq_axis"])
    return [sequence], [reversed_out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data of the parameterized dtype and shape."""
    sequence_value = create_tensor_data(parameters["input_dtype"],
                                        parameters["input_shape"])
    return [sequence_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [sequence_value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_matrix_diag_tests(options):
  """Make a set of tests for tf.linalg.diag op."""
  test_parameters = [
      {
          "input_shape": [[3], [2, 3], [3, 4, 5], [2, 4, 6, 8]],
          "input_dtype": [tf.int32, tf.float32],
      },
  ]

  def build_graph(parameters):
    """Build a graph with a single matrix_diag op."""
    diagonal = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    return [diagonal], [tf.matrix_diag(diagonal)]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data for the diagonal input."""
    diagonal_value = create_tensor_data(parameters["input_dtype"],
                                        parameters["input_shape"])
    return [diagonal_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [diagonal_value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_matrix_set_diag_tests(options):
  """Make a set of tests for tf.linalg.set_diag op."""
  test_parameters = [
      {
          "input_diag_shapes": [([3, 3], [3]), ([2, 3], [2]), ([2, 4, 4],
                                                               [2, 4]),
                                ([3, 4, 5, 6], [3, 4, 5])],
          "input_dtype": [tf.int32, tf.float32, tf.uint8],
      },
  ]

  def build_graph(parameters):
    """Build a graph replacing a matrix diagonal with a provided vector."""
    matrix_shape, diag_shape = parameters["input_diag_shapes"]
    matrix = tf.placeholder(
        dtype=parameters["input_dtype"], name="input", shape=matrix_shape)
    diagonal = tf.placeholder(
        dtype=parameters["input_dtype"], name="diagonal", shape=diag_shape)
    return [matrix, diagonal], [tf.matrix_set_diag(matrix, diagonal)]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random matrix and diagonal values."""
    matrix_shape, diag_shape = parameters["input_diag_shapes"]
    matrix_value = create_tensor_data(parameters["input_dtype"], matrix_shape)
    diag_value = create_tensor_data(parameters["input_dtype"], diag_shape)
    return [matrix_value, diag_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [matrix_value, diag_value])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function()
def make_eye_tests(options):
  """Make a set of tests for tf.eye op."""
  test_parameters = [{
      "num_rows_shape": [[]],
      "num_cols_shape": [[]],
      "batch_shape": [[3], [2, 4], [4, 5, 6], None],
      "use_num_cols": [True, False],
      "dtype": [tf.float32, tf.int32],
  }]

  def build_graph(parameters):
    """Build an eye op fed by one or two scalar dimension placeholders."""
    num_rows = tf.placeholder(
        dtype=tf.int32, name="num_rows", shape=parameters["num_rows_shape"])
    num_cols = tf.placeholder(
        dtype=tf.int32, name="num_columns", shape=parameters["num_cols_shape"])
    if not parameters["use_num_cols"]:
      # Square identity: only the row count is consumed.
      eye_out = tf.eye(num_rows=num_rows, dtype=parameters["dtype"])
      return [num_rows], [eye_out]
    eye_out = tf.eye(
        num_rows=num_rows,
        num_columns=num_cols,
        batch_shape=parameters["batch_shape"],
        dtype=parameters["dtype"])
    return [num_rows, num_cols], [eye_out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random positive scalar dimensions and run the graph."""
    # Both scalars are always drawn so RNG consumption matches either branch.
    row_value = create_scalar_data(dtype=np.int32, min_value=1)
    col_value = create_scalar_data(dtype=np.int32, min_value=1)
    if parameters["use_num_cols"]:
      feed_values = [row_value, col_value]
    else:
      feed_values = [row_value]
    return feed_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, feed_values)))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
@register_make_test_function(name="make_unidirectional_sequence_lstm_tests")
@test_util.enable_control_flow_v2
def make_unidirectional_sequence_lstm_tests(options):
  """Make a set of tests to do unidirectional_sequence_lstm."""
  test_parameters = [{
      "batch_size": [2, 4, 6],
      "seq_length": [1, 3],
      "units": [4, 5],
      "use_peepholes": [False, True],
      "is_dynamic_rnn": [False, True]
  }]
  def build_graph(parameters):
    """Build a TFLiteLSTMCell graph, either dynamic (time-major) or static."""
    input_values = []
    if parameters["is_dynamic_rnn"]:
      # Dynamic RNN takes one time-major tensor: [time, batch, input]
      # (input size equals "units" in these tests).
      shape = [
          parameters["seq_length"], parameters["batch_size"],
          parameters["units"]
      ]
      input_value = tf.placeholder(dtype=tf.float32, name="input", shape=shape)
      input_values.append(input_value)
      lstm_cell = tf.lite.experimental.nn.TFLiteLSTMCell(
          parameters["units"],
          use_peepholes=parameters["use_peepholes"])
      outs, _ = tf.lite.experimental.nn.dynamic_rnn(
          lstm_cell, input_value, dtype=tf.float32, time_major=True)
      outs = tf.unstack(outs, axis=1)
    else:
      # Static RNN takes one [batch, input] placeholder per time step.
      shape = [parameters["batch_size"], parameters["units"]]
      for i in range(parameters["seq_length"]):
        input_value = tf.placeholder(
            dtype=tf.float32, name=("input_%d" % i), shape=shape)
        input_values.append(input_value)
      lstm_cell = tf.lite.experimental.nn.TFLiteLSTMCell(
          parameters["units"], use_peepholes=parameters["use_peepholes"])
      outs, _ = tf.nn.static_rnn(lstm_cell, input_values, dtype=tf.float32)
    # NOTE(review): the add-with-zeros appears to only wrap the last step's
    # output in a fresh op - confirm whether it is needed for output naming.
    real_output = tf.zeros([1], dtype=tf.float32) + outs[-1]
    return input_values, [real_output]
  def build_inputs(parameters, sess, inputs, outputs):
    """Create random feed data, initialize LSTM variables, and run the graph."""
    input_values = []
    if parameters["is_dynamic_rnn"]:
      shape = [
          parameters["seq_length"], parameters["batch_size"],
          parameters["units"]
      ]
      input_value = create_tensor_data(tf.float32, shape)
      input_values.append(input_value)
    else:
      shape = [parameters["batch_size"], parameters["units"]]
      for i in range(parameters["seq_length"]):
        input_value = create_tensor_data(tf.float32, shape)
        input_values.append(input_value)
    # The LSTM weights are variables; initialize them before evaluating.
    init = tf.global_variables_initializer()
    sess.run(init)
    # Tflite fused kernel takes input as [time, batch, input].
    # For static unidirectional sequence lstm, the input is an array sized of
    # time, and pack the array together, however, for time = 1, the input is
    # not packed.
    tflite_input_values = input_values
    if not parameters["is_dynamic_rnn"] and parameters["seq_length"] == 1:
      tflite_input_values = [
          input_values[0].reshape((1, parameters["batch_size"],
                                   parameters["units"]))
      ]
    return tflite_input_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, input_values)))
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      use_frozen_graph=True)
@register_make_test_function(name="make_unidirectional_sequence_rnn_tests")
@test_util.enable_control_flow_v2
def make_unidirectional_sequence_rnn_tests(options):
  """Make a set of tests to do unidirectional_sequence_rnn."""
  test_parameters = [{
      "batch_size": [2, 4, 6],
      "seq_length": [1, 3],
      "units": [4, 5],
      "is_dynamic_rnn": [False, True]
  }]
  def build_graph(parameters):
    """Build a TfLiteRNNCell graph, either dynamic (time-major) or static."""
    input_values = []
    if parameters["is_dynamic_rnn"]:
      # Dynamic RNN takes one time-major tensor: [time, batch, input]
      # (input size equals "units" in these tests).
      shape = [
          parameters["seq_length"], parameters["batch_size"],
          parameters["units"]
      ]
      input_value = tf.placeholder(dtype=tf.float32, name="input", shape=shape)
      input_values.append(input_value)
      rnn_cell = tf.lite.experimental.nn.TfLiteRNNCell(parameters["units"])
      outs, _ = tf.lite.experimental.nn.dynamic_rnn(
          rnn_cell, input_value, dtype=tf.float32, time_major=True)
      outs = tf.unstack(outs, axis=1)
    else:
      # Static RNN takes one [batch, input] placeholder per time step.
      shape = [parameters["batch_size"], parameters["units"]]
      for i in range(parameters["seq_length"]):
        input_value = tf.placeholder(
            dtype=tf.float32, name=("input_%d" % i), shape=shape)
        input_values.append(input_value)
      rnn_cell = tf.lite.experimental.nn.TfLiteRNNCell(parameters["units"])
      outs, _ = tf.nn.static_rnn(rnn_cell, input_values, dtype=tf.float32)
    # NOTE(review): the add-with-zeros appears to only wrap the last step's
    # output in a fresh op - confirm whether it is needed for output naming.
    real_output = tf.zeros([1], dtype=tf.float32) + outs[-1]
    return input_values, [real_output]
  def build_inputs(parameters, sess, inputs, outputs):
    """Create random feed data, initialize RNN variables, and run the graph."""
    input_values = []
    if parameters["is_dynamic_rnn"]:
      shape = [
          parameters["seq_length"], parameters["batch_size"],
          parameters["units"]
      ]
      input_value = create_tensor_data(tf.float32, shape)
      input_values.append(input_value)
    else:
      shape = [parameters["batch_size"], parameters["units"]]
      for i in range(parameters["seq_length"]):
        input_value = create_tensor_data(tf.float32, shape)
        input_values.append(input_value)
    # The RNN weights are variables; initialize them before evaluating.
    init = tf.global_variables_initializer()
    sess.run(init)
    # Tflite fused kernel takes input as [time, batch, input].
    # For static unidirectional sequence rnn, the input is an array sized of
    # time, and pack the array together, however, for time = 1, the input is
    # not packed.
    tflite_input_values = input_values
    if not parameters["is_dynamic_rnn"] and parameters["seq_length"] == 1:
      tflite_input_values = [
          input_values[0].reshape((1, parameters["batch_size"],
                                   parameters["units"]))
      ]
    return tflite_input_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, input_values)))
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      use_frozen_graph=True)
@register_make_test_function()
def make_unfused_gru_tests(options):
  """Make a set of tests for unfused gru op."""
  test_parameters = [{
      "units": [2, 5],
      "batch_size": [1, 2],
      "time": [3],
  }]
  def build_graph(parameters):
    """Build a bidirectional GRU graph with one input per time step."""
    inputs = [
        tf.placeholder(tf.float32,
                       [parameters["batch_size"], parameters["units"]])
        for _ in range(parameters["time"])
    ]
    cell_fw = tf.nn.rnn_cell.GRUCell(parameters["units"])
    cell_bw = tf.nn.rnn_cell.GRUCell(parameters["units"])
    outputs, _, _ = tf.nn.static_bidirectional_rnn(
        cell_fw, cell_bw, inputs, dtype=tf.float32)
    return inputs, outputs
  def build_inputs(parameters, sess, inputs, outputs):
    """Initialize the GRU variables, then feed random data per time step."""
    input_values = [
        create_tensor_data(tf.float32,
                           [parameters["batch_size"], parameters["units"]])
        for _ in range(parameters["time"])
    ]
    # The GRU kernels/biases are variables; initialize before evaluating.
    init = tf.global_variables_initializer()
    sess.run(init)
    return input_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, input_values)))
  # use_frozen_graph=True: presumably folds initialized variables into
  # constants for conversion - confirm against make_zip_of_tests.
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      use_frozen_graph=True)
@register_make_test_function()
def make_rfft2d_tests(options):
  """Make a set of tests to do rfft2d."""
  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape": [[8, 8], [3, 8, 8]],
      "fft_length": [
          None, [4, 4], [4, 8], [8, 4], [8, 8], [8, 16], [16, 8], [16, 16]
      ]
  }]

  def build_graph(parameters):
    """Build a graph computing rfft2d over a single real-valued input."""
    signal = tf.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    with spectral_ops_test_util.fft_kernel_label_map():
      spectrum = tf.signal.rfft2d(signal, fft_length=parameters["fft_length"])
    return [signal], [spectrum]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random real data and evaluate the FFT output."""
    signal_value = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape"])
    return [signal_value], sess.run(
        outputs, feed_dict=dict(zip(inputs, [signal_value])))

  # RFFT2D converts as a custom op, so allow custom ops in the converter.
  extra_toco_options = ExtraTocoOptions()
  extra_toco_options.allow_custom_ops = True
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs,
                    extra_toco_options)
# Toco binary path provided by the generate rule.
bin_path = None


def generate_examples(options):
  """Run the single test generator that matches options.zip_to_output."""
  global bin_path

  def mkdir_if_not_exist(path):
    """Create directory `path` if needed; fail loudly if it still isn't there."""
    if not os.path.isdir(path):
      os.mkdir(path)
    if not os.path.isdir(path):
      raise RuntimeError("Failed to create dir %r" % path)

  mkdir_if_not_exist(os.path.join(options.output_path))
  out = options.zip_to_output
  bin_path = options.toco
  # Some zip filenames contain a postfix identifying the conversion mode. The
  # list of valid conversion modes is defined in
  # generated_test_conversion_modes() in build_def.bzl.
  stripped = out.replace(".zip", "").replace("pb2lite", "").replace(
      "toco-flex", "").rstrip("_")
  test_function = "make_%s_tests" % stripped
  if test_function not in _MAKE_TEST_FUNCTIONS_MAP:
    raise RuntimeError("Can't find a test function to create %r. Tried %r" %
                       (out, test_function))
  _MAKE_TEST_FUNCTIONS_MAP[test_function](options)
| alsrgv/tensorflow | tensorflow/lite/testing/generate_examples_lib.py | Python | apache-2.0 | 176,619 |
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import binascii
import io
import logging
import os
import traceback
from builtins import bytes, object, open
from contextlib import contextmanager
from pants.scm.scm import Scm
from pants.util.contextutil import pushd
from pants.util.memo import memoized_method
from pants.util.process_handler import subprocess
from pants.util.strutil import ensure_binary, ensure_text
# 40 is Linux's hard-coded limit for total symlinks followed when resolving a path.
MAX_SYMLINKS_IN_REALPATH = 40
# Byte length of a binary (packed) git SHA-1 object hash; the hex form is twice this.
GIT_HASH_LENGTH = 20
# Precompute these because ensure_binary is slow and we'll need them a lot
SLASH = ensure_binary('/')
NUL = ensure_binary('\0')
SPACE = ensure_binary(' ')
NEWLINE = ensure_binary('\n')
EMPTY_STRING = ensure_binary("")
# Module-level logger for this scm backend.
logger = logging.getLogger(__name__)
class Git(Scm):
  """An Scm implementation backed by git."""

  @classmethod
  def detect_worktree(cls, binary='git', subdir=None):
    """Detect the git working tree above cwd and return it; else, return None.

    :param string binary: The path to the git binary to use, 'git' by default.
    :param string subdir: The path to start searching for a git repo.
    :returns: path to the directory where the git working tree is rooted.
    :rtype: string
    """
    # TODO(John Sirois): This is only used as a factory for a Git instance in
    # pants.base.build_environment.get_scm, encapsulate in a true factory method.
    cmd = [binary, 'rev-parse', '--show-toplevel']
    try:
      if subdir:
        with pushd(subdir):
          process, out = cls._invoke(cmd)
      else:
        process, out = cls._invoke(cmd)
      cls._check_result(cmd, process.returncode, raise_type=Scm.ScmException)
    except Scm.ScmException:
      return None
    return cls._cleanse(out)

  @classmethod
  def clone(cls, repo_url, dest, binary='git'):
    """Clone the repo at repo_url into dest.

    :param string binary: The path to the git binary to use, 'git' by default.
    :returns: an instance of this class representing the cloned repo.
    :rtype: Git
    """
    cmd = [binary, 'clone', repo_url, dest]
    process, out = cls._invoke(cmd)
    cls._check_result(cmd, process.returncode)
    return cls(binary=binary, worktree=dest)

  @classmethod
  def _invoke(cls, cmd):
    """Invoke the given command, and return a tuple of process and raw binary output.

    stderr flows to wherever its currently mapped for the parent process - generally to
    the terminal where the user can see the error.

    :param list cmd: The command in the form of a list of strings
    :returns: The completed process object and its standard output.
    :raises: Scm.LocalException if there was a problem exec'ing the command at all.
    """
    try:
      process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    except OSError as e:
      # Binary DNE or is not executable
      raise cls.LocalException('Failed to execute command {}: {}'.format(' '.join(cmd), e))
    out, _ = process.communicate()
    return process, out

  @classmethod
  def _cleanse(cls, output, errors='strict'):
    # Decode raw subprocess output to text, stripping surrounding whitespace.
    return output.strip().decode('utf-8', errors=errors)

  @classmethod
  def _check_result(cls, cmd, result, failure_msg=None, raise_type=Scm.ScmException):
    # Raise `raise_type` when a git subprocess exits non-zero.
    if result != 0:
      raise raise_type(failure_msg or '{} failed with exit code {}'.format(' '.join(cmd), result))

  def __init__(self, binary='git', gitdir=None, worktree=None, remote=None, branch=None):
    """Creates a git scm proxy that assumes the git repository is in the cwd by default.

    binary:    The path to the git binary to use, 'git' by default.
    gitdir:    The path to the repository's git metadata directory (typically '.git').
    worktree:  The path to the git repository working tree directory (typically '.').
    remote:    The default remote to use.
    branch:    The default remote branch to use.
    """
    # Bug fix: `super(Scm, self)` started the MRO walk *above* Scm, silently
    # skipping Scm's own __init__; `super(Git, self)` runs it as intended.
    super(Git, self).__init__()
    self._gitcmd = binary
    self._worktree = os.path.realpath(worktree or os.getcwd())
    self._gitdir = os.path.realpath(gitdir) if gitdir else os.path.join(self._worktree, '.git')
    self._remote = remote
    self._branch = branch

  def current_rev_identifier(self):
    """Return the identifier git uses for the current revision."""
    return 'HEAD'

  @property
  def worktree(self):
    """The absolute path of the repository working tree."""
    return self._worktree

  @property
  def commit_id(self):
    """The commit sha of HEAD."""
    return self._check_output(['rev-parse', 'HEAD'], raise_type=Scm.LocalException)

  @property
  def server_url(self):
    """The push url of the 'origin' remote.

    :raises Scm.LocalException: if there is not exactly one pushable 'origin' remote.
    """
    git_output = self._check_output(['remote', '--verbose'], raise_type=Scm.LocalException)

    def origin_urls():
      for line in git_output.splitlines():
        name, url, action = line.split()
        if name == 'origin' and action == '(push)':
          yield url

    origins = list(origin_urls())
    if len(origins) != 1:
      raise Scm.LocalException("Unable to find remote named 'origin' that accepts pushes "
                               "amongst:\n{}".format(git_output))
    return origins[0]

  @property
  def tag_name(self):
    """The output of `git describe --tags --always`, or None when it cannot describe."""
    # Calls to git describe can have bad performance on large repos. Be aware
    # of the performance hit if you use this property.
    tag = self._check_output(['describe', '--tags', '--always'], raise_type=Scm.LocalException)
    return None if 'cannot' in tag else tag

  @property
  def branch_name(self):
    """The current branch name, or None when in detached HEAD state."""
    branch = self._check_output(['rev-parse', '--abbrev-ref', 'HEAD'],
                                raise_type=Scm.LocalException)
    return None if branch == 'HEAD' else branch

  def fix_git_relative_path(self, worktree_path, relative_to):
    """Convert a worktree-relative path (as git reports them) to be relative to `relative_to`."""
    return os.path.relpath(os.path.join(self._worktree, worktree_path), relative_to)

  def changed_files(self, from_commit=None, include_untracked=False, relative_to=None):
    """Return the set of changed (and optionally untracked) paths, relative to `relative_to`."""
    relative_to = relative_to or self._worktree
    rel_suffix = ['--', relative_to]
    uncommitted_changes = self._check_output(['diff', '--name-only', 'HEAD'] + rel_suffix,
                                             raise_type=Scm.LocalException)
    files = set(uncommitted_changes.splitlines())
    if from_commit:
      # Grab the diff from the merge-base to HEAD using ... syntax. This ensures we have just
      # the changes that have occurred on the current branch.
      committed_cmd = ['diff', '--name-only', from_commit + '...HEAD'] + rel_suffix
      committed_changes = self._check_output(committed_cmd,
                                             raise_type=Scm.LocalException)
      # Bug fix: git emits one path per line; splitting on arbitrary whitespace
      # (split()) broke filenames that contain spaces.
      files.update(committed_changes.splitlines())
    if include_untracked:
      untracked_cmd = ['ls-files', '--other', '--exclude-standard', '--full-name'] + rel_suffix
      untracked = self._check_output(untracked_cmd,
                                     raise_type=Scm.LocalException)
      files.update(untracked.splitlines())
    # git will report changed files relative to the worktree: re-relativize to relative_to
    return {self.fix_git_relative_path(f, relative_to) for f in files}

  def changes_in(self, diffspec, relative_to=None):
    """Return the set of paths changed by the commit(s) identified by `diffspec`."""
    relative_to = relative_to or self._worktree
    cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', diffspec]
    # One path per line; split on lines so filenames with spaces survive.
    files = self._check_output(cmd, raise_type=Scm.LocalException).splitlines()
    return {self.fix_git_relative_path(f.strip(), relative_to) for f in files}

  def changelog(self, from_commit=None, files=None):
    """Return `git log` output (UTF-8, merges excluded), optionally limited to files."""
    # We force the log output encoding to be UTF-8 here since the user may have a git config that
    # overrides the git UTF-8 default log output encoding.
    args = ['log', '--encoding=UTF-8', '--no-merges', '--stat', '--find-renames', '--find-copies']
    if from_commit:
      args.append(from_commit + '..HEAD')
    if files:
      args.append('--')
      args.extend(files)
    # There are various circumstances that can lead to git logs that are not transcodeable to utf-8,
    # for example: http://comments.gmane.org/gmane.comp.version-control.git/262685
    # Git will not error in these cases and we do not wish to either. Here we direct byte sequences
    # that can not be utf-8 decoded to be replaced with the utf-8 replacement character.
    return self._check_output(args, raise_type=Scm.LocalException, errors='replace')

  def merge_base(self, left='master', right='HEAD'):
    """Returns the merge-base of master and HEAD in bash: `git merge-base left right`"""
    return self._check_output(['merge-base', left, right], raise_type=Scm.LocalException)

  def refresh(self, leave_clean=False):
    """Attempt to pull-with-rebase from upstream. This is implemented as fetch-plus-rebase
    so that we can distinguish between errors in the fetch stage (likely network errors)
    and errors in the rebase stage (conflicts). If leave_clean is true, then in the event
    of a rebase failure, the branch will be rolled back. Otherwise, it will be left in the
    conflicted state.
    """
    remote, merge = self._get_upstream()
    self._check_call(['fetch', '--tags', remote, merge], raise_type=Scm.RemoteException)
    try:
      self._check_call(['rebase', 'FETCH_HEAD'], raise_type=Scm.LocalException)
    except Scm.LocalException as e:
      if leave_clean:
        logger.debug('Cleaning up after failed rebase')
        try:
          self._check_call(['rebase', '--abort'], raise_type=Scm.LocalException)
        except Scm.LocalException:
          # Bug fixes: the message was missing the word 'clean', and
          # traceback.format_exc() takes a traceback limit, not an exception -
          # it already formats the exception currently being handled.
          logger.debug('Failed to clean up after failed rebase')
          logger.debug(traceback.format_exc())
        # But let the original exception propagate, since that's the more interesting one
        raise e

  def tag(self, name, message=None):
    """Create an annotated tag and push it to the upstream remote."""
    # We use -a here instead of --annotate to maintain maximum git compatibility.
    # --annotate was only introduced in 1.7.8 via:
    # https://github.com/git/git/commit/c97eff5a95d57a9561b7c7429e7fcc5d0e3a7f5d
    self._check_call(['tag', '-a', '--message=' + (message or ''), name],
                     raise_type=Scm.LocalException)
    self.push('refs/tags/' + name)

  def commit(self, message, verify=True):
    """Commit all tracked changes with the given message."""
    cmd = ['commit', '--all', '--message=' + message]
    if not verify:
      cmd.append('--no-verify')
    self._check_call(cmd, raise_type=Scm.LocalException)

  def add(self, *paths):
    """Stage the given paths."""
    self._check_call(['add'] + list(paths), raise_type=Scm.LocalException)

  def commit_date(self, commit_reference):
    """Return the committer date of the given commit reference."""
    return self._check_output(['log', '-1', '--pretty=tformat:%ci', commit_reference],
                              raise_type=Scm.LocalException)

  def push(self, *refs):
    """Push the upstream merge branch plus any extra refs to the upstream remote."""
    remote, merge = self._get_upstream()
    self._check_call(['push', remote, merge] + list(refs), raise_type=Scm.RemoteException)

  def set_state(self, rev):
    """Check out the given revision."""
    self._check_call(['checkout', rev])

  def _get_upstream(self):
    """Return the remote and remote merge branch for the current branch"""
    if not self._remote or not self._branch:
      branch = self.branch_name
      if not branch:
        raise Scm.LocalException('Failed to determine local branch')

      def get_local_config(key):
        value = self._check_output(['config', '--local', '--get', key],
                                   raise_type=Scm.LocalException)
        return value.strip()

      self._remote = self._remote or get_local_config('branch.{}.remote'.format(branch))
      self._branch = self._branch or get_local_config('branch.{}.merge'.format(branch))
    return self._remote, self._branch

  def _check_call(self, args, failure_msg=None, raise_type=None):
    """Run git with `args`, raising `raise_type` on a non-zero exit."""
    cmd = self._create_git_cmdline(args)
    self._log_call(cmd)
    result = subprocess.call(cmd)
    self._check_result(cmd, result, failure_msg, raise_type)

  def _check_output(self, args, failure_msg=None, raise_type=None, errors='strict'):
    """Run git with `args` and return its cleansed stdout, raising on non-zero exit."""
    cmd = self._create_git_cmdline(args)
    self._log_call(cmd)
    process, out = self._invoke(cmd)
    self._check_result(cmd, process.returncode, failure_msg, raise_type)
    return self._cleanse(out, errors=errors)

  def _create_git_cmdline(self, args):
    # Pin both the metadata dir and the worktree so commands work from any cwd.
    return [self._gitcmd, '--git-dir=' + self._gitdir, '--work-tree=' + self._worktree] + args

  def _log_call(self, cmd):
    logger.debug('Executing: ' + ' '.join(cmd))

  def repo_reader(self, rev):
    """Return a GitRepositoryReader for reading files at the given revision."""
    return GitRepositoryReader(self, rev)
class GitRepositoryReader(object):
"""
  Allows reading file contents and directory information from an arbitrary git
  commit. This is useful for pants-aware git sparse checkouts.
"""
def __init__(self, scm, rev):
self.scm = scm
self.rev = rev
self._cat_file_process = None
# Trees is a dict from path to [list of Dir, Symlink or File objects]
self._trees = {}
self._realpath_cache = {'.': './', '': './'}
def _maybe_start_cat_file_process(self):
if not self._cat_file_process:
cmdline = self.scm._create_git_cmdline(['cat-file', '--batch'])
self._cat_file_process = subprocess.Popen(cmdline,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
class MissingFileException(Exception):
def __init__(self, rev, relpath):
self.relpath = relpath
self.rev = rev
def __str__(self):
return "MissingFileException({}, {})".format(self.relpath, self.rev)
class IsDirException(Exception):
def __init__(self, rev, relpath):
self.relpath = relpath
self.rev = rev
def __str__(self):
return "IsDirException({}, {})".format(self.relpath, self.rev)
class NotADirException(Exception):
def __init__(self, rev, relpath):
self.relpath = relpath
self.rev = rev
def __str__(self):
return "NotADirException({}, {})".format(self.relpath, self.rev)
class SymlinkLoopException(Exception):
def __init__(self, rev, relpath):
self.relpath = relpath
self.rev = rev
def __str__(self):
return "SymlinkLoop({}, {})".format(self.relpath, self.rev)
class ExternalSymlinkException(Exception):
def __init__(self, rev, relpath):
self.relpath = relpath
self.rev = rev
def __str__(self):
return "ExternalSymlink({}, {})".format(self.relpath, self.rev)
class GitDiedException(Exception):
pass
class UnexpectedGitObjectTypeException(Exception):
# Programmer error
pass
def _safe_realpath(self, relpath):
try:
return self._realpath(relpath)
except self.MissingFileException:
return None
except self.NotADirException:
return None
def _safe_read_object(self, relpath, max_symlinks):
try:
return self._read_object(relpath, max_symlinks)
except self.MissingFileException:
return None, relpath
except self.NotADirException:
return None, relpath
def exists(self, relpath):
path = self._safe_realpath(relpath)
return bool(path)
def isfile(self, relpath):
path = self._safe_realpath(relpath)
if path:
return not path.endswith('/')
return False
def isdir(self, relpath):
path = self._safe_realpath(relpath)
if path:
return path.endswith('/')
return False
def lstat(self, relpath):
obj, _ = self._safe_read_object(relpath, max_symlinks=0)
return obj
def readlink(self, relpath):
# TODO: Relatively inefficient, but easier than changing read_object, unfortunately.
if type(self.lstat(relpath)) != self.Symlink:
return None
obj, path_so_far = self._safe_read_object(relpath, max_symlinks=1)
if obj == None:
return None
return path_so_far
class Symlink(object):
def __init__(self, name, sha):
self.name = name
self.sha = sha
class Dir(object):
def __init__(self, name, sha):
self.name = name
self.sha = sha
class File(object):
def __init__(self, name, sha):
self.name = name
self.sha = sha
def listdir(self, relpath):
"""Like os.listdir, but reads from the git repository.
:returns: a list of relative filenames
"""
path = self._realpath(relpath)
if not path.endswith('/'):
raise self.NotADirException(self.rev, relpath)
if path[0] == '/' or path.startswith('../'):
return os.listdir(path)
tree = self._read_tree(path[:-1])
return list(tree.keys())
@contextmanager
def open(self, relpath):
"""Read a file out of the repository at a certain revision.
This is complicated because, unlike vanilla git cat-file, this follows symlinks in
the repo. If a symlink points outside repo, the file is read from the filesystem;
that's because presumably whoever put that symlink there knew what they were doing.
"""
path = self._realpath(relpath)
if path.endswith('/'):
raise self.IsDirException(self.rev, relpath)
if path.startswith('../') or path[0] == '/':
yield open(path, 'rb')
return
object_type, data = self._read_object_from_repo(rev=self.rev, relpath=path)
if object_type == b'tree':
raise self.IsDirException(self.rev, relpath)
assert object_type == b'blob'
yield io.BytesIO(data)
@memoized_method
def _realpath(self, relpath):
"""Follow symlinks to find the real path to a file or directory in the repo.
:returns: if the expanded path points to a file, the relative path
to that file; if a directory, the relative path + '/'; if
a symlink outside the repo, a path starting with / or ../.
"""
obj, path_so_far = self._read_object(relpath, MAX_SYMLINKS_IN_REALPATH)
if isinstance(obj, self.Symlink):
raise self.SymlinkLoopException(self.rev, relpath)
return path_so_far
def _read_object(self, relpath, max_symlinks):
path_so_far = ''
components = list(relpath.split(os.path.sep))
symlinks = 0
# Consume components to build path_so_far
while components:
component = components.pop(0)
if component == '' or component == '.':
continue
parent_tree = self._read_tree(path_so_far)
parent_path = path_so_far
if path_so_far != '':
path_so_far += '/'
path_so_far += component
try:
obj = parent_tree[component.encode('utf-8')]
except KeyError:
raise self.MissingFileException(self.rev, relpath)
if isinstance(obj, self.File):
if components:
# We've encountered a file while searching for a directory
raise self.NotADirException(self.rev, relpath)
else:
return obj, path_so_far
elif isinstance(obj, self.Dir):
if not components:
return obj, path_so_far + '/'
# A dir is OK; we just descend from here
elif isinstance(obj, self.Symlink):
symlinks += 1
if symlinks > max_symlinks:
return obj, path_so_far
# A git symlink is stored as a blob containing the name of the target.
# Read that blob.
object_type, path_data = self._read_object_from_repo(sha=obj.sha)
assert object_type == b'blob'
if path_data[0] == b'/':
# Is absolute, thus likely points outside the repo.
raise self.ExternalSymlinkException(self.rev, relpath)
link_to = os.path.normpath(os.path.join(parent_path, path_data.decode('utf-8')))
if link_to.startswith('../') or link_to[0] == '/':
# Points outside the repo.
raise self.ExternalSymlinkException(self.rev, relpath)
# Restart our search at the top with the new path.
# Git stores symlinks in terms of Unix paths, so split on '/' instead of os.path.sep
components = link_to.split('/') + components
path_so_far = ''
else:
# Programmer error
raise self.UnexpectedGitObjectTypeException()
return self.Dir('./', None), './'
def _fixup_dot_relative(self, path):
"""Git doesn't understand dot-relative paths."""
if path.startswith('./'):
return path[2:]
elif path == '.':
return ''
return path
def _read_tree(self, path):
"""Given a revision and path, parse the tree data out of git cat-file output.
:returns: a dict from filename -> [list of Symlink, Dir, and File objects]
"""
path = self._fixup_dot_relative(path)
tree = self._trees.get(path)
if tree:
return tree
tree = {}
object_type, tree_data = self._read_object_from_repo(rev=self.rev, relpath=path)
assert object_type == b'tree'
# The tree data here is (mode ' ' filename \0 20-byte-sha)*
# It's transformed to a list of byte chars to allow iteration.
# See http://python-future.org/compatible_idioms.html#byte-string-literals.
tree_data = [bytes([b]) for b in tree_data]
i = 0
while i < len(tree_data):
start = i
while tree_data[i] != b' ':
i += 1
mode = b''.join(tree_data[start:i])
i += 1 # skip space
start = i
while tree_data[i] != NUL:
i += 1
name = b''.join(tree_data[start:i])
sha = b''.join(tree_data[i + 1:i + 1 + GIT_HASH_LENGTH])
sha_hex = binascii.hexlify(sha)
i += 1 + GIT_HASH_LENGTH
if mode == b'120000':
tree[name] = self.Symlink(name, sha_hex)
elif mode == b'40000':
tree[name] = self.Dir(name, sha_hex)
else:
tree[name] = self.File(name, sha_hex)
self._trees[path] = tree
return tree
def _read_object_from_repo(self, rev=None, relpath=None, sha=None):
"""Read an object from the git repo.
This is implemented via a pipe to git cat-file --batch
"""
if sha:
spec = sha + b'\n'
else:
assert rev is not None
assert relpath is not None
rev = ensure_text(rev)
relpath = ensure_text(relpath)
relpath = self._fixup_dot_relative(relpath)
spec = '{}:{}\n'.format(rev, relpath).encode('utf-8')
self._maybe_start_cat_file_process()
self._cat_file_process.stdin.write(spec)
self._cat_file_process.stdin.flush()
header = None
while not header:
header = self._cat_file_process.stdout.readline()
if self._cat_file_process.poll() is not None:
raise self.GitDiedException("Git cat-file died while trying to read '{}'.".format(spec))
header = header.rstrip()
parts = header.rsplit(SPACE, 2)
if len(parts) == 2:
assert parts[1] == b'missing'
raise self.MissingFileException(rev, relpath)
_, object_type, object_len = parts
# Read the object data
blob = bytes(self._cat_file_process.stdout.read(int(object_len)))
# Read the trailing newline
assert self._cat_file_process.stdout.read(1) == b'\n'
assert len(blob) == int(object_len)
return object_type, blob
def __del__(self):
if self._cat_file_process:
self._cat_file_process.communicate()
| twitter/pants | src/python/pants/scm/git.py | Python | apache-2.0 | 22,734 |
"""
Copyright 2014 Quentin Kaiser
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from nessusobject import NessusObject
class Scanner(NessusObject):
    """A Nessus scanner instance.

    Exposes the scanner attributes reported by a Nessus server (identity,
    engine version, status and scan statistics) through coercing properties:
    `id` and `scan_count` are stored as int, every other field as str.
    """

    def __init__(self, server):
        """Constructor"""
        super(Scanner, self).__init__(server)
        self._id = None
        self._uuid = None
        self._name = None
        self._type = None
        self._status = None
        self._scan_count = 0
        self._engine_version = None
        self._platform = None
        self._loaded_plugin_set = None
        self._registration_code = None
        self._owner = None

    def _get_id(self):
        return self._id

    def _set_id(self, value):
        self._id = int(value)

    # Numeric identifier assigned by the server.
    id = property(_get_id, _set_id)

    def _get_uuid(self):
        return self._uuid

    def _set_uuid(self, value):
        self._uuid = str(value)

    uuid = property(_get_uuid, _set_uuid)

    def _get_name(self):
        return self._name

    def _set_name(self, value):
        self._name = str(value)

    name = property(_get_name, _set_name)

    def _get_type(self):
        return self._type

    def _set_type(self, value):
        self._type = str(value)

    type = property(_get_type, _set_type)

    def _get_status(self):
        return self._status

    def _set_status(self, value):
        self._status = str(value)

    status = property(_get_status, _set_status)

    def _get_scan_count(self):
        return self._scan_count

    def _set_scan_count(self, value):
        self._scan_count = int(value)

    # Number of scans run by this scanner.
    scan_count = property(_get_scan_count, _set_scan_count)

    def _get_engine_version(self):
        return self._engine_version

    def _set_engine_version(self, value):
        self._engine_version = str(value)

    engine_version = property(_get_engine_version, _set_engine_version)

    def _get_platform(self):
        return self._platform

    def _set_platform(self, value):
        self._platform = str(value)

    platform = property(_get_platform, _set_platform)

    def _get_loaded_plugin_set(self):
        return self._loaded_plugin_set

    def _set_loaded_plugin_set(self, value):
        self._loaded_plugin_set = str(value)

    loaded_plugin_set = property(_get_loaded_plugin_set, _set_loaded_plugin_set)

    def _get_registration_code(self):
        return self._registration_code

    def _set_registration_code(self, value):
        self._registration_code = str(value)

    registration_code = property(_get_registration_code, _set_registration_code)

    def _get_owner(self):
        return self._owner

    def _set_owner(self, value):
        self._owner = str(value)

    owner = property(_get_owner, _set_owner)
# Class definition:
# RunJob
# This is the main RunJob class; RunJobEvent etc will inherit from this class
# Note: at the moment, this class is essentially the old runJob module turned object oriented.
# The class will later become RunJobNormal, ie responible for running normal PanDA jobs.
# At that point a new RunJob top class will be created containing methods that have been
# identified as common between the various sub classes.
# Instances are generated with RunJobFactory
# Subclasses should implement all needed methods prototyped in this class
# Note: not compatible with Singleton Design Pattern due to the subclassing
# Standard python modules
import os, sys, commands, getopt, time
import traceback
import atexit, signal
import stat
from optparse import OptionParser
# Pilot modules
import Site, pUtil, Job, Node, RunJobUtilities
import Mover as mover
from pUtil import debugInfo, tolog, isAnalysisJob, readpar, createLockFile, getDatasetDict, getChecksumCommand,\
tailPilotErrorDiag, getFileAccessInfo, processDBRelease, getCmtconfig, getExtension, getExperiment, getGUID, dumpFile
from JobRecovery import JobRecovery
from FileStateClient import updateFileStates, dumpFileStates
from ErrorDiagnosis import ErrorDiagnosis # import here to avoid issues seen at BU with missing module
from PilotErrors import PilotErrors
from ProxyGuard import ProxyGuard
from shutil import copy2
from FileHandling import tail
# remove logguid, dq2url, debuglevel - not needed
# rename lfcRegistration to catalogRegistration
# relabelled -h, queuename to -b (debuglevel not used)
class RunJob(object):
    """Main RunJob class: wraps setup, stage-in, payload execution and cleanup
    of a normal PanDA job. Subclasses (e.g. RunJobEvent) override the methods
    prototyped here; instances are produced by RunJobFactory.
    """

    # private data members
    __runjob = "RunJob"                  # String defining the RunJob class
    __instance = None                    # Boolean used by subclasses to become a Singleton
    __error = PilotErrors()              # PilotErrors object
#    __appdir = "/usatlas/projects/OSG"  # Default software installation directory
#    __debugLevel = 0                    # 0: debug info off, 1: display function name when called, 2: full debug info
#    __dq2url = ""                       # REMOVE
    __failureCode = None                 # set by signal handler when user/batch system kills the job
    __globalPilotErrorDiag = ""          # global pilotErrorDiag used with signal handler (only)
    __globalErrorCode = 0                # global error code used with signal handler (only)
    __inputDir = ""                      # location of input files (source for mv site mover)
    __fileCatalogRegistration = True     # should the pilot perform file catalog registration?
    __logguid = None                     # guid for the log file
    __outputDir = ""                     # location of output files (destination for mv site mover)
    __pilot_initdir = ""                 # location of where the pilot is untarred and started
    __pilotlogfilename = "pilotlog.txt"  # default pilotlog filename
    __pilotserver = "localhost"          # default server
    __pilotport = 88888                  # default port
    __proxycheckFlag = True              # True (default): perform proxy validity checks, False: no check
    __pworkdir = "/tmp"                  # site work dir used by the parent
#    __queuename = ""                    # PanDA queue NOT NEEDED
#    __sitename = "testsite"             # PanDA site NOT NEEDED
    __stageinretry = 1                   # number of stage-in tries
    __stageoutretry = 1                  # number of stage-out tries
#    __testLevel = 0                     # test suite control variable (0: no test, 1: put error, 2: ...) NOT USED
#    __workdir = "/tmp"                  # NOT USED
    __cache = ""                         # Cache URL, e.g. used by LSST
    # Getter and setter methods
    # (note: the double-underscore attributes above are name-mangled, so these
    # accessors are the supported way for subclasses/callers to reach them)

    def getExperiment(self):
        """ Getter for __experiment """

        return self.__experiment

    def getFailureCode(self):
        """ Getter for __failureCode (set by the signal handler on kill) """

        return self.__failureCode

    def setFailureCode(self, code):
        """ Setter for __failureCode """

        self.__failureCode = code

    def getGlobalPilotErrorDiag(self):
        """ Getter for __globalPilotErrorDiag """

        return self.__globalPilotErrorDiag

    def setGlobalPilotErrorDiag(self, pilotErrorDiag):
        """ Setter for __globalPilotErrorDiag """

        self.__globalPilotErrorDiag = pilotErrorDiag

    def getGlobalErrorCode(self):
        """ Getter for __globalErrorCode """

        return self.__globalErrorCode

    def setGlobalErrorCode(self, code):
        """ Setter for __globalErrorCode """

        self.__globalErrorCode = code

    def setCache(self, cache):
        """ Setter for __cache """

        self.__cache = cache

    def getInputDir(self):
        """ Getter for __inputDir """

        return self.__inputDir

    def getFileCatalogRegistration(self):
        """ Getter for __fileCatalogRegistration """

        return self.__fileCatalogRegistration

    def getLogGUID(self):
        """ Getter for __logguid """

        return self.__logguid

    def getOutputDir(self):
        """ Getter for __outputDir """

        return self.__outputDir

    def getPilotInitDir(self):
        """ Getter for __pilot_initdir """

        return self.__pilot_initdir

    def getPilotLogFilename(self):
        """ Getter for __pilotlogfilename """

        return self.__pilotlogfilename

    def getPilotServer(self):
        """ Getter for __pilotserver """

        return self.__pilotserver

    def getPilotPort(self):
        """ Getter for __pilotport """

        return self.__pilotport

    def getProxyCheckFlag(self):
        """ Getter for __proxycheckFlag """

        return self.__proxycheckFlag

    def getParentWorkDir(self):
        """ Getter for __pworkdir """

        return self.__pworkdir

    def getStageInRetry(self):
        """ Getter for __stageinretry (number of stage-in attempts) """

        return self.__stageinretry

    def getStageOutRetry(self):
        """ Getter for __stageoutretry (number of stage-out attempts) """

        return self.__stageoutretry

    def getCache(self):
        """ Getter for __cache """

        return self.__cache
    # Required methods

    def __init__(self):
        """ Default initialization (all state lives in the class attributes) """

        # e.g. self.__errorLabel = errorLabel
        pass

    def getRunJob(self):
        """ Return a string with the module name """

        return self.__runjob
def argumentParser(self):
""" Argument parser for the RunJob module """
# Return variables
appdir = None
dq2url = None # REMOVE
queuename = None
sitename = None
workdir = None
parser = OptionParser()
parser.add_option("-a", "--appdir", dest="appdir",
help="The local path to the applications directory", metavar="APPDIR")
parser.add_option("-b", "--queuename", dest="queuename",
help="Queue name", metavar="QUEUENAME")
parser.add_option("-d", "--workdir", dest="workdir",
help="The local path to the working directory of the payload", metavar="WORKDIR")
parser.add_option("-g", "--inputdir", dest="inputDir",
help="Location of input files to be transferred by the mv site mover", metavar="INPUTDIR")
parser.add_option("-i", "--logfileguid", dest="logguid",
help="Log file guid", metavar="GUID")
parser.add_option("-k", "--pilotlogfilename", dest="pilotlogfilename",
help="The name of the pilot log file", metavar="PILOTLOGFILENAME")
parser.add_option("-l", "--pilotinitdir", dest="pilot_initdir",
help="The local path to the directory where the pilot was launched", metavar="PILOT_INITDIR")
parser.add_option("-m", "--outputdir", dest="outputDir",
help="Destination of output files to be transferred by the mv site mover", metavar="OUTPUTDIR")
parser.add_option("-o", "--parentworkdir", dest="pworkdir",
help="Path to the work directory of the parent process (i.e. the pilot)", metavar="PWORKDIR")
parser.add_option("-s", "--sitename", dest="sitename",
help="The name of the site where the job is to be run", metavar="SITENAME")
parser.add_option("-w", "--pilotserver", dest="pilotserver",
help="The URL of the pilot TCP server (localhost) WILL BE RETIRED", metavar="PILOTSERVER")
parser.add_option("-p", "--pilotport", dest="pilotport",
help="Pilot TCP server port (default: 88888)", metavar="PORT")
parser.add_option("-t", "--proxycheckflag", dest="proxycheckFlag",
help="True (default): perform proxy validity checks, False: no check", metavar="PROXYCHECKFLAG")
parser.add_option("-q", "--dq2url", dest="dq2url",
help="DQ2 URL TO BE RETIRED", metavar="DQ2URL")
parser.add_option("-x", "--stageinretries", dest="stageinretry",
help="The number of stage-in retries", metavar="STAGEINRETRY")
parser.add_option("-B", "--filecatalogregistration", dest="fileCatalogRegistration",
help="True (default): perform file catalog registration, False: no catalog registration", metavar="FILECATALOGREGISTRATION")
parser.add_option("-E", "--stageoutretries", dest="stageoutretry",
help="The number of stage-out retries", metavar="STAGEOUTRETRY")
parser.add_option("-F", "--experiment", dest="experiment",
help="Current experiment (default: ATLAS)", metavar="EXPERIMENT")
parser.add_option("-H", "--cache", dest="cache",
help="Cache URL", metavar="CACHE")
# options = {'experiment': 'ATLAS'}
try:
(options, args) = parser.parse_args()
except Exception,e:
tolog("!!WARNING!!3333!! Exception caught:" % (e))
print options.experiment
else:
if options.appdir:
# self.__appdir = options.appdir
appdir = options.appdir
if options.dq2url:
# self.__dq2url = options.dq2url
dq2url = options.dq2url
if options.experiment:
self.__experiment = options.experiment
if options.logguid:
self.__logguid = options.logguid
if options.inputDir:
self.__inputDir = options.inputDir
if options.fileCatalogRegistration:
if options.fileCatalogRegistration.lower() == "false":
self.__fileCatalogRegistration = False
else:
self.__fileCatalogRegistration = True
else:
self.__fileCatalogRegistration = True
if options.pilot_initdir:
self.__pilot_initdir = options.pilot_initdir
if options.pilotlogfilename:
self.__pilotlogfilename = options.pilotlogfilename
if options.pilotserver:
self.__pilotserver = options.pilotserver
if options.proxycheckFlag:
if options.proxycheckFlag.lower() == "false":
self.__proxycheckFlag = False
else:
self.__proxycheckFlag = True
else:
self.__proxycheckFlag = True
if options.pworkdir:
self.__pworkdir = options.pworkdir
if options.outputDir:
self.__outputDir = options.outputDir
if options.pilotport:
try:
self.__pilotport = int(options.pilotport)
except Exception, e:
tolog("!!WARNING!!3232!! Exception caught: %s" % (e))
# self.__queuename is not needed
if options.queuename:
queuename = options.queuename
if options.sitename:
sitename = options.sitename
if options.stageinretry:
try:
self.__stageinretry = int(options.stageinretry)
except Exception, e:
tolog("!!WARNING!!3232!! Exception caught: %s" % (e))
if options.stageoutretry:
try:
self.__stageoutretry = int(options.stageoutretry)
except Exception, e:
tolog("!!WARNING!!3232!! Exception caught: %s" % (e))
if options.workdir:
workdir = options.workdir
if options.cache:
self.__cache = options.cache
return sitename, appdir, workdir, dq2url, queuename
def getRunJobFileName(self):
""" Return the filename of the module """
fullpath = sys.modules[self.__module__].__file__
# Note: the filename above will contain both full path, and might end with .pyc, fix this
filename = os.path.basename(fullpath)
if filename.endswith(".pyc"):
filename = filename[:-1] # remove the trailing 'c'
return filename
def allowLoopingJobKiller(self):
""" Should the pilot search for looping jobs? """
# The pilot has the ability to monitor the payload work directory. If there are no updated files within a certain
# time limit, the pilot will consider the as stuck (looping) and will kill it. The looping time limits are set
# in environment.py (see e.g. loopingLimitDefaultProd)
return True
    def cleanup(self, job, rf=None):
        """ Cleanup function

        Removes input files, saves output files to the recovery data directory
        for 'holding' jobs, and copies the metadata file to the site work dir.

        :param job: Job object of the finished/failed job
        :param rf: return value from Mover::put, listing transferred files
        """
        # 'rf' is a list that will contain the names of the files that could be transferred
        # In case of transfer problems, all remaining files will be found and moved
        # to the data directory for later recovery.

        tolog("********************************************************")
        tolog(" This job ended with (trf,pilot) exit code of (%d,%d)" % (job.result[1], job.result[2]))
        tolog("********************************************************")

        # clean up the pilot wrapper modules
        pUtil.removePyModules(job.workdir)

        if os.path.isdir(job.workdir):
            os.chdir(job.workdir)

            # remove input files from the job workdir
            remFiles = job.inFiles
            for inf in remFiles:
                if inf and inf != 'NULL' and os.path.isfile("%s/%s" % (job.workdir, inf)): # non-empty string and not NULL
                    try:
                        os.remove("%s/%s" % (job.workdir, inf))
                    except Exception,e:
                        # best-effort removal; a leftover input file is harmless
                        tolog("!!WARNING!!3000!! Ignore this Exception when deleting file %s: %s" % (inf, str(e)))
                        pass

            # only remove output files if status is not 'holding'
            # in which case the files should be saved for the job recovery.
            # the job itself must also have finished with a zero trf error code
            # (data will be moved to another directory to keep it out of the log file)

            # always copy the metadata-<jobId>.xml to the site work dir
            # WARNING: this metadata file might contain info about files that were not successfully moved to the SE
            # it will be regenerated by the job recovery for the cases where there are output files in the datadir
            try:
                tolog('job.workdir is %s pworkdir is %s ' % (job.workdir, self.__pworkdir)) # Eddie
                copy2("%s/metadata-%s.xml" % (job.workdir, job.jobId), "%s/metadata-%s.xml" % (self.__pworkdir, job.jobId))
            except Exception, e:
                tolog("Warning: Could not copy metadata-%s.xml to site work dir - ddm Adder problems will occure in case of job recovery" % (job.jobId))
                tolog('job.workdir is %s pworkdir is %s ' % (job.workdir, self.__pworkdir)) # Eddie

            if job.result[0] == 'holding' and job.result[1] == 0:
                try:
                    # create the data directory
                    os.makedirs(job.datadir)
                except OSError, e:
                    tolog("!!WARNING!!3000!! Could not create data directory: %s, %s" % (job.datadir, str(e)))
                else:
                    # find all remaining files in case 'rf' is not empty
                    remaining_files = []
                    moved_files_list = []
                    try:
                        if rf != None:
                            moved_files_list = RunJobUtilities.getFileNamesFromString(rf[1])
                            remaining_files = RunJobUtilities.getRemainingFiles(moved_files_list, job.outFiles)
                    except Exception, e:
                        # fall back to treating all output files as untransferred
                        tolog("!!WARNING!!3000!! Illegal return value from Mover: %s, %s" % (str(rf), str(e)))
                        remaining_files = job.outFiles

                    # move all remaining output files to the data directory
                    nr_moved = 0
                    for _file in remaining_files:
                        try:
                            os.system("mv %s %s" % (_file, job.datadir))
                        except OSError, e:
                            tolog("!!WARNING!!3000!! Failed to move file %s (abort all)" % (_file))
                            break
                        else:
                            nr_moved += 1

                    tolog("Moved %d/%d output file(s) to: %s" % (nr_moved, len(remaining_files), job.datadir))

                    # remove all successfully copied files from the local directory
                    nr_removed = 0
                    for _file in moved_files_list:
                        try:
                            os.system("rm %s" % (_file))
                        except OSError, e:
                            tolog("!!WARNING!!3000!! Failed to remove output file: %s, %s" % (_file, e))
                        else:
                            nr_removed += 1

                    tolog("Removed %d output file(s) from local dir" % (nr_removed))

                    # copy the PoolFileCatalog.xml for non build jobs
                    if not pUtil.isBuildJob(remaining_files):
                        _fname = os.path.join(job.workdir, "PoolFileCatalog.xml")
                        tolog("Copying %s to %s" % (_fname, job.datadir))
                        try:
                            copy2(_fname, job.datadir)
                        except Exception, e:
                            tolog("!!WARNING!!3000!! Could not copy PoolFileCatalog.xml to data dir - expect ddm Adder problems during job recovery")

            # remove all remaining output files from the work directory
            # (a successfully copied file should already have been removed by the Mover)
            rem = False
            for inf in job.outFiles:
                if inf and inf != 'NULL' and os.path.isfile("%s/%s" % (job.workdir, inf)): # non-empty string and not NULL
                    try:
                        os.remove("%s/%s" % (job.workdir, inf))
                    except Exception,e:
                        tolog("!!WARNING!!3000!! Ignore this Exception when deleting file %s: %s" % (inf, str(e)))
                        pass
                    else:
                        tolog("Lingering output file removed: %s" % (inf))
                        rem = True
            if not rem:
                tolog("All output files already removed from local dir")

        tolog("Payload cleanup has finished")
    def sysExit(self, job, rf=None):
        '''
        wrapper around sys.exit
        rf is the return string from Mover::put containing a list of files that were not transferred
        '''

        self.cleanup(job, rf=rf)
        sys.stderr.close()
        tolog("RunJob (payload wrapper) has finished")
        # change to sys.exit?
        os._exit(job.result[2]) # pilotExitCode, don't confuse this with the overall pilot exit code,
                                # which doesn't get reported back to panda server anyway
    def failJob(self, transExitCode, pilotExitCode, job, ins=None, pilotErrorDiag=None, docleanup=True):
        """ set the fail code and exit

        Marks the job as failed, reports the final state to the local pilot
        TCP server, removes input files and (by default) exits the process.

        :param ins: optional list of input files to remove from the workdir
        :param docleanup: when True, run cleanup and terminate via sysExit
        """

        # downgrade to a recoverable error for young event-service merge jobs
        if pilotExitCode and job.attemptNr < 4 and job.eventServiceMerge:
            pilotExitCode = PilotErrors.ERR_ESRECOVERABLE
        job.setState(["failed", transExitCode, pilotExitCode])
        if pilotErrorDiag:
            job.pilotErrorDiag = pilotErrorDiag
        tolog("Will now update local pilot TCP server")
        rt = RunJobUtilities.updatePilotServer(job, self.__pilotserver, self.__pilotport, final=True)
        if ins:
            ec = pUtil.removeFiles(job.workdir, ins)
        if docleanup:
            # does not return: sysExit() calls os._exit()
            self.sysExit(job)
def isMultiTrf(self, parameterList):
""" Will we execute multiple jobs? """
if len(parameterList) > 1:
multi_trf = True
else:
multi_trf = False
return multi_trf
    def setup(self, job, jobSite, thisExperiment):
        """ prepare the setup and get the run command list

        Builds one run command per transformation (multi-trf jobs define
        several, newline-separated, in jobPars/homePackage/trf).

        :returns: tuple (error code, run command list, updated job, multi_trf flag)
        """

        # start setup time counter
        t0 = time.time()
        ec = 0
        runCommandList = []

        # split up the job parameters to be able to loop over the tasks
        jobParameterList = job.jobPars.split("\n")
        jobHomePackageList = job.homePackage.split("\n")
        jobTrfList = job.trf.split("\n")
        job.release = thisExperiment.formatReleaseString(job.release)
        releaseList = thisExperiment.getRelease(job.release)
        tolog("Number of transformations to process: %s" % len(jobParameterList))
        multi_trf = self.isMultiTrf(jobParameterList)

        # verify that the multi-trf job is setup properly
        ec, job.pilotErrorDiag, releaseList = RunJobUtilities.verifyMultiTrf(jobParameterList, jobHomePackageList, jobTrfList, releaseList)
        if ec > 0:
            return ec, runCommandList, job, multi_trf

        os.chdir(jobSite.workdir)
        tolog("Current job workdir is %s" % os.getcwd())

        # setup the trf(s)
        _i = 0
        _stdout = job.stdout
        _stderr = job.stderr
        _first = True
        # NOTE: Python 2 only - map(None, ...) zips the lists, padding with None
        for (_jobPars, _homepackage, _trf, _swRelease) in map(None, jobParameterList, jobHomePackageList, jobTrfList, releaseList):
            tolog("Preparing setup %d/%d" % (_i + 1, len(jobParameterList)))

            # reset variables
            job.jobPars = _jobPars
            job.homePackage = _homepackage
            job.trf = _trf
            job.release = _swRelease
            if multi_trf:
                # give each transformation its own stdout/stderr file
                job.stdout = _stdout.replace(".txt", "_%d.txt" % (_i + 1))
                job.stderr = _stderr.replace(".txt", "_%d.txt" % (_i + 1))

            # post process copysetup variable in case of directIn/useFileStager
            _copysetup = readpar('copysetup')
            _copysetupin = readpar('copysetupin')
            if "--directIn" in job.jobPars or "--useFileStager" in job.jobPars or _copysetup.count('^') == 5 or _copysetupin.count('^') == 5:
                # only need to update the queuedata file once
                if _first:
                    RunJobUtilities.updateCopysetups(job.jobPars)
                    _first = False

            # setup the trf
            ec, job.pilotErrorDiag, cmd, job.spsetup, job.JEM, job.cmtconfig = thisExperiment.getJobExecutionCommand(job, jobSite, self.__pilot_initdir)
            if ec > 0:
                # setup failed
                break

            # add the setup command to the command list
            runCommandList.append(cmd)
            _i += 1

        # restore the original stdout/stderr names for the caller
        job.stdout = _stdout
        job.stderr = _stderr
        job.timeSetup = int(time.time() - t0)
        tolog("Total setup time: %d s" % (job.timeSetup))

        return ec, runCommandList, job, multi_trf
    def stageIn(self, job, jobSite, analysisJob, pfc_name="PoolFileCatalog.xml"):
        """ Perform the stage-in

        Transfers the job's input files via the Mover and records timing and
        FAX transfer statistics on the job object.

        :returns: tuple (updated job, prepared input file list,
                  PFC-is-TURL status, usedFAXandDirectIO flag)
        """

        ec = 0
        statusPFCTurl = None
        usedFAXandDirectIO = False

        # Prepare the input files (remove non-valid names) if there are any
        ins, job.filesizeIn, job.checksumIn = RunJobUtilities.prepareInFiles(job.inFiles, job.filesizeIn, job.checksumIn)
        if ins:
            tolog("Preparing for get command")

            # Get the file access info (only useCT is needed here)
            useCT, oldPrefix, newPrefix, useFileStager, directIn = getFileAccessInfo()

            # Transfer input files
            tin_0 = os.times()
            ec, job.pilotErrorDiag, statusPFCTurl, FAX_dictionary = \
                mover.get_data(job, jobSite, ins, self.__stageinretry, analysisJob=analysisJob, usect=useCT,\
                               pinitdir=self.__pilot_initdir, proxycheck=False, inputDir=self.__inputDir, workDir=self.__pworkdir, pfc_name=pfc_name)
            if ec != 0:
                job.result[2] = ec
            tin_1 = os.times()
            # os.times()[4] is elapsed wall-clock time
            job.timeStageIn = int(round(tin_1[4] - tin_0[4]))

            # Extract any FAX info from the dictionary
            # (note: dict.has_key() is Python 2 only)
            if FAX_dictionary.has_key('N_filesWithoutFAX'):
                job.filesWithoutFAX = FAX_dictionary['N_filesWithoutFAX']
            if FAX_dictionary.has_key('N_filesWithFAX'):
                job.filesWithFAX = FAX_dictionary['N_filesWithFAX']
            if FAX_dictionary.has_key('bytesWithoutFAX'):
                job.bytesWithoutFAX = FAX_dictionary['bytesWithoutFAX']
            if FAX_dictionary.has_key('bytesWithFAX'):
                job.bytesWithFAX = FAX_dictionary['bytesWithFAX']
            if FAX_dictionary.has_key('usedFAXandDirectIO'):
                usedFAXandDirectIO = FAX_dictionary['usedFAXandDirectIO']

        return job, ins, statusPFCTurl, usedFAXandDirectIO
def getTrfExitInfo(self, exitCode, workdir):
""" Get the trf exit code and info from job report if possible """
exitAcronym = ""
exitMsg = ""
# does the job report exist?
extension = getExtension(alternative='pickle')
if extension.lower() == "json":
_filename = "jobReport.%s" % (extension)
else:
_filename = "jobReportExtract.%s" % (extension)
filename = os.path.join(workdir, _filename)
if os.path.exists(filename):
tolog("Found job report: %s" % (filename))
# wait a few seconds to make sure the job report is finished
tolog("Taking a 5s nap to make sure the job report is finished")
time.sleep(5)
# first backup the jobReport to the job workdir since it will be needed later
# (the current location will disappear since it will be tarred up in the jobs' log file)
d = os.path.join(workdir, '..')
try:
copy2(filename, os.path.join(d, _filename))
except Exception, e:
tolog("Warning: Could not backup %s to %s: %s" % (_filename, d, e))
else:
tolog("Backed up %s to %s" % (_filename, d))
# search for the exit code
try:
f = open(filename, "r")
except Exception, e:
tolog("!!WARNING!!1112!! Failed to open job report: %s" % (e))
else:
if extension.lower() == "json":
from json import load
else:
from pickle import load
data = load(f)
# extract the exit code and info
_exitCode = self.extractDictionaryObject("exitCode", data)
if _exitCode:
if _exitCode == 0 and exitCode != 0:
tolog("!!WARNING!!1111!! Detected inconsistency in %s: exitcode listed as 0 but original trf exit code was %d (using original error code)" %\
(filename, exitCode))
else:
exitCode = _exitCode
_exitAcronym = self.extractDictionaryObject("exitAcronym", data)
if _exitAcronym:
exitAcronym = _exitAcronym
_exitMsg = self.extractDictionaryObject("exitMsg", data)
if _exitMsg:
exitMsg = _exitMsg
f.close()
tolog("Trf exited with:")
tolog("...exitCode=%d" % (exitCode))
tolog("...exitAcronym=%s" % (exitAcronym))
tolog("...exitMsg=%s" % (exitMsg))
else:
tolog("Job report not found: %s" % (filename))
return exitCode, exitAcronym, exitMsg
def extractDictionaryObject(self, obj, dictionary):
    """ Extract an object from a dictionary """
    # Best-effort lookup: any failure (missing key, wrong container type)
    # is logged and yields None instead of propagating.
    try:
        extracted = dictionary[obj]
    except Exception:
        tolog("Object %s not found in dictionary" % (obj))
        return None

    tolog('Extracted "%s"=%s from dictionary' % (obj, extracted))
    return extracted
def getMemoryUtilityCommand(self, pid, summary="summary.json"):
    """ Prepare the memory utility command string """
    monitor_path = "/afs/cern.ch/work/n/nrauschm/public/MemoryMonitoringTool/MemoryMonitor"
    sampling_interval = 60  # seconds between memory samples

    # logging the pid is guarded since a non-integer pid would break the %d format
    try:
        tolog("2. Process id of job command: %d" % (pid))
    except Exception as e:
        tolog("Exception caught: %s" % (e))

    # Construct the name of the output file using the summary variable
    if summary.endswith('.json'):
        output = summary.replace('.json', '.txt')
    else:
        output = summary + '.txt'

    # only build the command when the monitor binary is actually present
    if not os.path.exists(monitor_path):
        tolog("Path does not exist: %s" % (monitor_path))
        return ""

    return "%s --pid %d --filename %s --json-summary %s --interval %d" % (monitor_path, pid, output, summary, sampling_interval)
def executePayload(self, thisExperiment, runCommandList, job):
    """ execute the payload

    Runs each command in runCommandList as a subprocess (several commands
    only for multi-trf jobs), optionally launching a memory monitor next to
    the payload, and records CPU/wall-time consumption on the job object.

    :param thisExperiment: experiment object used to create subprocesses
    :param runCommandList: list of payload command strings
    :param job: job object (stdout/stderr names, workdir, result fields are used)
    :return: tuple (res, job, getstatusoutput_was_interrupted, current_job_number)
    """

    # do not hide the proxy for PandaMover since it needs it or for sites that has sc.proxy = donothide
    # if 'DDM' not in jobSite.sitename and readpar('proxy') != 'donothide':
    #     # create the proxy guard object (must be created here before the sig2exc())
    #     proxyguard = ProxyGuard()
    #
    #     # hide the proxy
    #     hP_ret = proxyguard.hideProxy()
    #     if not hP_ret:
    #         tolog("Warning: Proxy exposed to payload")

    # run the payload process, which could take days to finish
    t0 = os.times()
    tolog("t0 = %s" % str(t0))
    res_tuple = (0, 'Undefined')

    multi_trf = self.isMultiTrf(runCommandList)
    _stdout = job.stdout
    _stderr = job.stderr

    # loop over all run commands (only >1 for multi-trfs)
    current_job_number = 0
    getstatusoutput_was_interrupted = False
    number_of_jobs = len(runCommandList)
    for cmd in runCommandList:
        current_job_number += 1

        # create the stdout/err files (per-trf names for multi-trf jobs)
        if multi_trf:
            job.stdout = _stdout.replace(".txt", "_%d.txt" % (current_job_number))
            job.stderr = _stderr.replace(".txt", "_%d.txt" % (current_job_number))
        file_stdout, file_stderr = self.getStdoutStderrFileObjects(stdoutName=job.stdout, stderrName=job.stderr)
        if not (file_stdout and file_stderr):
            res_tuple = (1, "Could not open stdout/stderr files, piping not possible")
            tolog("!!WARNING!!2222!! %s" % (res_tuple[1]))
            break

        try:
            # add the full job command to the job_setup.sh file
            to_script = cmd.replace(";", ";\n")
            thisExperiment.updateJobSetupScript(job.workdir, to_script=to_script)

            tolog("Executing job command %d/%d" % (current_job_number, number_of_jobs))

            # Start the subprocess
            main_subprocess = self.getSubprocess(thisExperiment, cmd, stdout=file_stdout, stderr=file_stderr)
            if main_subprocess:
                time.sleep(2)
                # guarded since a non-integer pid would break the %d format
                try:
                    tolog("Process id of job command: %d" % (main_subprocess.pid))
                except Exception, e:
                    tolog("1. Exception caught: %s" % (e))

                # Start the memory utility if required
                mem_subprocess = None
                if thisExperiment.shouldExecuteMemoryMonitor():
                    summary = thisExperiment.getMemoryMonitorJSONFilename()
                    mem_cmd = self.getMemoryUtilityCommand(main_subprocess.pid, summary=summary)
                    if mem_cmd != "":
                        mem_subprocess = self.getSubprocess(thisExperiment, mem_cmd)
                        if mem_subprocess:
                            try:
                                tolog("Process id of memory monitor: %d" % (mem_subprocess.pid))
                            except Exception, e:
                                tolog("3. Exception caught: %s" % (e))
                    else:
                        tolog("Could not launch memory monitor since the command path does not exist")
                else:
                    tolog("Not required to run memory monitor")

                # Loop until the main subprocess has finished
                while main_subprocess.poll() is None:
                    # ..
                    # Take a short nap
                    time.sleep(1)

                # Stop the memory monitor (SIGUSR1 asks it to write its summary and exit)
                if mem_subprocess:
                    mem_subprocess.send_signal(signal.SIGUSR1)
                    tolog("Terminated the memory monitor subprocess")

                    # Move the output JSON to the pilots init dir
                    # NOTE(review): copy2() does not expand shell wildcards, so the
                    # "*.json" source path presumably always raises here and the
                    # failure is swallowed by the except -- confirm
                    try:
                        copy2("%s/*.json" % (job.workdir), "%s/." % (self.__pworkdir))
                    except Exception, e:
                        tolog("!!WARNING!!2222!! Caught exception while trying to copy JSON files: %s" % (e))

                # Handle main subprocess errors
                try:
                    stdout = open(job.stdout, 'r')
                    res_tuple = (main_subprocess.returncode, tail(stdout))
                except Exception, e:
                    tolog("!!WARNING!!3002!! Failed during tail operation: %s" % (e))
                else:
                    tolog("Tail:\n%s" % (res_tuple[1]))
                    stdout.close()
            else:
                res_tuple = (1, "Popen ended prematurely (payload command failed to execute, see stdout/err)")
                tolog("!!WARNING!!3001!! %s" % (res_tuple[1]))
        except Exception, e:
            tolog("!!FAILED!!3000!! Failed to run command: %s" % str(e))
            getstatusoutput_was_interrupted = True
            if self.__failureCode:
                job.result[2] = self.__failureCode
                tolog("!!FAILED!!3000!! Failure code: %d" % (self.__failureCode))
                break
        else:
            if res_tuple[0] == 0:
                tolog("Job command %d/%d finished" % (current_job_number, number_of_jobs))
            else:
                tolog("Job command %d/%d failed: res = %s" % (current_job_number, number_of_jobs, str(res_tuple)))
                break

    t1 = os.times()
    tolog("t1 = %s" % str(t1))
    t = map(lambda x, y:x-y, t1, t0)  # get the time consumed
    job.cpuConsumptionUnit, job.cpuConsumptionTime, job.cpuConversionFactor = pUtil.setTimeConsumed(t)
    tolog("Job CPU usage: %s %s" % (job.cpuConsumptionTime, job.cpuConsumptionUnit))
    tolog("Job CPU conversion factor: %1.10f" % (job.cpuConversionFactor))
    # os.times()[4] is elapsed wall-clock time
    job.timeExe = int(round(t1[4] - t0[4]))

    tolog("Original exit code: %d" % (res_tuple[0]))
    # NOTE(review): shell exit codes wrap modulo 256; "% 255" looks like an
    # off-by-one (255 % 255 == 0) -- confirm before changing
    tolog("Exit code: %d (returned from OS)" % (res_tuple[0]%255))

    # check the job report for any exit code that should replace the res_tuple[0]
    res0, exitAcronym, exitMsg = self.getTrfExitInfo(res_tuple[0], job.workdir)
    res = (res0, res_tuple[1], exitMsg)

    # dump an extract of the payload output
    if number_of_jobs > 1:
        _stdout = job.stdout
        _stderr = job.stderr
        _stdout = _stdout.replace(".txt", "_N.txt")
        _stderr = _stderr.replace(".txt", "_N.txt")
        tolog("NOTE: For %s output, see files %s, %s (N = [1, %d])" % (job.payload, _stdout, _stderr, number_of_jobs))
    else:
        tolog("NOTE: For %s output, see files %s, %s" % (job.payload, job.stdout, job.stderr))

    # JEM job-end callback
    try:
        from JEMstub import notifyJobEnd2JEM
        notifyJobEnd2JEM(job, tolog)
    except:
        pass    # don't care (fire and forget)

    # restore the proxy
    #if hP_ret:
    #    rP_ret = proxyguard.restoreProxy()
    #    if not rP_ret:
    #        tolog("Warning: Problems with storage can occur since proxy could not be restored")
    #    else:
    #        hP_ret = False
    #        tolog("ProxyGuard has finished successfully")

    return res, job, getstatusoutput_was_interrupted, current_job_number
def moveTrfMetadata(self, workdir, jobId):
    """ rename and copy the trf metadata """
    sourceName = "%s/metadata.xml" % (workdir)
    _filename = "metadata-%s.xml.PAYLOAD" % (jobId)
    targetName = "%s/%s" % (workdir, _filename)

    # a missing metadata.xml is expected for some payloads, so a failed
    # rename is harmless and simply skips the copy below
    try:
        os.rename(sourceName, targetName)
    except:
        tolog("Warning: Could not open the original %s file, but harmless, pass it" % (sourceName))
        return

    tolog("Renamed %s to %s" % (sourceName, targetName))

    # now move it to the pilot work dir
    try:
        copy2(targetName, "%s/%s" % (self.__pworkdir, _filename))
    except Exception as e:
        tolog("Warning: Could not copy %s to site work dir: %s" % (_filename, str(e)))
    else:
        tolog("Metadata was transferred to site work dir: %s/%s" % (self.__pworkdir, _filename))
def createFileMetadata(self, outFiles, job, outsDict, dsname, datasetDict, sitename, analysisJob=False):
    """ create the metadata for the output + log files

    Assigns guids to the output files, collects their sizes/checksums and
    writes the preliminary metadata-<jobId>.xml file. May terminate the job
    via self.failJob() on metadata errors.

    :param outFiles: list of output file names
    :param job: job object (guids, workdir, logFile etc. are read and updated)
    :param outsDict: output files dictionary (used for NG/CERNVM conversion)
    :param dsname: default dataset name
    :param datasetDict: per-file dataset dictionary (or None)
    :param sitename: site name (CERNVM triggers metadata conversion)
    :param analysisJob: True for user analysis jobs
    :return: tuple (ec, job, outputFileInfo) where outputFileInfo maps
             file name -> (fsize, checksum)
    """

    ec = 0

    # get/assign guids to the output files
    if outFiles:
        if not pUtil.isBuildJob(outFiles):
            ec, job.pilotErrorDiag, job.outFilesGuids = RunJobUtilities.getOutFilesGuids(job.outFiles, job.workdir)
            if ec:
                # missing PoolFileCatalog (only error code from getOutFilesGuids)
                return ec, job, None
        else:
            tolog("Build job - do not use PoolFileCatalog to get guid (generated)")
    else:
        tolog("This job has no output files")

    # get the file sizes and checksums for the local output files
    # WARNING: any errors are lost if occur in getOutputFileInfo()
    ec, pilotErrorDiag, fsize, checksum = pUtil.getOutputFileInfo(list(outFiles), getChecksumCommand(), skiplog=True, logFile=job.logFile)
    if ec != 0:
        tolog("!!FAILED!!2999!! %s" % (pilotErrorDiag))
        self.failJob(job.result[1], ec, job, pilotErrorDiag=pilotErrorDiag)

    # log file guid: command-line override wins over the one from the job definition
    if self.__logguid:
        guid = self.__logguid
    else:
        guid = job.tarFileGuid

    # create preliminary metadata (no metadata yet about log file - added later in pilot.py)
    _fname = "%s/metadata-%s.xml" % (job.workdir, job.jobId)
    try:
        _status = pUtil.PFCxml(job.experiment, _fname, list(job.outFiles), fguids=job.outFilesGuids, fntag="lfn", alog=job.logFile, alogguid=guid,\
                               fsize=fsize, checksum=checksum, analJob=analysisJob)
    except Exception, e:
        pilotErrorDiag = "PFCxml failed due to problematic XML: %s" % (e)
        tolog("!!WARNING!!1113!! %s" % (pilotErrorDiag))
        self.failJob(job.result[1], error.ERR_MISSINGGUID, job, pilotErrorDiag=pilotErrorDiag)
    else:
        if not _status:
            pilotErrorDiag = "Missing guid(s) for output file(s) in metadata"
            tolog("!!FAILED!!2999!! %s" % (pilotErrorDiag))
            self.failJob(job.result[1], error.ERR_MISSINGGUID, job, pilotErrorDiag=pilotErrorDiag)

    tolog("..............................................................................................................")
    tolog("Created %s with:" % (_fname))
    tolog(".. log            : %s (to be transferred)" % (job.logFile))
    tolog(".. log guid       : %s" % (guid))
    tolog(".. out files      : %s" % str(job.outFiles))
    tolog(".. out file guids : %s" % str(job.outFilesGuids))
    tolog(".. fsize          : %s" % str(fsize))
    tolog(".. checksum       : %s" % str(checksum))
    tolog("..............................................................................................................")

    # convert the preliminary metadata-<jobId>.xml file to OutputFiles-<jobId>.xml for NG and for CERNVM
    # note: for CERNVM this is only really needed when CoPilot is used
    # (has_key is a Python 2-only dict idiom)
    if os.environ.has_key('Nordugrid_pilot') or sitename == 'CERNVM':
        if RunJobUtilities.convertMetadata4NG(os.path.join(job.workdir, job.outputFilesXML), _fname, outsDict, dsname, datasetDict):
            tolog("Metadata has been converted to NG/CERNVM format")
        else:
            job.pilotErrorDiag = "Could not convert metadata to NG/CERNVM format"
            tolog("!!WARNING!!1999!! %s" % (job.pilotErrorDiag))

    # try to build a file size and checksum dictionary for the output files
    # outputFileInfo: {'a.dat': (fsize, checksum), ...}
    # e.g.: file size for file a.dat: outputFileInfo['a.dat'][0]
    # checksum for file a.dat: outputFileInfo['a.dat'][1]
    try:
        # remove the log entries
        _fsize = fsize[1:]
        _checksum = checksum[1:]
        outputFileInfo = dict(zip(job.outFiles, zip(_fsize, _checksum)))
    except Exception, e:
        tolog("!!WARNING!!2993!! Could not create output file info dictionary: %s" % str(e))
        outputFileInfo = {}
    else:
        tolog("Output file info dictionary created: %s" % str(outputFileInfo))

    return ec, job, outputFileInfo
def getDatasets(self, job):
    """ get the datasets for the output files """
    # get the default dataset
    dblocks = job.destinationDblock
    if dblocks and dblocks[0] != 'NULL' and dblocks[0] != ' ':
        dsname = dblocks[0]
    else:
        # fall back to a (year, month, day) based name
        dsname = "%s-%s-%s" % (time.localtime()[0:3]) # pass it a random name

    # create the dataset dictionary
    # (if None, the dsname above will be used for all output files)
    datasetDict = getDatasetDict(job.outFiles, dblocks, job.logFile, job.logDblock)
    if datasetDict:
        tolog("Dataset dictionary has been verified")
    else:
        tolog("Dataset dictionary could not be verified, output files will go to: %s" % (dsname))

    return dsname, datasetDict
def stageOut(self, job, jobSite, outs, analysisJob, dsname, datasetDict, outputFileInfo):
    """ perform the stage-out

    Writes a PFN catalog for the output files, calls the site mover to
    transfer them, and updates the job state ('finished'/'holding'/'failed')
    according to the outcome.

    :param job: job object (state, error diagnostics and timing are updated)
    :param jobSite: site object (sitename, dq2url, workdir are used)
    :param outs: list of output files to transfer
    :param analysisJob: True for user analysis jobs
    :param dsname: default dataset name
    :param datasetDict: per-file dataset dictionary (or None)
    :param outputFileInfo: {filename: (fsize, checksum), ...}
    :return: tuple (rc, job, rf, latereg) where rf is the list of transfer
             failures returned by the mover and latereg is always False here
    """

    error = PilotErrors()
    pilotErrorDiag = ""
    rc = 0
    latereg = False
    rf = None

    # generate the xml for the output files and the site mover
    pfnFile = "OutPutFileCatalog.xml"
    try:
        _status = pUtil.PFCxml(job.experiment, pfnFile, outs, fguids=job.outFilesGuids, fntag="pfn")
    except Exception, e:
        job.pilotErrorDiag = "PFCxml failed due to problematic XML: %s" % (e)
        tolog("!!WARNING!!1113!! %s" % (job.pilotErrorDiag))
        return error.ERR_MISSINGGUID, job, rf, latereg
    else:
        if not _status:
            job.pilotErrorDiag = "Metadata contains missing guid(s) for output file(s)"
            tolog("!!WARNING!!2999!! %s" % (job.pilotErrorDiag))
            return error.ERR_MISSINGGUID, job, rf, latereg

    tolog("Using the newly-generated %s/%s for put operation" % (job.workdir, pfnFile))

    # the cmtconfig is needed by at least the xrdcp site mover
    cmtconfig = getCmtconfig(job.cmtconfig)

    rs = "" # return string from put_data with filename in case of transfer error
    tin_0 = os.times()
    try:
        rc, job.pilotErrorDiag, rf, rs, job.filesNormalStageOut, job.filesAltStageOut = mover.mover_put_data("xmlcatalog_file:%s" % (pfnFile), dsname, jobSite.sitename,\
                                 ub=jobSite.dq2url, analysisJob=analysisJob, pinitdir=self.__pilot_initdir, scopeOut=job.scopeOut,\
                                 proxycheck=self.__proxycheckFlag, spsetup=job.spsetup, token=job.destinationDBlockToken,\
                                 userid=job.prodUserID, datasetDict=datasetDict, prodSourceLabel=job.prodSourceLabel,\
                                 outputDir=self.__outputDir, jobId=job.jobId, jobWorkDir=job.workdir, DN=job.prodUserID,\
                                 dispatchDBlockTokenForOut=job.dispatchDBlockTokenForOut, outputFileInfo=outputFileInfo,\
                                 lfcreg=self.__fileCatalogRegistration, jobDefId=job.jobDefinitionID, jobCloud=job.cloud, logFile=job.logFile,\
                                 stageoutTries=self.__stageoutretry, cmtconfig=cmtconfig, experiment=self.__experiment, fileDestinationSE=job.fileDestinationSE)
        tin_1 = os.times()
        job.timeStageOut = int(round(tin_1[4] - tin_0[4]))
    except Exception, e:
        # timing is recorded even when the mover call itself blew up
        tin_1 = os.times()
        job.timeStageOut = int(round(tin_1[4] - tin_0[4]))

        if 'format_exc' in traceback.__all__:
            trace = traceback.format_exc()
            pilotErrorDiag = "Put function can not be called for staging out: %s, %s" % (str(e), trace)
        else:
            tolog("traceback.format_exc() not available in this python version")
            pilotErrorDiag = "Put function can not be called for staging out: %s" % (str(e))
        tolog("!!WARNING!!3000!! %s" % (pilotErrorDiag))

        rc = error.ERR_PUTFUNCNOCALL
        job.setState(["holding", job.result[1], rc])
    else:
        # normalize the error diagnostics to a bounded "Put error: ..." string
        if job.pilotErrorDiag != "":
            if job.pilotErrorDiag.startswith("Put error:"):
                pre = ""
            else:
                pre = "Put error: "
            job.pilotErrorDiag = pre + tailPilotErrorDiag(job.pilotErrorDiag, size=256-len("pilot: Put error: "))

        tolog("Put function returned code: %d" % (rc))
        if rc != 0:
            # remove any trailing "\r" or "\n" (there can be two of them)
            if rs != None:
                rs = rs.rstrip()
                tolog("Error string: %s" % (rs))

            # is the job recoverable?
            if error.isRecoverableErrorCode(rc):
                _state = "holding"
                _msg = "WARNING"
            else:
                _state = "failed"
                _msg = "FAILED"

            # look for special error in the error string
            if rs == "Error: string Limit exceeded 250":
                tolog("!!%s!!3000!! Put error: file name string limit exceeded 250" % (_msg))
                job.setState([_state, job.result[1], error.ERR_LRCREGSTRSIZE])
            else:
                job.setState([_state, job.result[1], rc])

            tolog("!!%s!!1212!! %s" % (_msg, error.getErrorStr(rc)))
        else:
            # set preliminary finished (may be overwritten below in the LRC registration)
            job.setState(["finished", 0, 0])

            # create a weak lockfile meaning that file transfer worked
            # (useful for job recovery if activated) in the job workdir
            createLockFile(True, jobSite.workdir, lockfile="ALLFILESTRANSFERRED")
            # create another lockfile in the site workdir since a transfer failure can still occur during the log transfer
            # and a later recovery attempt will fail (job workdir will not exist at that time)
            createLockFile(True, self.__pworkdir, lockfile="ALLFILESTRANSFERRED")

    # unrecoverable errors must not leave the job in 'holding'
    if job.result[0] == "holding" and '(unrecoverable)' in job.pilotErrorDiag:
        job.result[0] = "failed"
        tolog("!!WARNING!!2999!! HOLDING state changed to FAILED since error is unrecoverable")

    return rc, job, rf, latereg
def copyInputForFiles(self, workdir):
    """ Copy any inputFor_* files from the pilot init dir to the job workdir

    Best-effort helper used for event service merge jobs; failures only show
    up in the logged command output.

    :param workdir: destination directory
    :return: None
    """
    # initialize so the final tolog() cannot fail with a NameError when the
    # command raises before 'out' is assigned (bug in the original version)
    out = ""
    try:
        cmd = "cp %s/inputFor_* %s" % (self.__pilot_initdir, workdir)
        tolog("Executing command: %s" % (cmd))
        out = commands.getoutput(cmd)
    except IOError:
        pass
    tolog(out)
def getStdoutStderrFileObjects(self, stdoutName="stdout.txt", stderrName="stderr.txt"):
    """ Create stdout/err file objects

    Opens both files for writing in the current working directory.

    :param stdoutName: file name for payload stdout
    :param stderrName: file name for payload stderr
    :return: tuple (stdout, stderr) of open file objects, or (None, None)
             if either file could not be opened
    """
    try:
        stdout = open(os.path.join(os.getcwd(), stdoutName), "w")
    except Exception as e:
        tolog("!!WARNING!!3330!! Failed to open stdout/err files: %s" % (e))
        return None, None

    try:
        stderr = open(os.path.join(os.getcwd(), stderrName), "w")
    except Exception as e:
        tolog("!!WARNING!!3330!! Failed to open stdout/err files: %s" % (e))
        # close the already-opened stdout handle instead of leaking it
        # (the original version left it open)
        stdout.close()
        return None, None

    return stdout, stderr
def getSubprocess(self, thisExperiment, runCommand, stdout=None, stderr=None):
    """ Execute a command as a subprocess """
    # Delegate process creation to the experiment object, which knows how
    # the payload for this experiment should be launched
    process = thisExperiment.getSubprocess(runCommand, stdout=stdout, stderr=stderr)
    return process
# Methods used by event service RunJob* modules ..............................................................
def stripSetupCommand(self, cmd, trfName):
    """ Remove the trf part of the setup command

    Returns everything in cmd up to (but not including) the first occurrence
    of trfName. If trfName is not present the command is returned unchanged;
    the original version returned cmd[:-1] in that case (find() == -1),
    silently chopping off the last character.

    :param cmd: full job execution command string
    :param trfName: name of the transformation script
    :return: the setup part of the command
    """
    location = cmd.find(trfName)
    if location == -1:
        return cmd
    return cmd[:location]
def executeMakeRunEventCollectionScript(self, cmd, eventcollection_filename):
    """ Define and execute the event collection script """
    # append the get_files call to the (already stripped) setup command
    get_files_cmd = cmd + "get_files -jo %s" % (eventcollection_filename)
    tolog("Execute command: %s" % (get_files_cmd))

    # WARNING: PUT A TIMER AROUND THIS COMMAND
    rc, rs = commands.getstatusoutput(get_files_cmd)
    return rc, rs
def prependMakeRunEventCollectionScript(self, input_file, output_file, eventcollection_filename):
    """ Prepend the event collection script

    Writes a "<name>.2.py" copy of the job options file with EvtMax/In/Out
    definitions prepended, so athena processes all events of input_file into
    output_file.

    :param input_file: input file name to process
    :param output_file: name of the TAG file to produce
    :param eventcollection_filename: original job options file (must exist)
    :return: tuple (status (bool), modified job options file name (string))
    """
    status = False
    eventcollection_filename_mod = ""

    with open(eventcollection_filename) as f1:
        eventcollection_filename_mod = eventcollection_filename.replace(".py",".2.py")
        with open(eventcollection_filename_mod, "w") as f2:
            # header: process all events from input_file into output_file
            f2.write("EvtMax = -1\n")
            f2.write("In = [ \'%s\' ]\n" % (input_file))
            f2.write("Out = \'%s\'\n" % (output_file))
            # append the original job options unchanged
            for line in f1:
                f2.write(line)
        # both files are closed by the with-statements; the explicit close()
        # calls of the original version were redundant and have been removed
        status = True

    return status, eventcollection_filename_mod
def executeTAGFileCommand(self, cmd, eventcollection_filename_mod):
    """ Execute the TAG file creation script using athena """
    # run athena on the modified job options, capturing its stdout in a file
    athena_cmd = cmd + "athena.py %s >MakeRunEventCollection-stdout.txt" % (eventcollection_filename_mod)
    tolog("Executing command: %s" % (athena_cmd))

    # WARNING: PUT A TIMER AROUND THIS COMMAND
    rc, rs = commands.getstatusoutput(athena_cmd)
    return rc, rs
def swapAthenaProcNumber(self, swap_value):
    """ Swap the current ATHENA_PROC_NUMBER so that it does not upset the job """
    # Note: only needed during TAG file creation
    previous_value = 0
    try:
        previous_value = int(os.environ['ATHENA_PROC_NUMBER'])
    except Exception as e:
        # variable unset (or not an integer): just install the requested value
        tolog("ATHENA_PROC_NUMBER not defined, setting it to: %s" % (swap_value))
        os.environ['ATHENA_PROC_NUMBER'] = str(swap_value)
        return previous_value

    if swap_value == 0:
        # a zero swap value means the variable should be removed entirely
        del os.environ['ATHENA_PROC_NUMBER']
        tolog("Unset ATHENA_PROC_NUMBER")
    else:
        os.environ['ATHENA_PROC_NUMBER'] = str(swap_value)
        tolog("ATHENA_PROC_NUMBER swapped from \'%d\' to \'%d\'" % (previous_value, swap_value))

    return previous_value
def createTAGFile(self, jobExecutionCommand, trfName, inFiles, eventcollection_filename):
    """ Create a TAG file

    Strips the setup part out of the job execution command, downloads and
    prepends the event collection job options, then runs athena to build a
    TAG file for the first input file.

    :param jobExecutionCommand: full payload command (setup + trf)
    :param trfName: trf name used to locate the end of the setup part
    :param inFiles: list of input files (only the first one is used)
    :param eventcollection_filename: job options file to download and modify
    :return: tuple (tag_file, tag_file_guid); tag_file is "" on failure
    """

    tag_file = ""
    tag_file_guid = getGUID()

    # We cannot have ATHENA_PROC_NUMBER set to a value larger than 1, since that will
    # activate AthenaMP. Reset it for now, and swap it back at the end of this method
    athena_proc_number = self.swapAthenaProcNumber(0)

    # Remove everything after the trf command from the job execution command
    cmd = self.stripSetupCommand(jobExecutionCommand, trfName)
    tolog("Stripped command: %s" % (cmd))

    # Define and execute the event collection script
    if cmd != "":
        rc, rs = self.executeMakeRunEventCollectionScript(cmd, eventcollection_filename)

        # Prepend the event collection script
        if rc == 0:
            input_file = inFiles[0]
            tag_file = input_file + ".TAG"
            status, eventcollection_filename_mod = self.prependMakeRunEventCollectionScript(input_file, tag_file, eventcollection_filename)

            # Finally create the TAG file
            if status:
                rc, rs = self.executeTAGFileCommand(cmd, eventcollection_filename_mod)
                if rc != 0:
                    tolog("!!WARNING!!3337!! Failed to create TAG file: rc=%d, rs=%s" % (rc, rs))
                    tag_file = ""
        else:
            tolog("!!WARNING!!3339!! Failed to download %s: rc=%d, rs=%s " % (eventcollection_filename, rc, rs))
    else:
        tolog("!!WARNING!!3330!! Failed to strip the job execution command, cannot create TAG file")

    # Now swap the ATHENA_PROC_NUMBER since it is needed for activating AthenaMP
    dummy = self.swapAthenaProcNumber(athena_proc_number)

    return tag_file, tag_file_guid
# (end event service methods) ................................................................................
# main process starts here
# main process starts here: full job life cycle
# (setup -> stage-in -> payload execution -> stage-out -> final server update)
if __name__ == "__main__":

    # Get error handler
    error = PilotErrors()

    # Get runJob object
    runJob = RunJob()

    # Define a new parent group
    os.setpgrp()

    # Protect the runJob code with exception handling
    hP_ret = False
    try:
        # always use this filename as the new jobDef module name
        import newJobDef

        jobSite = Site.Site()

        return_tuple = runJob.argumentParser()
        tolog("argumentParser returned: %s" % str(return_tuple))
        jobSite.setSiteInfo(return_tuple)
        # jobSite.setSiteInfo(argParser(sys.argv[1:]))

        # reassign workdir for this job
        jobSite.workdir = jobSite.wntmpdir

        if runJob.getPilotLogFilename() != "":
            pUtil.setPilotlogFilename(runJob.getPilotLogFilename())

        # set node info
        node = Node.Node()
        node.setNodeName(os.uname()[1])
        node.collectWNInfo(jobSite.workdir)

        # redirect stder
        sys.stderr = open("%s/runjob.stderr" % (jobSite.workdir), "w")

        tolog("Current job workdir is: %s" % os.getcwd())
        tolog("Site workdir is: %s" % jobSite.workdir)

        # get the experiment object
        thisExperiment = getExperiment(runJob.getExperiment())
        tolog("RunJob will serve experiment: %s" % (thisExperiment.getExperiment()))

        # set the cache (used e.g. by LSST)
        if runJob.getCache():
            thisExperiment.setCache(runJob.getCache())

        JR = JobRecovery()

        try:
            job = Job.Job()
            job.workdir = jobSite.workdir
            job.setJobDef(newJobDef.job)
            job.workdir = jobSite.workdir
            job.experiment = runJob.getExperiment()
            # figure out and set payload file names
            job.setPayloadName(thisExperiment.getPayloadName(job))
        except Exception, e:
            pilotErrorDiag = "Failed to process job info: %s" % str(e)
            tolog("!!WARNING!!3000!! %s" % (pilotErrorDiag))
            runJob.failJob(0, error.ERR_UNKNOWN, job, pilotErrorDiag=pilotErrorDiag)

        # prepare for the output file data directory
        # (will only created for jobs that end up in a 'holding' state)
        job.datadir = runJob.getParentWorkDir() + "/PandaJob_%s_data" % (job.jobId)

        # register cleanup function
        atexit.register(runJob.cleanup, job)

        # to trigger an exception so that the SIGTERM signal can trigger cleanup function to run
        # because by default signal terminates process without cleanup.
        def sig2exc(sig, frm):
            """ signal handler

            Records the received signal as the job's failure code and raises
            SystemError so the surrounding try/except performs cleanup.
            """

            error = PilotErrors()
            runJob.setGlobalPilotErrorDiag("!!FAILED!!3000!! SIGTERM Signal %s is caught in child pid=%d!\n" % (sig, os.getpid()))
            tolog(runJob.getGlobalPilotErrorDiag())
            if sig == signal.SIGTERM:
                runJob.setGlobalErrorCode(error.ERR_SIGTERM)
            elif sig == signal.SIGQUIT:
                runJob.setGlobalErrorCode(error.ERR_SIGQUIT)
            elif sig == signal.SIGSEGV:
                runJob.setGlobalErrorCode(error.ERR_SIGSEGV)
            elif sig == signal.SIGXCPU:
                runJob.setGlobalErrorCode(error.ERR_SIGXCPU)
            elif sig == signal.SIGBUS:
                runJob.setGlobalErrorCode(error.ERR_SIGBUS)
            elif sig == signal.SIGUSR1:
                runJob.setGlobalErrorCode(error.ERR_SIGUSR1)
            else:
                runJob.setGlobalErrorCode(error.ERR_KILLSIGNAL)
            runJob.setFailureCode(runJob.getGlobalErrorCode())

            # print to stderr
            print >> sys.stderr, runJob.getGlobalPilotErrorDiag()
            raise SystemError(sig)

        signal.signal(signal.SIGTERM, sig2exc)
        signal.signal(signal.SIGQUIT, sig2exc)
        signal.signal(signal.SIGSEGV, sig2exc)
        signal.signal(signal.SIGXCPU, sig2exc)
        signal.signal(signal.SIGUSR1, sig2exc)
        signal.signal(signal.SIGBUS, sig2exc)

        # see if it's an analysis job or not
        analysisJob = isAnalysisJob(job.trf.split(",")[0])
        if analysisJob:
            tolog("User analysis job")
        else:
            tolog("Production job")
        tolog("runJob received a job with prodSourceLabel=%s" % (job.prodSourceLabel))

        # setup starts here ................................................................................

        # update the job state file
        job.jobState = "setup"
        _retjs = JR.updateJobStateTest(job, jobSite, node, mode="test")

        # send [especially] the process group back to the pilot
        job.setState([job.jobState, 0, 0])
        rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())

        # prepare the setup and get the run command list
        ec, runCommandList, job, multi_trf = runJob.setup(job, jobSite, thisExperiment)
        if ec != 0:
            tolog("!!WARNING!!2999!! runJob setup failed: %s" % (job.pilotErrorDiag))
            runJob.failJob(0, ec, job, pilotErrorDiag=job.pilotErrorDiag)
        tolog("Setup has finished successfully")

        # job has been updated, display it again
        job.displayJob()

        # (setup ends here) ................................................................................

        tolog("Setting stage-in state until all input files have been copied")
        job.setState(["stagein", 0, 0])
        # send the special setup string back to the pilot (needed for the log transfer on xrdcp systems)
        rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())

        # stage-in .........................................................................................

        # update the job state file
        job.jobState = "stagein"
        _retjs = JR.updateJobStateTest(job, jobSite, node, mode="test")

        # update copysetup[in] for production jobs if brokerage has decided that remote I/O should be used
        if job.transferType == 'direct':
            tolog('Brokerage has set transfer type to \"%s\" (remote I/O will be attempted for input files, any special access mode will be ignored)' %\
                  (job.transferType))
            RunJobUtilities.updateCopysetups('', transferType=job.transferType)

        # stage-in all input files (if necessary)
        job, ins, statusPFCTurl, usedFAXandDirectIO = runJob.stageIn(job, jobSite, analysisJob)
        if job.result[2] != 0:
            tolog("Failing job with ec: %d" % (ec))
            runJob.failJob(0, job.result[2], job, ins=ins, pilotErrorDiag=job.pilotErrorDiag)

        # after stageIn, all file transfer modes are known (copy_to_scratch, file_stager, remote_io)
        # consult the FileState file dictionary if cmd3 should be updated (--directIn should not be set if all
        # remote_io modes have been changed to copy_to_scratch as can happen with ByteStream files)
        # and update the run command list if necessary.
        # in addition to the above, if FAX is used as a primary site mover and direct access is enabled, then
        # the run command should not contain the --oldPrefix, --newPrefix, --lfcHost options but use --usePFCTurl
        if job.inFiles != ['']:
            runCommandList = RunJobUtilities.updateRunCommandList(runCommandList, runJob.getParentWorkDir(), job.jobId, statusPFCTurl, analysisJob, usedFAXandDirectIO)

        # copy any present @inputFor_* files from the pilot init dir to the rundirectory (used for ES merge jobs)
        #runJob.copyInputForFiles(job.workdir)

        # (stage-in ends here) .............................................................................

        # change to running state since all input files have been staged
        tolog("Changing to running state since all input files have been staged")
        job.setState(["running", 0, 0])
        rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())

        # update the job state file
        job.jobState = "running"
        _retjs = JR.updateJobStateTest(job, jobSite, node, mode="test")

        # run the job(s) ...................................................................................

        # Set ATLAS_CONDDB if necessary, and other env vars
        RunJobUtilities.setEnvVars(jobSite.sitename)

        # execute the payload
        res, job, getstatusoutput_was_interrupted, current_job_number = runJob.executePayload(thisExperiment, runCommandList, job)

        # if payload leaves the input files, delete them explicitly
        if ins:
            ec = pUtil.removeFiles(job.workdir, ins)

        # payload error handling
        ed = ErrorDiagnosis()
        job = ed.interpretPayload(job, res, getstatusoutput_was_interrupted, current_job_number, runCommandList, runJob.getFailureCode())
        if job.result[1] != 0 or job.result[2] != 0:
            runJob.failJob(job.result[1], job.result[2], job, pilotErrorDiag=job.pilotErrorDiag)

        # stage-out ........................................................................................

        # update the job state file
        job.jobState = "stageout"
        _retjs = JR.updateJobStateTest(job, jobSite, node, mode="test")

        # verify and prepare and the output files for transfer
        ec, pilotErrorDiag, outs, outsDict = RunJobUtilities.prepareOutFiles(job.outFiles, job.logFile, job.workdir)
        if ec:
            # missing output file (only error code from prepareOutFiles)
            runJob.failJob(job.result[1], ec, job, pilotErrorDiag=pilotErrorDiag)
        tolog("outsDict: %s" % str(outsDict))

        # update the current file states
        updateFileStates(outs, runJob.getParentWorkDir(), job.jobId, mode="file_state", state="created")
        dumpFileStates(runJob.getParentWorkDir(), job.jobId)

        # create xml string to pass to dispatcher for atlas jobs
        outputFileInfo = {}

        if outs or (job.logFile and job.logFile != ''):
            # get the datasets for the output files
            dsname, datasetDict = runJob.getDatasets(job)

            # re-create the metadata.xml file, putting guids of ALL output files into it.
            # output files that miss guids from the job itself will get guids in PFCxml function

            # first rename and copy the trf metadata file for non-build jobs
            if not pUtil.isBuildJob(outs):
                runJob.moveTrfMetadata(job.workdir, job.jobId)

            # create the metadata for the output + log files
            ec, job, outputFileInfo = runJob.createFileMetadata(list(outs), job, outsDict, dsname, datasetDict, jobSite.sitename, analysisJob=analysisJob)
            if ec:
                runJob.failJob(0, ec, job, pilotErrorDiag=job.pilotErrorDiag)

        # move output files from workdir to local DDM area
        finalUpdateDone = False
        if outs:
            tolog("Setting stage-out state until all output files have been copied")
            job.setState(["stageout", 0, 0])
            rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort())

            # stage-out output files
            ec, job, rf, latereg = runJob.stageOut(job, jobSite, outs, analysisJob, dsname, datasetDict, outputFileInfo)

            # error handling
            if job.result[0] == "finished" or ec == error.ERR_PUTFUNCNOCALL:
                rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort(), final=True)
            else:
                rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort(), final=True, latereg=latereg)
            if ec == error.ERR_NOSTORAGE:
                # update the current file states for all files since nothing could be transferred
                updateFileStates(outs, runJob.getParentWorkDir(), job.jobId, mode="file_state", state="not_transferred")
                dumpFileStates(runJob.getParentWorkDir(), job.jobId)

            finalUpdateDone = True
            if ec != 0:
                runJob.sysExit(job, rf)
            # (stage-out ends here) .......................................................................

        job.setState(["finished", 0, 0])
        if not finalUpdateDone:
            rt = RunJobUtilities.updatePilotServer(job, runJob.getPilotServer(), runJob.getPilotPort(), final=True)
        runJob.sysExit(job)

    except Exception, errorMsg:

        error = PilotErrors()

        if runJob.getGlobalPilotErrorDiag() != "":
            pilotErrorDiag = "Exception caught in runJob: %s" % (runJob.getGlobalPilotErrorDiag())
        else:
            pilotErrorDiag = "Exception caught in runJob: %s" % str(errorMsg)

        if 'format_exc' in traceback.__all__:
            pilotErrorDiag += ", " + traceback.format_exc()

        # logging itself may fail for very large diagnostics, hence the guard
        try:
            tolog("!!FAILED!!3001!! %s" % (pilotErrorDiag))
        except Exception, e:
            if len(pilotErrorDiag) > 10000:
                pilotErrorDiag = pilotErrorDiag[:10000]
                tolog("!!FAILED!!3001!! Truncated (%s): %s" % (e, pilotErrorDiag))
            else:
                pilotErrorDiag = "Exception caught in runJob: %s" % (e)
                tolog("!!FAILED!!3001!! %s" % (pilotErrorDiag))

        # # restore the proxy if necessary
        # if hP_ret:
        #     rP_ret = proxyguard.restoreProxy()
        #     if not rP_ret:
        #         tolog("Warning: Problems with storage can occur since proxy could not be restored")
        #     else:
        #         hP_ret = False
        #         tolog("ProxyGuard has finished successfully")

        tolog("sys.path=%s" % str(sys.path))
        # dump the directory listings to help post-mortem debugging
        cmd = "pwd;ls -lF %s;ls -lF;ls -lF .." % (runJob.getPilotInitDir())
        tolog("Executing command: %s" % (cmd))
        out = commands.getoutput(cmd)
        tolog("%s" % (out))

        job = Job.Job()
        job.setJobDef(newJobDef.job)
        job.pilotErrorDiag = pilotErrorDiag
        job.result[0] = "failed"
        if runJob.getGlobalErrorCode() != 0:
            job.result[2] = runJob.getGlobalErrorCode()
        else:
            job.result[2] = error.ERR_RUNJOBEXC
        tolog("Failing job with error code: %d" % (job.result[2]))
        # fail the job without calling sysExit/cleanup (will be called anyway)
        runJob.failJob(0, job.result[2], job, pilotErrorDiag=pilotErrorDiag, docleanup=False)

    # end of runJob
| RRCKI/pilot | RunJob.py | Python | apache-2.0 | 69,493 |
from mongoengine import *
from models.zips import Zips
from geopy import distance
from geopy import Point

connect('scratch', host='mongodb://142.133.150.180/scratch')
# zipins = Zips(zipcode=999999, city="testlocation", loc=[1.0,1.0],pop=12345, state="ZZ").save()

# Load every zip-code document once so the database is only hit a single time.
all_locations = list(Zips.objects)

# Partition the records up front instead of re-testing the city on every pair
# inside the O(n^2) loop. (The original also shadowed the builtin `zip` and
# created an unused `location = {}` dict.)
beverly_hills = [loc for loc in all_locations if loc.city == "BEVERLY HILLS"]
candidates = [loc for loc in all_locations if loc.city != "BEVERLY HILLS"]

# Collect every non-Beverly-Hills location within 5 units of any Beverly Hills
# zip. NOTE(review): geopy Distance objects compare against bare numbers using
# kilometres by default -- confirm km is the intended unit here.
nearby = []
for origin in beverly_hills:
    origin_point = Point(origin.loc[0], origin.loc[1])
    for candidate in candidates:
        candidate_point = Point(candidate.loc[0], candidate.loc[1])
        if distance.distance(origin_point, candidate_point) < 5:
            nearby.append(candidate)

for place in nearby:
    print(place.city, place.zipcode)
| rainmakeross/python-dataanalysis | app.py | Python | apache-2.0 | 871 |
#!/usr/bin/env python
"""
Standaone Rule
==============
This is a customer spec, parser and rule and can be run
against the local host using the following command::
$ insights-run -p examples.rules.stand_alone
or from the examples/rules directory::
$ ./stand_alone.py
"""
from __future__ import print_function
from collections import namedtuple
from insights import get_active_lines, parser, Parser
from insights import make_fail, make_pass, rule, run
from insights.core.spec_factory import SpecSet, simple_file
from insights.parsers.redhat_release import RedhatRelease
# Error key used in make_fail
ERROR_KEY = "TOO_MANY_HOSTS"
# jinga2 template displayed for rule responses
CONTENT = {
make_fail: """Too many hosts in /etc/hosts: {{num}}""",
make_pass: """Just right"""
}
class Specs(SpecSet):
    """ Datasources for collection from local host """
    # Raw contents of the local /etc/hosts file; consumed by the
    # @parser-decorated HostParser defined below in this module.
    hosts = simple_file("/etc/hosts")
@parser(Specs.hosts)
class HostParser(Parser):
    """
    Parses the results of the ``hosts`` Specs

    Attributes:
        hosts (list): List of the namedtuple Host
            which are the contents of the hosts file
            including ``.ip``, ``.host``, and ``.aliases``.
    """
    Host = namedtuple("Host", ["ip", "host", "aliases"])

    def parse_content(self, content):
        """
        Method to parse the contents of file ``/etc/hosts``

        This method must be implemented by each parser.

        Arguments:
            content (list): List of strings that are the contents
                of the /etc/hosts file.
        """
        self.hosts = []
        for line in get_active_lines(content):
            # remove inline comments
            line = line.partition("#")[0].strip()
            # break the line into parts
            parts = line.split()
            # Skip malformed lines with fewer than two fields; previously
            # such a line raised ValueError on the unpack below.
            if len(parts) < 2:
                continue
            ip, host = parts[:2]
            aliases = parts[2:]
            self.hosts.append(HostParser.Host(ip, host, aliases))

    def __repr__(self):
        """ str: Returns string representation of the class """
        me = self.__class__.__name__
        msg = "%s([" + ", ".join([str(d) for d in self.hosts]) + "])"
        return msg % me
@rule(HostParser, RedhatRelease, content=CONTENT)
def report(hp, rhr):
    """
    Rule reports a response if there is more than 1 host
    entry defined in the /etc/hosts file.

    Arguments:
        hp (HostParser): Parser object for the custom parser in this
            module.
        rhr (RedhatRelease): Parser object for the /etc/redhat-release
            file.
    """
    # Use the module-level ERROR_KEY constant (same literal) instead of
    # repeating "TOO_MANY_HOSTS" here, so the key cannot drift out of sync
    # with the CONTENT template keys.
    if len(hp.hosts) > 1:
        return make_fail(ERROR_KEY, num=len(hp.hosts))
    return make_pass(ERROR_KEY, num=len(hp.hosts))
if __name__ == "__main__":
    # Allow the example to be executed directly: collect from the local
    # host, evaluate the rule, and print a summary of the result.
    run(report, print_summary=True)
| RedHatInsights/insights-core | examples/rules/stand_alone.py | Python | apache-2.0 | 2,746 |
# Copyright (c) 2002-2016, California Institute of Technology.
# All rights reserved. Based on Government Sponsored Research under contracts NAS7-1407 and/or NAS7-03001.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the California Institute of Technology (Caltech), its operating division the Jet Propulsion Laboratory (JPL),
# the National Aeronautics and Space Administration (NASA), nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE CALIFORNIA INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains various utilities for the OnEarth test routines.
"""
import os
import errno
import subprocess
#from shutil import copyfile, copy
import shutil
import xml.dom.minidom
import hashlib
import shlex
from dateutil.relativedelta import relativedelta
import sqlite3
import urllib.request, urllib.error, urllib.parse
import io
import gzip
import mapbox_vector_tile
from lxml import etree
import requests
import sys
import platform
import smtpd
import threading
import asyncore
import http
# cElementTree deprecated in python 3.3
from xml.etree import cElementTree as ElementTree
import redis
class DebuggingServerThread(threading.Thread):
    # Runs an smtpd.DebuggingServer (prints received mail instead of
    # relaying it) on a background thread for email-related tests.
    # NOTE(review): smtpd and asyncore were removed from the standard
    # library in Python 3.12 -- this helper requires an older runtime.
    def __init__(self, addr='localhost', port=1025):
        threading.Thread.__init__(self)
        # Binding happens here, on the caller's thread, so failures surface
        # immediately rather than inside run().
        self.server = smtpd.DebuggingServer((addr, port), None)

    def run(self):
        # Drive the asyncore event loop; returns once all channels close.
        asyncore.loop(timeout=5)

    def stop(self):
        # Closing the server empties the asyncore socket map, letting the
        # loop in run() terminate; then wait for the thread to finish.
        self.server.close()
        self.join()
class XmlListConfig(list):
    """A list built from sibling XML elements sharing a repeated tag.

    Children that themselves have sub-elements become nested
    ``XmlDictConfig``/``XmlListConfig`` values; leaf children contribute
    their stripped text (empty/whitespace-only text is dropped).
    """

    def __init__(self, aList):
        for child in aList:
            if child:
                # Repeated identical tags read as a nested list; a single
                # child or heterogeneous tags read as a mapping.
                is_series = len(child) > 1 and child[0].tag == child[1].tag
                self.append(XmlListConfig(child) if is_series
                            else XmlDictConfig(child))
            elif child.text:
                stripped = child.text.strip()
                if stripped:
                    self.append(stripped)
class XmlDictConfig(dict):
    '''
    Example usage:

    >>> tree = ElementTree.parse('your_file.xml')
    >>> root = tree.getroot()
    >>> xmldict = XmlDictConfig(root)

    Or, if you want to use an XML string:

    >>> root = ElementTree.XML(xml_string)
    >>> xmldict = XmlDictConfig(root)

    And then use xmldict for what it is... a dict.
    '''

    def __init__(self, parent_element):
        # Element.getchildren() was removed in Python 3.9; iterating the
        # element directly yields the same list of direct children.
        childrenNames = [child.tag for child in parent_element]
        if list(parent_element.items()):
            self.update(dict(list(parent_element.items())))
        for element in parent_element:
            if element:
                # treat like dict - we assume that if the first two tags
                # in a series are different, then they are all different.
                if len(element) == 1 or element[0].tag != element[1].tag:
                    aDict = XmlDictConfig(element)
                # treat like list - we assume that if the first two tags
                # in a series are the same, then the rest are the same.
                else:
                    # here, we put the list in dictionary; the key is the
                    # tag name the list elements all share in common, and
                    # the value is the list itself
                    aDict = {element[0].tag: XmlListConfig(element)}
                # if the tag has attributes, add those to the dict
                if list(element.items()):
                    aDict.update(dict(list(element.items())))
                if childrenNames.count(element.tag) > 1:
                    try:
                        currentValue = self[element.tag]
                        currentValue.append(aDict)
                        self.update({element.tag: currentValue})
                    # Narrowed from a bare except: only KeyError means this
                    # is the first occurrence and a list must be created.
                    except KeyError:
                        self.update({
                            element.tag: [aDict]
                        })  # aDict is written in [], i.e. it will be a list
                else:
                    self.update({element.tag: aDict})
            # this assumes that if you've got an attribute in a tag,
            # you won't be having any text. This may or may not be a
            # good idea -- time will tell. It works for the way we are
            # currently doing XML configuration files...
            elif list(element.items()):
                self.update({element.tag: dict(list(element.items()))})
                #self[element.tag].update({"__Content__":element.text})
            # finally, if there are no child tags and no attributes, extract
            # the text
            else:
                # NOTE(review): a text-only tag that occurs just once is NOT
                # stored -- the update for that case is commented out below.
                # Preserved as-is since callers may rely on it.
                if childrenNames.count(element.tag) > 1:
                    try:
                        currentValue = self[element.tag]
                        currentValue.append(element.text)
                        self.update({element.tag: currentValue})
                    # Narrowed from a bare except (first occurrence only).
                    except KeyError:
                        self.update({
                            element.tag: [element.text]
                        })  # text is written in [], i.e. it will be a list
                #self.update({element.tag: element.text})
class Error(EnvironmentError):
    # Aggregate error raised by copytree_x(); args[0] is the list of
    # (src, dst, reason) tuples collected while copying the tree.
    pass
def copytree_x(src, dst, symlinks=False, ignore=None, exist_ok=False):
"""Recursively copy a directory tree using copy2().
The destination directory must not already exist.
If exception(s) occur, an Error is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
The optional ignore argument is a callable. If given, it
is called with the `src` parameter, which is the directory
being visited by copytree_x(), and `names` which is the list of
`src` contents, as returned by os.listdir():
callable(src, names) -> ignored_names
Since copytree_x() is called recursively, the callable will be
called once for each directory that is copied. It returns a
list of names relative to the `src` directory that should
not be copied.
XXX Consider this example code rather than the ultimate tool.
"""
names = os.listdir(src)
if ignore is not None:
ignored_names = ignore(src, names)
else:
ignored_names = set()
# os.makedirs(dst, exist_ok=exist_ok)
try:
os.makedirs(dst)
except OSError as e:
if exist_ok:
if e.errno != errno.EEXIST:
raise
else:
raise
errors = []
for name in names:
if name in ignored_names:
continue
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
copytree_x(srcname, dstname, symlinks, ignore)
else:
# Will raise a SpecialFileError for unsupported file types
shutil.copy2(srcname, dstname)
# catch the Error from the recursive copytree so that we can
# continue with other files
except Error as err:
errors.extend(err.args[0])
except EnvironmentError as why:
errors.append((srcname, dstname, str(why)))
try:
shutil.copystat(src, dst)
except OSError as why:
if WindowsError is not None and isinstance(why, WindowsError):
# Copying file access times may fail on Windows
pass
else:
errors.append((src, dst, str(why)))
if errors:
raise Error(errors)
def add_trailing_slash(directory_path):
    """
    Add trailing slash if one is not already present.

    An empty string is returned unchanged (the original indexed
    directory_path[-1] and raised IndexError on empty input).

    Argument:
        directory_path -- path to which trailing slash should be confirmed.
    """
    # Add trailing slash.
    if directory_path and not directory_path.endswith('/'):
        directory_path += '/'
    # Return directory_path with trailing slash.
    return directory_path
def restart_apache():
    """Restart the httpd daemon and pause briefly so it can come back up.

    stderr is merged into stdout by the Popen call, so the stderr branch
    below is normally empty; both are logged to sys.stderr defensively.
    """
    apache = subprocess.Popen(['httpd', '-k', 'restart'],
                              stderr=subprocess.STDOUT,
                              stdout=subprocess.PIPE)
    # Earlier platform-specific restart strategies, kept for reference:
    # try:
    #     check_apache_running()
    #     if "el7" in platform.release():
    #         apache = subprocess.Popen('pkill --signal HUP --uid root httpd'.split(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    #     else:
    #         apache = subprocess.Popen(['apachectl', 'restart'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    # except ValueError:
    #     apache = subprocess.Popen(['httpd'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
    (stdout, stderr) = apache.communicate()
    if stdout != None and len(stdout) != 0:
        sys.stderr.write(
            "\n=== STDOUT from restart_apache():\n%s\n===\n" % stdout.rstrip())
    if stderr != None and len(stderr) != 0:
        sys.stderr.write(
            "\n=== STDERR from restart_apache():\n%s\n===\n" % stderr.rstrip())
    # Give httpd a few seconds to finish restarting before tests hit it.
    subprocess.call(['sleep', '3'])
def restart_redis():
    """Ensure a redis-server process is running, starting one if needed.

    check_redis_running() raises ValueError when no redis-server process
    is found; only in that case is a new server spawned.
    """
    try:
        check_redis_running()
        # Earlier kill-and-respawn strategies, kept for reference:
        # if "el7" in platform.release():
        #     subprocess.Popen('pkill --signal TERM --uid root redis-server'.split(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
        #     redis = subprocess.Popen(['redis-server'], close_fds=True)
        # else:
        #     subprocess.Popen('pkill --signal TERM --uid root redis-server'.split(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
        #     redis = subprocess.Popen(['redis-server'], close_fds=True)
    except ValueError:
        #redis = subprocess.Popen(['redis-server'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
        redis = subprocess.Popen(['redis-server'], close_fds=True)
    # (stdout, stderr) = redis.communicate()
    # if stdout != None and len(stdout) != 0:
    #     sys.stderr.write("\n=== STDOUT from restart_redis():\n%s\n===\n" % stdout.rstrip())
    # if stderr != None and len(stderr) != 0:
    #     sys.stderr.write("\n=== STDERR from restart_redis():\n%s\n===\n" % stderr.rstrip())
    # Give the server a moment to start accepting connections.
    subprocess.call(['sleep', '2'])
def run_command(cmd, ignore_warnings=False, wait=True, ignore_errors=False):
    """
    Runs the provided command on the terminal and prints any stderr output.

    Arguments:
        cmd -- the command to be executed (run through the shell).
        ignore_warnings -- if set to True, stderr is not echoed line-by-line
            nor appended to the per-command .err log file.
        wait -- kept for interface compatibility; the pipes are always
            drained to EOF, which waits for the process (as the original
            effectively did via its subsequent blocking reads).
        ignore_errors -- unused; kept for interface compatibility.

    Returns:
        None
    """
    print('\nRunning command: ' + cmd)
    process = subprocess.Popen(cmd, universal_newlines=True, shell=True,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() reads both pipes concurrently and waits for exit,
    # avoiding the deadlock risk of wait() with full PIPE buffers. It also
    # fixes the original's double-read of process.stderr, which made the
    # final "run_command stderr:" line always empty.
    stdout, stderr = process.communicate()
    if not ignore_warnings:
        # Append stderr to a per-command log file. The original leaked this
        # handle by writing "output_err.close" without parentheses.
        with open(cmd.split(' ')[0] + '.err', 'a') as output_err:
            for error in stderr.splitlines(True):
                print(error)
                output_err.write(error)
    print('run_command stdout: ' + stdout)
    print('run_command stderr: ' + stderr)
    print('**************************************************************************************')
    return None
def mrfgen_run_command(cmd, ignore_warnings=False, show_output=False):
    """
    Runs the provided command on the terminal and prints any stderr output.

    Arguments:
        cmd -- the command to be executed.
        ignore_warnings -- if set to True, warnings
            will be ignored (defaults to False)
        show_output -- if set to True, the combined output is always echoed.
    """
    process = subprocess.run(shlex.split(cmd), universal_newlines=True,
                             stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    print('run_command stdout: ' + process.stdout)
    print('**************************************************************************************')
    # Echo the output when explicitly requested, when it mentions an error,
    # or when it mentions a warning and the caller did NOT ask for warnings
    # to be ignored. The original tested "ignore_warnings and ...", which
    # inverted the behavior documented in the docstring.
    output_lower = process.stdout.lower()
    if (show_output is True or 'error' in output_lower
            or (not ignore_warnings and 'warning' in output_lower)):
        print(process.stdout)
def find_string(file_path, string):
    """Return True if *string* occurs in any line of the text file.

    A missing/unreadable file yields False rather than raising.
    """
    try:
        with open(file_path, 'r') as handle:
            for line in handle:
                if string in line:
                    return True
        return False
    except OSError:
        return False
def find_string_binary(file_path, string):
    """Return True if the bytes *string* occur in any line of the file.

    The file is read in binary mode; a missing/unreadable file yields False.
    """
    try:
        with open(file_path, 'rb') as handle:
            for line in handle:
                if string in line:
                    return True
        return False
    except OSError:
        return False
def search_for_strings(string_list, file_path):
    """
    Searches a given text file for each string in *string_list*.
    Returns True only when every string was found, False otherwise.

    Note: strings are removed from the caller's list as they are found
    (the list is mutated, matching the original contract); at most one
    string is consumed per line.

    Arguments:
        string_list -- a list of strings to search for.
        file_path -- the path of the file to search in.
    """
    with open(file_path, "r") as handle:
        for line in handle:
            # Find the first still-outstanding string on this line and
            # retire it; remaining strings wait for later lines.
            for candidate in string_list:
                if candidate in line:
                    string_list.remove(candidate)
                    break
    # All strings found exactly when the search list has been emptied.
    return not string_list
def get_file_hash(file):
    """
    Return the hex MD5 digest of a binary-mode file object's contents.

    Arguments:
        file -- file-like object (read() must return bytes) to be hashed
    """
    # Feeding the bytes straight into the constructor is equivalent to
    # md5() + update().
    digest = hashlib.md5(file.read())
    return str(digest.hexdigest())
def create_continuous_period_test_files(path,
                                        period_units,
                                        period_length,
                                        num_periods,
                                        start_datetime,
                                        prefix='',
                                        suffix='_.mrf',
                                        prototype_file=None,
                                        make_year_dirs=False,
                                        no_files=False):
    """
    Fills a directory structure with files that have a continuous period interval between them
    using the specified parameters.

    Arguments:
        path -- base directory tree to populate.
        period_units -- unit size of each period in 'days', 'months', or 'years'.
        period_length -- the length of each period in the aforementioned units.
        num_periods -- the number of period files to create.
        start_datetime -- a datetime.datetime object with the desired start date.
        prefix -- (optional) a string to append to the beginning of each filename.
        suffix -- (optional) a string to append to the end of each filename.
        prototype_file -- (optional) a prototype file to create each copy from (otherwise creates just empty files).
        make_year_dirs -- (optional) choose to create separate year dirs for the created files instead of dumping them all
            in one dir.
        no_files -- (optional) returns a list of dates but creates no files.

    Returns:
        list of datetime objects, one per generated file.
    """
    if not no_files:
        make_dir_tree(path)
    # Keep track of each date so we can evaluate if a new year directory needs to be created.
    test_dates = []
    date = start_datetime
    year_dir = ''
    # Sub-daily periods carry an HHMMSS component in the filename
    # (hoisted out of the loop -- it does not depend on the iteration).
    subdaily = any(unit in period_units
                   for unit in ('hours', 'minutes', 'seconds'))
    # Create a set of date intervals and corresponding dummy files
    for x in range(0, num_periods + 1):
        test_dates.append(date)
        if not no_files:
            # Create year directory if requested
            if make_year_dirs and (not x or test_dates[-1].year != date.year):
                year_dir = str(date.year)
                make_dir_tree(os.path.join(path, year_dir))
            # Assemble new filename and create file, using prototype if specified
            if subdaily:
                time_string = str(date.hour).zfill(2) + str(
                    date.minute).zfill(2) + str(date.second).zfill(2)
            else:
                time_string = ''
            filename = prefix + str(date.year) + str(
                date.timetuple().tm_yday).zfill(3) + time_string + suffix
            output_path = os.path.join(path, year_dir)
            output_file = os.path.join(output_path, filename)
            if prototype_file:
                try:
                    # Fixed NameError: the bare 'copyfile' import from shutil
                    # is commented out at the top of the file, so the original
                    # crashed here whenever prototype_file was given.
                    shutil.copyfile(prototype_file, output_file)
                except OSError:
                    pass
            else:
                open(output_file, 'a').close()
        date += relativedelta(**{period_units: period_length})
    return test_dates
def create_intermittent_period_test_files(path,
                                          period_units,
                                          period_length,
                                          num_periods,
                                          start_datetime,
                                          prefix='',
                                          suffix='_.mrf',
                                          prototype_file=None,
                                          make_year_dirs=False,
                                          no_files=False):
    """
    Fills a directory structure with files that have an intermittent period
    using the specified parameters. Returns a list of all the date intervals
    that were created.

    Arguments:
        path -- base directory tree to populate.
        period_units -- unit size of each period in 'days', 'months', or 'years'.
        period_length -- the length of each period in the aforementioned units
        num_periods -- the number of interval pairs to create.
        start_datetime -- a datetime.datetime object with the desired start date
        prefix -- (optional) a string to append to the beginning of each filename.
        suffix -- (optional) a string to append to the end of each filename.
        prototype_file -- (optional) a prototype file to create each copy from (otherwise creates just empty files).
        make_year_dirs -- (optional) choose to create separate year dirs for the created files instead of dumping them all
            in one dir.
        no_files -- (optional) returns a list of dates but creates no files.
    """
    if not no_files:
        make_dir_tree(path)
    # Create a list of date intervals, each separated by the specified period length
    test_dates = []
    year_dir = ''
    # Sub-daily periods carry an HHMMSS component in the filename
    # (hoisted out of the loop -- it does not depend on the iteration).
    subdaily = any(unit in period_units
                   for unit in ('hours', 'minutes', 'seconds'))
    for x in range(num_periods):
        # Create a new start date and end date for each interval requested
        interval_set = []
        for y in range(1, 5):
            date = start_datetime + relativedelta(
                **{period_units: period_length * y})
            interval_set.append(date)
        test_dates.append(interval_set)
        # Push the start time of the next interval to twice the period distance from the end of the last interval
        start_datetime = interval_set[-1] + relativedelta(
            **{period_units: period_length * 2})
        if not no_files:
            # If this is the first date or it has a different year than the previous, create that dir
            if make_year_dirs and (not x
                                   or test_dates[-1][-1].year != date.year):
                year_dir = str(date.year)
                make_dir_tree(os.path.join(path, year_dir))
            for interval in interval_set:
                if subdaily:
                    time_string = str(interval.hour).zfill(2) + str(
                        interval.minute).zfill(2) + str(
                            interval.second).zfill(2)
                else:
                    time_string = ''
                filename = prefix + str(interval.year) + str(
                    interval.timetuple().tm_yday).zfill(
                        3) + time_string + suffix
                output_path = os.path.join(path, year_dir)
                output_file = os.path.join(output_path, filename)
                if prototype_file:
                    try:
                        # Fixed NameError: the bare 'copyfile' import from
                        # shutil is commented out at the top of the file.
                        shutil.copyfile(prototype_file, output_file)
                    except OSError:
                        pass
                else:
                    open(output_file, 'a').close()
    return test_dates
def read_zkey(zdb, sort):
    """
    Reads z-index database file and returns the first or last key depending on sort order

    Arguments:
        zdb -- the z-index database file name
        sort -- the sort order ('ASC' or 'DESC'); interpolated into the SQL,
            so it must be a trusted literal (keywords cannot be bound as
            sqlite parameters).

    Returns:
        The first key_str in the requested order, or None when the file is
        missing, the table is empty, or the query fails.
    """
    if not os.path.isfile(zdb):
        return None
    # con starts as None so the finally block is safe even if connect()
    # itself fails (the original referenced an unbound 'con' in that case).
    con = None
    try:
        con = sqlite3.connect(zdb, timeout=60)  # 1 minute timeout
        cur = con.cursor()
        # Check for existing key
        cur.execute("SELECT key_str FROM ZINDEX ORDER BY key_str " + sort +
                    " LIMIT 1;")
        row = cur.fetchone()
        return row[0] if row is not None else None
    except sqlite3.Error as e:
        # SELECT has nothing to roll back; just report and return None.
        print("%s:" % e.args[0])
        return None
    finally:
        # Always release the connection (the original leaked it on the
        # empty-table path).
        if con:
            con.close()
def get_file_list(path):
    """Return full paths of the regular files directly inside *path*.

    Subdirectories are skipped; order follows os.listdir().
    """
    candidates = (os.path.join(path, name) for name in os.listdir(path))
    return [entry for entry in candidates if os.path.isfile(entry)]
def get_layer_config(filepath, archive_config):
    """
    Parses a layer config XML file and its associated environment config file
    and returns a dict with relevant values. Generally, <TagName> turns into config['tag_name'].

    Arguments:
        filepath -- path to the layer config file
        archive_config -- path to the archive config file

    Returns:
        dict -- may be partial: if any of the three config files cannot be
        read, whatever has been collected so far is returned immediately.
    """
    config = {}
    # Get the layer, environment, and archive config DOMs
    try:
        with open(filepath, "r") as lc:
            config_dom = xml.dom.minidom.parse(lc)
            # The layer config points at its environment config file.
            env_config = config_dom.getElementsByTagName(
                "EnvironmentConfig")[0].firstChild.nodeValue
    except IOError:
        print("Cannot read file " + filepath)
        return config
    try:
        with open(archive_config, "r") as archive:
            archive_dom = xml.dom.minidom.parse(archive)
    except IOError:
        print("Cannot read file " + archive_config)
        return config
    # Get archive root path and the archive location
    archive_root = config_dom.getElementsByTagName(
        'ArchiveLocation')[0].attributes['root'].value
    # Find the <Archive> entry whose id matches the layer's root attribute.
    config['archive_basepath'] = next(
        loc.getElementsByTagName('Location')[0].firstChild.nodeValue
        for loc in archive_dom.getElementsByTagName('Archive')
        if loc.attributes['id'].value == archive_root)
    config['archive_location'] = os.path.join(
        config['archive_basepath'],
        config_dom.getElementsByTagName('ArchiveLocation')[0].firstChild.
        nodeValue)
    # Add everything we need from the layer config
    config['prefix'] = config_dom.getElementsByTagName(
        "FileNamePrefix")[0].firstChild.nodeValue
    config['identifier'] = config_dom.getElementsByTagName(
        "Identifier")[0].firstChild.nodeValue
    config['time'] = config_dom.getElementsByTagName(
        "Time")[0].firstChild.nodeValue
    config['tiled_group_name'] = config_dom.getElementsByTagName(
        "TiledGroupName")[0].firstChild.nodeValue
    config['colormaps'] = config_dom.getElementsByTagName("ColorMap")
    # A layer declares either an explicit empty-tile file or just its size;
    # exactly one of these keys ends up in the dict.
    try:
        config['empty_tile'] = config_dom.getElementsByTagName(
            'EmptyTile')[0].firstChild.nodeValue
    except IndexError:
        config['empty_tile_size'] = config_dom.getElementsByTagName(
            'EmptyTileSize')[0].firstChild.nodeValue
    # year="true" on <ArchiveLocation> means the archive uses per-year dirs.
    config['year_dir'] = False
    try:
        if config_dom.getElementsByTagName(
                'ArchiveLocation')[0].attributes['year'].value == 'true':
            config['year_dir'] = True
    except KeyError:
        pass
    # Vector layers carry extra optional tags; raster layers lack them.
    try:
        config['vector_type'] = config_dom.getElementsByTagName(
            'VectorType')[0].firstChild.nodeValue
        config['vector_layer_contents'] = config_dom.getElementsByTagName(
            'MapfileLayerContents')[0].firstChild.nodeValue
    except IndexError:
        pass
    try:
        with open(env_config, "r") as env:
            env_dom = xml.dom.minidom.parse(env)
    except IOError:
        print("Cannot read file " + env_config)
        return config
    # Add everything we need from the environment config
    # (missing service entries yield None rather than raising).
    staging_locations = env_dom.getElementsByTagName('StagingLocation')
    config['wmts_staging_location'] = next(
        (loc.firstChild.nodeValue for loc in staging_locations
         if loc.attributes["service"].value == "wmts"), None)
    config['twms_staging_location'] = next(
        (loc.firstChild.nodeValue for loc in staging_locations
         if loc.attributes["service"].value == "twms"), None)
    config['cache_location'] = next(
        (loc.firstChild.nodeValue
         for loc in env_dom.getElementsByTagName("CacheLocation")
         if loc.attributes["service"].value == "wmts"), None)
    config['wmts_gc_path'] = next(
        (loc.firstChild.nodeValue
         for loc in env_dom.getElementsByTagName("GetCapabilitiesLocation")
         if loc.attributes["service"].value == "wmts"), None)
    config['twms_gc_path'] = next(
        (loc.firstChild.nodeValue
         for loc in env_dom.getElementsByTagName("GetCapabilitiesLocation")
         if loc.attributes["service"].value == "twms"), None)
    config['colormap_locations'] = [
        loc for loc in env_dom.getElementsByTagName("ColorMapLocation")
    ]
    config['legend_location'] = env_dom.getElementsByTagName(
        'LegendLocation')[0].firstChild.nodeValue
    # Mapfile settings are optional in the environment config.
    try:
        config['mapfile_location'] = env_dom.getElementsByTagName(
            'MapfileLocation')[0].firstChild.nodeValue
        config['mapfile_location_basename'] = env_dom.getElementsByTagName(
            'MapfileLocation')[0].attributes["basename"].value
        config['mapfile_staging_location'] = env_dom.getElementsByTagName(
            'MapfileStagingLocation')[0].firstChild.nodeValue
    except (IndexError, KeyError):
        pass
    return config
def get_time_string(start_datetime, end_datetime, config):
    """
    Returns a GetCapabilities date search string for the given start and end datetimes.

    If the layer's 'Time' config value carries a period designator (a
    segment starting with 'P', e.g. 'P1D'), it is carried over into the
    result in the same position (leading or trailing).
    """
    # Base search string: "<start>Z/<end>Z".
    interval = start_datetime.isoformat() + 'Z/' + end_datetime.isoformat() + 'Z'
    segments = config['time'].split('/')
    if segments[0].startswith('P'):
        return segments[0] + interval + '/'
    if segments[-1].startswith('P'):
        return interval + '/' + segments[-1]
    return interval
def make_dir_tree(path, ignore_existing=False):
    """
    Creates the specified directory tree. Throws an error
    and doesn't do anything if there are already files in that dir.
    Kind of like 'mkdir -p'.

    Arguments:
        path -- path to be created
        ignore_existing -- when True, an existing non-empty directory is
            tolerated instead of raising OSError.
    """
    try:
        os.makedirs(path)
    except OSError:
        # The directory already exists; only complain when it already has
        # contents and the caller didn't opt out.
        if os.listdir(path) and not ignore_existing:
            raise OSError("Target directory {0} is not empty.".format(path))
    return
def setup_test_layer(test_file_path, cache_path, prefix):
    """
    Sets up a test imagery layer by copying the data files and the cache config file
    to the specified directories. It also restarts Apache in order for these changes to take effect.

    Arguments:
        test_file_path -- the path of the dir where the test files are located.
        cache_path -- the path of the dir where the cache files are located
        prefix -- the prefix of the data and cache files that will be copied.
    """
    make_dir_tree(os.path.join(cache_path, prefix))
    # Copy MRF files to a new cache directory and the cache file to the root of the cache location
    for file in os.listdir(test_file_path):
        file_path = os.path.join(test_file_path, file)
        if os.path.isfile(file_path) and prefix in file:
            # Fixed NameError: the bare 'copy' import from shutil is
            # commented out at the top of the file; use shutil.copy.
            if any(ext for ext in ('.mrf', 'ppg', 'idx', '.pjg')
                   if ext in file):
                shutil.copy(file_path, cache_path)
            elif '_cache.config' in file:
                shutil.copy(
                    file_path,
                    os.path.join(cache_path, 'cache_all_wmts.config'))
    run_command('apachectl stop')
    run_command('apachectl start')
    return
def get_url(url):
    """
    Grabs and returns a file from a url.

    Arguments
        url -- the URL of the file to be downloaded.

    Returns:
        The open response object from urllib.request.urlopen (the body is
        NOT read here; callers read/hash it themselves).

    Raises:
        urllib.error.URLError -- with a friendlier message when the URL
        cannot be accessed.
    """
    try:
        response = urllib.request.urlopen(url)
    except urllib.error.URLError:
        raise urllib.error.URLError('Cannot access URL: ' + url)
    except http.client.RemoteDisconnected:
        # multi-layer WMS requests would close connection prematurely
        # -- retry exactly once; a second failure propagates to the caller.
        response = urllib.request.urlopen(url)
    return response
def check_apache_running():
    """
    Checks to see if Apache is running on the test machine, bails if it's not.

    Raises:
        ValueError -- when no httpd process is found in the process list.
    """
    # Grep the process table for any running httpd processes.
    probe = subprocess.Popen(
        'ps -e | grep "httpd"',
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    output = probe.stdout.read()
    if not output:
        raise ValueError('Apache does not appear to be running.')
    return True
def check_redis_running():
    """
    Checks to see if redis is running on the test machine, bails if it's not.

    Raises:
        ValueError -- when no redis-server process is found.
    """
    # Grep the process table for any running redis-server processes.
    probe = subprocess.Popen(
        'ps -e | grep "redis-server"',
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT)
    output = probe.stdout.read()
    if not output:
        raise ValueError('Redis does not appear to be running.')
    return True
def ordered_d(obj):
    """
    Recursively sort any lists it finds (and convert dictionaries to lists of (key, value) pairs
    """
    if isinstance(obj, dict):
        return sorted((key, ordered_d(value)) for key, value in obj.items())
    if isinstance(obj, list):
        return sorted(ordered_d(item) for item in obj)
    # Scalars pass through untouched.
    return obj
def order_dict(dictionary):
    """Return a copy of *dictionary* with keys sorted at every nesting level."""
    ordered = {}
    for key in sorted(dictionary):
        value = dictionary[key]
        ordered[key] = order_dict(value) if isinstance(value, dict) else value
    return ordered
def check_dicts(d, ref_d):
    """
    Checks to see if dict d is equivalent to dict ref_d.

    Both dicts are key-normalized via order_dict before comparison.

    Arguments:
        d -- dict to compare
        ref_d -- reference dict being compared against
    """
    return order_dict(d) == order_dict(ref_d)
def check_tile_request(url, ref_hash):
    """
    Checks to see if Apache is running, downloads a tile from the specified URL,
    and checks it against a hash value. Returns true or false.

    Arguments
        url -- the URL of the tile to be tested
        ref_hash -- the hash that the file will be tested against.
    """
    check_apache_running()
    fetched_hash = get_file_hash(get_url(url))
    print("tile_hash: " + fetched_hash)
    return fetched_hash == ref_hash
def check_response_code(url, code, code_value=''):
    """
    Checks the response code and optional code value returned from OnEarth against given criteria.

    Arguments:
        url -- URL to be requested.
        code -- integer HTTP reponse code
        code_value -- any text that should appear in the response from OnEarth
    """
    check_apache_running()
    try:
        response = urllib.request.urlopen(url)
        status = 200
    except urllib.error.HTTPError as err:
        # The HTTPError object doubles as the response body source.
        status = err.code
        response = err
    body = response.read().decode('utf-8')
    return status == code and code_value in body
def check_layer_headers(test_obj, headers, expected_layer_id_req, expected_layer_id_actual, expected_layer_time_req, expected_layer_time_actual):
    """Assert that the four Layer-*/Time-* tile headers match expectations.

    Arguments:
        test_obj -- a unittest-style object providing assertEqual.
        headers -- iterable of (name, value) header pairs (or a dict).
        expected_layer_id_req/actual, expected_layer_time_req/actual --
            expected values for the corresponding headers.
    """
    check_apache_running()
    headers = dict(headers)
    expectations = [
        ('Layer-Identifier-Request', expected_layer_id_req),
        ('Layer-Identifier-Actual', expected_layer_id_actual),
        ('Layer-Time-Request', expected_layer_time_req),
        ('Layer-Time-Actual', expected_layer_time_actual),
    ]
    # Look all four headers up first so a missing header raises KeyError
    # before any assertion fires (matches the original's ordering).
    values = {name: headers[name] for name, _ in expectations}
    for name, expected in expectations:
        actual = values[name]
        test_obj.assertEqual(actual, expected, f'Tile header {name} is {actual} but expected {expected}')
def check_wmts_error(url, code, hash):
    """
    Checks WMTS error responses, which often return a HTTP error code and an XML response.
    Arguments:
    url (str) -- url to check
    code (int) -- expected HTTP response code
    hash (str) -- expected md5 hash of the response body
    Returns True when both the status code and the body hash match.
    """
    check_apache_running()
    try:
        # Read the body bytes here as well -- previously the 200 path left
        # `response` as the HTTPResponse object, which made hasher.update()
        # raise a TypeError whenever the expected code was 200.
        response = urllib.request.urlopen(url).read()
        r_code = 200
    except urllib.error.HTTPError as e:
        r_code = e.code
        response = e.read()
    if r_code == code:
        hasher = hashlib.md5()
        hasher.update(response)
        hash_value = str(hasher.hexdigest())
        return hash_value == hash
    return False
def test_snap_request(hash_table, req_url):
    """
    Request the tile at *req_url* and look up the date associated with
    its hash.

    Arguments:
    hash_table -- a dict mapping tile hashes to date strings.
    req_url -- a string with the url that's to be used for the request
    Returns the matching date string, or '' if the hash is unknown.
    """
    fetched_tile = get_url(req_url)
    fetched_hash = get_file_hash(fetched_tile)
    print("tile_hash: " + fetched_hash)
    return hash_table.get(fetched_hash, '')
def get_xml(file):
    """
    Parse *file* as XML and return the resulting DOM object, or None
    when the file does not contain valid XML.

    Arguments:
    file -- path of the file to be opened.
    """
    with open(file, 'r') as handle:
        try:
            return xml.dom.minidom.parse(handle)
        except xml.parsers.expat.ExpatError:
            return None
def file_text_replace(infile, outfile, before, after):
    """
    Copy *infile* to *outfile*, replacing every occurrence of *before*
    with *after* along the way. Any existing *outfile* is overwritten.

    Arguments:
    infile -- input file path
    outfile -- path of new file to be saved
    before -- string to search for
    after -- string to replace 'before' with
    """
    with open(infile, 'r') as source:
        replaced_text = source.read().replace(before, after)
    with open(outfile, 'w') as destination:
        destination.write(replaced_text)
def check_valid_mvt(file, warn_if_empty=False):
    """
    Check whether *file* contains a valid gzipped Mapbox Vector Tile.

    Arguments:
    file -- file-like object containing the gzipped tile data
    warn_if_empty -- when True, also fail if the tile has no layers
    Returns True for a decodable tile, False otherwise.
    """
    tile_buffer = io.BytesIO()
    tile_buffer.write(file.read())
    tile_buffer.seek(0)
    try:
        unzipped_tile = gzip.GzipFile(fileobj=tile_buffer)
        tile_data = unzipped_tile.read()
    except IOError:
        # Not gzip data at all.
        return False
    try:
        tile = mapbox_vector_tile.decode(tile_data)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any decode failure means an invalid tile.
        return False
    if warn_if_empty:
        try:
            # IndexError here means the tile has no layers at all.
            # NOTE(review): a layer with an empty feature list still passes;
            # confirm whether that is intended for `warn_if_empty`.
            num_features = len(tile[list(tile.keys())[0]]['features'])
        except IndexError:
            return False
    return True
def test_wmts_error(test_obj, test_url, error_code_expected,
                    exception_code_expected, locator_expected,
                    exception_text_expected):
    """
    Request *test_url* and validate the full WMTS ExceptionReport response.

    Checks, in order: the HTTP status code, the text/xml content type,
    XML well-formedness, the ExceptionReport root element (namespace,
    schemaLocation, version, xml:lang), the Exception element's
    exceptionCode and locator attributes, and the ExceptionText content.

    Arguments:
    test_obj -- TestCase instance used for the assertions
    test_url -- URL expected to produce a WMTS error
    error_code_expected -- expected integer HTTP status code
    exception_code_expected -- expected exceptionCode attribute value
    locator_expected -- expected locator attribute value
    exception_text_expected -- expected ExceptionText element text
    """
    r = requests.get(test_url)
    test_obj.assertEqual(
        error_code_expected,
        r.status_code,
        msg='Unexpected error code -- should be {0}, is {1}'.format(
            error_code_expected, str(r.status_code)))
    content_type = r.headers.get('content-type')
    test_obj.assertEqual(
        'text/xml',
        content_type,
        msg='Unexpected content type, should be {0}, is {1}'.format(
            'text/xml', content_type))
    try:
        err_xml = etree.fromstring(r.content)
    except etree.XMLSyntaxError:
        test_obj.fail('Invalid XML returned for error message')
    # Check root element attributes
    expected_namespace = '{http://www.opengis.net/ows/1.1}'
    root_element_expected_value = expected_namespace + 'ExceptionReport'
    test_obj.assertEqual(
        root_element_expected_value,
        err_xml.tag,
        msg='Invalid root element or namespace, should be {0}, is {1}'.format(
            root_element_expected_value, err_xml.tag))
    schema_location_found = err_xml.attrib.get(
        '{http://www.w3.org/2001/XMLSchema-instance}schemaLocation')
    test_obj.assertIsNotNone(
        schema_location_found,
        msg='Missing schemaLocation attribute from ExceptionReport element')
    schema_location_expected = 'http://schemas.opengis.net/ows/1.1.0/owsExceptionReport.xsd'
    test_obj.assertEqual(
        schema_location_expected,
        schema_location_found,
        msg=
        'Invalid schemaLocation attribute for ExceptionReport element, should be {0}, is {1}'
        .format(schema_location_expected, schema_location_found))
    version_found = err_xml.attrib.get('version')
    test_obj.assertIsNotNone(
        version_found,
        msg='Missing version attribute for ExceptionReport element')
    version_expected = '1.1.0'
    test_obj.assertEqual(
        version_expected,
        version_found,
        msg=
        'Invalid version attribute for ExceptionReport element, should be {0}, is {1}'
        .format(version_expected, version_found))
    lang_found = err_xml.attrib.get(
        '{http://www.w3.org/XML/1998/namespace}lang')
    test_obj.assertIsNotNone(
        lang_found,
        msg='Missing xml:lang attribute from ExceptionReport element')
    lang_expected = 'en'
    test_obj.assertEqual(
        lang_expected,
        lang_found,
        msg=
        'Invalid xml:lang attribute for ExceptionReport element, should be {0}, is {1}'
        .format(lang_expected, lang_found))
    # Check <Exception> content
    exception_element = err_xml.find(expected_namespace + 'Exception')
    test_obj.assertIsNotNone(
        exception_element, msg='Missing Exception element')
    exception_code_found = exception_element.attrib.get('exceptionCode')
    # Typo fixed in the two messages below: 'Mising' -> 'Missing'.
    test_obj.assertIsNotNone(
        exception_code_found,
        msg='Missing exceptionCode attribute for Exception element')
    test_obj.assertEqual(
        exception_code_expected,
        exception_code_found,
        msg=
        'Invalid exceptionCode attribute for Exception element, should be {0}, is {1}'
        .format(exception_code_expected, exception_code_found))
    locator_found = exception_element.attrib.get('locator')
    test_obj.assertIsNotNone(
        locator_found, msg='Missing locator attribute for Exception element')
    test_obj.assertEqual(
        locator_expected,
        locator_found,
        msg=
        'Invalid locator attribute for Exception element, should be {0}, is {1}'
        .format(locator_expected, locator_found))
    # Check <ExceptionText> content
    exception_text_element = exception_element.find(expected_namespace +
                                                    'ExceptionText')
    test_obj.assertIsNotNone(
        exception_text_element, msg='Missing ExceptionText element')
    exception_text_found = exception_text_element.text
    test_obj.assertIsNotNone(
        exception_text_found, msg='Missing ExceptionText text content')
    test_obj.assertEqual(
        exception_text_expected,
        exception_text_found,
        msg=
        'Invalid text content for ExceptionText element, should be {0}, is {1}'
        .format(exception_text_expected, exception_text_found))
def bulk_replace(source_str, replace_list):
    """Apply each (old, new) pair in *replace_list* to *source_str* in order.

    Replacement values are coerced to str. Returns the substituted string.
    """
    result = source_str
    for replacement in replace_list:
        target, value = replacement[0], str(replacement[1])
        result = result.replace(target, value)
    return result
def redis_running():
    """Return True when a local Redis server answers a PING, else False."""
    try:
        client = redis.StrictRedis(host='localhost', port=6379, db=0)
        return client.ping()
    except redis.exceptions.ConnectionError:
        return False
def seed_redis_data(layers, db_keys=None):
    """Populate the test Redis with a default date and period set per layer.

    Arguments:
    layers -- iterable of tuples: (layer name, default value, period or
              list of periods, ...)
    db_keys -- optional list of key components prefixed onto each Redis key
    """
    client = redis.StrictRedis(host='localhost', port=6379, db=0)
    prefix = ''
    if db_keys:
        prefix = ''.join(key + ':' for key in db_keys)
    for layer in layers:
        client.set('{0}layer:{1}:default'.format(prefix, layer[0]), layer[1])
        # Accept either a single period or a list of periods.
        periods = layer[2] if isinstance(layer[2], list) else [layer[2]]
        for period in periods:
            client.sadd('{0}layer:{1}:periods'.format(prefix, layer[0]),
                        period)
def seed_redis_best_data(layers, filename, db_keys=None):
    """Seed the 'best' hash entry for each layer in the test Redis.

    Arguments:
    layers -- iterable of layer tuples; layer[0] is the name, layer[4]
              the hash field to map to *filename*
    filename -- value stored under the hash field
    db_keys -- optional list of key components prefixed onto each Redis key
    """
    client = redis.StrictRedis(host='localhost', port=6379, db=0)
    prefix = ':'.join(db_keys) + ':' if db_keys else ''
    for layer in layers:
        client.hmset('{0}layer:{1}:best'.format(prefix, layer[0]), {layer[4]: filename})
def remove_redis_layer(layers, db_keys=None):
    """
    Delete the default and periods keys for each layer from the test Redis.

    Arguments:
    layers -- iterable of layer tuples; layer[0] is the layer name
    db_keys -- optional list of key components prefixed onto each Redis key
    """
    # The connection and the key prefix do not depend on the layer, so build
    # them once instead of reconnecting on every loop iteration.
    r = redis.StrictRedis(host='localhost', port=6379, db=0)
    db_keystring = ''
    if db_keys:
        for key in db_keys:
            db_keystring += key + ':'
    for layer in layers:
        r.delete('{0}layer:{1}:default'.format(db_keystring, layer[0]))
        r.delete('{0}layer:{1}:periods'.format(db_keystring, layer[0]))
| nasa-gibs/onearth | src/test/oe_test_utils.py | Python | apache-2.0 | 44,219 |
# Version identifier for this package, consumed by packaging/setup code.
VERSION = '0.3.4'
| jarpy/lambkin | lambkin/version.py | Python | apache-2.0 | 18 |
"""Module containing bug report helper(s)."""
from __future__ import print_function
import json
import platform
import sys
import ssl
import idna
import urllib3
from . import __version__ as requests_version
try:
import charset_normalizer
except ImportError:
charset_normalizer = None
chardet = None
try:
from urllib3.contrib import pyopenssl
except ImportError:
pyopenssl = None
OpenSSL = None
cryptography = None
else:
import OpenSSL
import cryptography
def _implementation():
    """Return a dict with the Python implementation and version.

    Provide both the name and the version of the Python implementation
    currently running. For example, on CPython 2.7.5 it will return
    {'name': 'CPython', 'version': '2.7.5'}.

    This function works best on CPython and PyPy: in particular, it probably
    doesn't work for Jython or IronPython. Future investigation should be done
    to work out the correct shape of the code for those platforms.
    """
    name = platform.python_implementation()
    if name == 'CPython':
        version = platform.python_version()
    elif name == 'PyPy':
        pypy = sys.pypy_version_info
        version = '%s.%s.%s' % (pypy.major, pypy.minor, pypy.micro)
        if pypy.releaselevel != 'final':
            # Append the release level (e.g. 'beta') for pre-releases.
            version = ''.join([version, pypy.releaselevel])
    elif name in ('Jython', 'IronPython'):
        version = platform.python_version()  # Complete Guess
    else:
        version = 'Unknown'
    return {'name': name, 'version': version}
def info():
    """Generate information for a bug report."""
    try:
        platform_info = {
            'system': platform.system(),
            'release': platform.release(),
        }
    except IOError:
        # Platform queries can fail on exotic systems; report placeholders.
        platform_info = {
            'system': 'Unknown',
            'release': 'Unknown',
        }
    implementation_info = _implementation()
    urllib3_info = {'version': urllib3.__version__}
    # At most one of charset_normalizer / chardet is importable; the absent
    # one is reported with a None version.
    charset_normalizer_info = {
        'version': charset_normalizer.__version__ if charset_normalizer else None
    }
    chardet_info = {'version': chardet.__version__ if chardet else None}
    if OpenSSL:
        pyopenssl_info = {
            'version': OpenSSL.__version__,
            'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,
        }
    else:
        pyopenssl_info = {
            'version': None,
            'openssl_version': '',
        }
    cryptography_info = {
        'version': getattr(cryptography, '__version__', ''),
    }
    idna_info = {
        'version': getattr(idna, '__version__', ''),
    }
    system_ssl = ssl.OPENSSL_VERSION_NUMBER
    system_ssl_info = {
        'version': '%x' % system_ssl if system_ssl is not None else ''
    }
    return {
        'platform': platform_info,
        'implementation': implementation_info,
        'system_ssl': system_ssl_info,
        'using_pyopenssl': pyopenssl is not None,
        # chardet being absent means charset_normalizer is in use.
        'using_charset_normalizer': chardet is None,
        'pyOpenSSL': pyopenssl_info,
        'urllib3': urllib3_info,
        'chardet': chardet_info,
        'charset_normalizer': charset_normalizer_info,
        'cryptography': cryptography_info,
        'idna': idna_info,
        'requests': {
            'version': requests_version,
        },
    }
def main():
    """Pretty-print the bug information as JSON."""
    report = info()
    print(json.dumps(report, indent=2, sort_keys=True))
if __name__ == '__main__':
    main()
| cloudera/hue | desktop/core/ext-py3/requests-2.27.1/requests/help.py | Python | apache-2.0 | 3,920 |
# -*- coding: utf-8 -*-
from PyQt5.QtCore import Qt
from dgp.core.controllers.project_controllers import AirborneProjectController
from .base import WorkspaceTab
class ProjectTab(WorkspaceTab):
    """Workspace tab wrapping an :class:`AirborneProjectController`."""
    def __init__(self, project: AirborneProjectController, parent=None):
        # Qt.Widget keeps the tab a plain child widget rather than a window.
        super().__init__(parent=parent, flags=Qt.Widget)
        # Controller for the project displayed by this tab.
        self.project = project
    @property
    def title(self) -> str:
        """Tab caption: the project's 'name' attribute rendered as a string."""
        return f'{self.project.get_attr("name")}'
    @property
    def uid(self):
        # Unique identifier, delegated to the underlying project controller.
        return self.project.uid
| DynamicGravitySystems/DGP | dgp/gui/workspaces/project.py | Python | apache-2.0 | 517 |
#!/home/firlism/tools/css_platform/sleepyenv/bin/python
# EASY-INSTALL-SCRIPT: 'Pillow==2.8.2','pilfont.py'
# Auto-generated setuptools wrapper: locates the installed Pillow 2.8.2
# distribution and runs its bundled pilfont.py script.
__requires__ = 'Pillow==2.8.2'
__import__('pkg_resources').run_script('Pillow==2.8.2', 'pilfont.py')
| hexlism/css_platform | sleepyenv/bin/pilfont.py | Python | apache-2.0 | 209 |
from __future__ import unicode_literals
from moto.acm import acm_backends
from moto.apigateway import apigateway_backends
from moto.autoscaling import autoscaling_backends
from moto.awslambda import lambda_backends
from moto.cloudformation import cloudformation_backends
from moto.cloudwatch import cloudwatch_backends
from moto.cognitoidentity import cognitoidentity_backends
from moto.cognitoidp import cognitoidp_backends
from moto.core import moto_api_backends
from moto.datapipeline import datapipeline_backends
from moto.dynamodb import dynamodb_backends
from moto.dynamodb2 import dynamodb_backends2
from moto.dynamodbstreams import dynamodbstreams_backends
from moto.ec2 import ec2_backends
from moto.ecr import ecr_backends
from moto.ecs import ecs_backends
from moto.elb import elb_backends
from moto.elbv2 import elbv2_backends
from moto.emr import emr_backends
from moto.events import events_backends
from moto.glacier import glacier_backends
from moto.glue import glue_backends
from moto.iam import iam_backends
from moto.instance_metadata import instance_metadata_backends
from moto.kinesis import kinesis_backends
from moto.kms import kms_backends
from moto.logs import logs_backends
from moto.opsworks import opsworks_backends
from moto.organizations import organizations_backends
from moto.polly import polly_backends
from moto.rds2 import rds2_backends
from moto.redshift import redshift_backends
from moto.resourcegroups import resourcegroups_backends
from moto.route53 import route53_backends
from moto.s3 import s3_backends
from moto.ses import ses_backends
from moto.secretsmanager import secretsmanager_backends
from moto.sns import sns_backends
from moto.sqs import sqs_backends
from moto.ssm import ssm_backends
from moto.sts import sts_backends
from moto.swf import swf_backends
from moto.xray import xray_backends
from moto.iot import iot_backends
from moto.iotdata import iotdata_backends
from moto.batch import batch_backends
from moto.resourcegroupstaggingapi import resourcegroupstaggingapi_backends
from moto.config import config_backends
# Maps each mocked AWS service name to that service's per-region backend
# dict. Note 's3' and 's3bucket_path' intentionally share the same backends.
BACKENDS = {
    'acm': acm_backends,
    'apigateway': apigateway_backends,
    'autoscaling': autoscaling_backends,
    'batch': batch_backends,
    'cloudformation': cloudformation_backends,
    'cloudwatch': cloudwatch_backends,
    'cognito-identity': cognitoidentity_backends,
    'cognito-idp': cognitoidp_backends,
    'config': config_backends,
    'datapipeline': datapipeline_backends,
    'dynamodb': dynamodb_backends,
    'dynamodb2': dynamodb_backends2,
    'dynamodbstreams': dynamodbstreams_backends,
    'ec2': ec2_backends,
    'ecr': ecr_backends,
    'ecs': ecs_backends,
    'elb': elb_backends,
    'elbv2': elbv2_backends,
    'events': events_backends,
    'emr': emr_backends,
    'glacier': glacier_backends,
    'glue': glue_backends,
    'iam': iam_backends,
    'moto_api': moto_api_backends,
    'instance_metadata': instance_metadata_backends,
    'logs': logs_backends,
    'kinesis': kinesis_backends,
    'kms': kms_backends,
    'opsworks': opsworks_backends,
    'organizations': organizations_backends,
    'polly': polly_backends,
    'redshift': redshift_backends,
    'resource-groups': resourcegroups_backends,
    'rds': rds2_backends,
    's3': s3_backends,
    's3bucket_path': s3_backends,
    'ses': ses_backends,
    'secretsmanager': secretsmanager_backends,
    'sns': sns_backends,
    'sqs': sqs_backends,
    'ssm': ssm_backends,
    'sts': sts_backends,
    'swf': swf_backends,
    'route53': route53_backends,
    'lambda': lambda_backends,
    'xray': xray_backends,
    'resourcegroupstaggingapi': resourcegroupstaggingapi_backends,
    'iot': iot_backends,
    'iot-data': iotdata_backends,
}
def get_model(name, region_name):
    """Find instances of model *name* in the backend for *region_name*.

    Scans every registered service backend; when a backend for the region
    declares the model in its ``__models__`` mapping, returns the list
    produced by the mapped accessor. Returns None when nothing matches.
    """
    for service_backends in BACKENDS.values():
        for region, backend in service_backends.items():
            if region != region_name:
                continue
            declared_models = getattr(backend.__class__, '__models__', {})
            if name in declared_models:
                return list(getattr(backend, declared_models[name])())
| whummer/moto | moto/backends.py | Python | apache-2.0 | 4,058 |
def cavity(l,n):
    # Mark every interior cell that is strictly greater than all four of its
    # orthogonal neighbours with 'X' (HackerRank "Cavity Map"). Border cells
    # never qualify. Cells are single characters, so > compares characters;
    # assumes the grid holds digit characters -- TODO confirm input format.
    for i in xrange(1,n-1):
        for j in xrange(1,n-1):
            # The != 'X' guards skip cells adjacent to an already-marked
            # cavity. Since a cavity's neighbours are strictly smaller,
            # adjacent cavities cannot occur, so the guards are defensive.
            if l[i-1][j]!='X' and l[i][j-1]!='X' and l[i+1][j]!='X' and l[i][j+1]!='X' and l[i][j]>l[i-1][j] and l[i][j]>l[i+1][j] and l[i][j]>l[i][j-1] and l[i][j]>l[i][j+1]:
                l[i][j]='X'
if __name__ == '__main__':
    # Read an n x n character grid, mark cavities in place, print the result.
    # Python 2 script: xrange/raw_input/print-statement.
    n = input()
    p = []
    for _ in xrange(n):
        line = list(raw_input())
        p.append(line)
    cavity(p, n)
    for line in p:
        print ''.join(line)
| arriqaaq/hackerrank | Algo- Warmup/cavity-rank.py | Python | apache-2.0 | 508 |
import json
import pyaml
import yaml
from lib.util import Util
from lib.parser import Parser
import logging
import traceback
import glob
import os
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('ToscanfvParser')
class ToscanfvParser(Parser):
    """Parser methods for toscanfv project type
    """
    def __init__(self):
        super(ToscanfvParser, self).__init__()
    @classmethod
    def importprojectdir(cls,dir_project, file_type):
        """Imports all descriptor files under a given folder
        this method is specific for Toscanfv project type

        file_type is expected to be 'json' or 'yaml'; other values are
        silently ignored.
        """
        # Descriptor maps keyed by descriptor type, plus node-position
        # metadata loaded from vertices.json.
        project = {
            'toscayaml':{},
            'positions': {}
        }
        for desc_type in project:
            # Each descriptor type lives in an upper-cased subfolder
            # (e.g. TOSCAYAML/); missing folders are simply skipped.
            cur_type_path = os.path.join(dir_project, desc_type.upper())
            log.debug(cur_type_path)
            if os.path.isdir(cur_type_path):
                for file in glob.glob(os.path.join(cur_type_path, '*.'+file_type)):
                    # The basename without extension becomes the descriptor id.
                    if file_type == 'json':
                        project[desc_type][os.path.basename(file).split('.')[0]] = Util.loadjsonfile(file)
                    elif file_type == 'yaml':
                        project[desc_type][os.path.basename(file).split('.')[0]] = Util.loadyamlfile(file)
        # Node positions, if present, are read from <dir_project>/vertices.json.
        for vertices_file in glob.glob(os.path.join(dir_project, '*.json')):
            if os.path.basename(vertices_file) == 'vertices.json':
                project['positions']['vertices'] = Util.loadjsonfile(vertices_file)
        return project
    @classmethod
    def importprojectfiles(cls, file_dict):
        """Imports descriptors (extracted from the new project POST)
        The keys in the dictionary are the file types
        """
        project = {
            'toscayaml':{},
        }
        for desc_type in project:
            if desc_type in file_dict:
                files_desc_type = file_dict[desc_type]
                for file in files_desc_type:
                    # Each uploaded file object is read and parsed as JSON.
                    project[desc_type][os.path.splitext(file.name)[0]] = json.loads(file.read())
        return project | superfluidity/RDCL3D | code/lib/toscanfv/toscanfv_parser.py | Python | apache-2.0 | 2,078
# Desktop-integration commands for the MATE desktop environment.
OPEN = "mate-open"  # command used to open a target with its default handler
FILE_MANAGER = "caja"  # MATE's file manager executable
| hanya/BookmarksMenu | pythonpath/bookmarks/env/mate.py | Python | apache-2.0 | 42 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from page_objects import *
from selenium import webdriver
import datetime
import time
import Queue
import threading
import traceback
# Shared state for the load test.
tenders = Queue.Queue()  # work queue driving tender-creation threads
tenders_ids = []  # tender ids collected by CreateTenders workers
tenders_threads = 2  # number of concurrent tender-creation threads
bids = Queue.Queue()  # work queue driving bid-placement threads
bids_failed = {}  # tender_id -> 'passed' / 'failed'
runs = Queue.Queue()  # work queue driving the final simultaneous bid clicks
class CreateTenders(threading.Thread):
    """Worker thread that logs in as the owner and creates one tender per
    token consumed from *queue*; created ids are appended to tenders_ids."""
    # Set to True when a run failed with an exception.
    exited = False
    def __init__(self, queue, driver):
        threading.Thread.__init__(self)
        self.queue = queue
        self.driver = driver
        # Page objects bound to this thread's dedicated WebDriver instance.
        self.login_page_owner = LoginPage(
            owner_users['email'], owner_users['password'], self.driver
        )
        self.create_tender_page = CreateTenderPage(self.driver)
        self.find_tender = FindTenderPage(self.driver)
    def run(self):
        while True:
            # Wait for start
            self.queue.get()
            # Process business logic
            self.driver.get(broker['url'])
            try:
                self.login_page_owner.login_as_owner()
                self.driver.get(create_tender_url)
                self.create_tender_page.create_tender()
                # Record the new tender id on the shared list.
                tenders_ids.append(self.find_tender.get_tender_id())
            except Exception as error:
                # self.driver.close()
                self.exited = True
                print (error)
                traceback.print_exc()
                raise error
            finally:
                # Always acknowledge the token so tenders.join() can return.
                self.queue.task_done()
class MakeTendersBids(threading.Thread):
    """Worker thread that logs in as a provider and places one bid on
    *tender_id* per token consumed from *queue*; the outcome is recorded
    in the shared bids_failed dict."""
    # Set to True when a run failed with an exception.
    exited = False
    def __init__(self, queue, user, password, tender_id, driver):
        threading.Thread.__init__(self)
        self.queue = queue
        self.driver = driver
        self.tender_id = tender_id
        # Page objects bound to this thread's dedicated WebDriver instance.
        self.login_page_provider = LoginPage(user, password, self.driver)
        self.find_tender = FindTenderPage(self.driver)
        self.make_bid_page = MakeBidPage(self.driver)
    def run(self):
        while True:
            # Wait for start
            self.queue.get()
            self.driver.get(broker['url'])
            # Process business logic
            try:
                self.login_page_provider.login_as_provider()
                self.find_tender.find_tender(self.tender_id)
                if not self.make_bid_page.make_bid():
                    bids_failed[self.tender_id] = 'failed'
                    print('Bid failed for tender: {}'.format(self.tender_id))
                    return
                bids_failed[self.tender_id] = 'passed'
                print('Bid success for tender {}'.format(self.tender_id))
            except Exception as error:
                # self.driver.close()
                # Fix: this previously assigned False, which is the flag's
                # initial value and made it useless; CreateTenders sets True
                # on failure, so mirror that here.
                self.exited = True
                print(error)
                traceback.print_exc()
                raise error
            finally:
                # Always acknowledge the token so bids.join() can return.
                self.queue.task_done()
class RunTenderBids(threading.Thread):
    """Worker thread that fires one already-prepared bid submission per
    token consumed from *queue*, logging start/finish timestamps to
    load_results.txt."""
    def __init__(self, queue, driver, providerAndTender):
        threading.Thread.__init__(self)
        self.queue = queue
        self.driver = driver
        self.make_bid_page = MakeBidPage(self.driver)
        # Human-readable '<provider> <tender_id>' label used in the log.
        self.providerAndTender = providerAndTender
    def run(self):
        while True:
            # Wait for start
            self.queue.get()
            # Process business logic
            try:
                with open('load_results.txt', 'a') as fl:
                    fl.write('{} started bid for {} —---------------- STARTED\n'.format(self.providerAndTender, datetime.datetime.now()))
                    self.make_bid_page.run_bid()
                    fl.write('{} made bid for {} —---------------- FINISHED\n'.format(self.providerAndTender, datetime.datetime.now()))
                    # The explicit fl.close() was removed: the `with` block
                    # already closes the file on exit.
            finally:
                # Always acknowledge the token so runs.join() can return.
                self.queue.task_done()
start = time.time()
# Phase 1: create tenders concurrently, one daemon thread per slot.
# Start creating tenders
print('Start creating tenders...')
for i in range(tenders_threads):
    driver = webdriver.Chrome()
    driver.set_window_size(1200, 1000)
    t = CreateTenders(tenders, driver)
    t.setDaemon(True)
    t.start()
for i in range(tenders_threads):
    tenders.put(True)
# Wait for all to complete
tenders.join()
print('Tenders created - ' + ', '.join(tenders_ids))
# Phase 2: every provider places a bid on every created tender, each
# provider/tender pair in its own browser and thread.
# Start making tenders bids
print('Start making bids...')
drivers = {}
for tid in tenders_ids:
    for provider in provider_users.items():
        driver = webdriver.Chrome()
        driver.set_window_size(1200, 1000)
        drivers['{} {}'.format(provider[0], tid)] = driver
        b = MakeTendersBids(bids, provider[0], provider[1], tid, driver)
        b.setDaemon(True)
        print(provider[0], tid)
        b.start()
for tid in tenders_ids:
    for provider in provider_users.items():
        bids.put(True)
bids.join()
print('Bids made')
print(bids_failed)
with open('load_results.txt', 'a') as f:
    f.write('{} failed \n'.format(bids_failed))
    # NOTE(review): this close() is redundant -- the `with` block already
    # closes the file.
    f.close()
# Phase 3: trigger all prepared bid submissions near-simultaneously.
# Start making by clicking simultaneously
print('Start running bids...')
for driver in drivers.keys():
    c = RunTenderBids(runs, drivers[driver], driver)
    c.setDaemon(True)
    c.start()
for driver in drivers:
    runs.put(True)
runs.join()
print('Runs performed')
print("Elapsed Time: %s" % (time.time() - start))
for driver in drivers:
    drivers[driver].close()
| lesiavl/selenium_perfomance_tests | pzo_load/load_test_file.py | Python | apache-2.0 | 5,242 |
# Copyright (c) 2012 Citrix Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the aggregates admin api."""
from webob import exc
from nova.api.openstack.compute.contrib import aggregates
from nova import context
from nova import exception
from nova import test
from nova.tests import matchers
# Canned aggregate fixtures returned by the stubbed compute API calls below.
AGGREGATE_LIST = [
    {"name": "aggregate1", "id": "1", "availability_zone": "nova1"},
    {"name": "aggregate2", "id": "2", "availability_zone": "nova1"},
    {"name": "aggregate3", "id": "3", "availability_zone": "nova2"},
    {"name": "aggregate1", "id": "4", "availability_zone": "nova1"}]
# A single fully-populated aggregate fixture (with metadata and hosts).
AGGREGATE = {"name": "aggregate1",
             "id": "1",
             "availability_zone": "nova1",
             "metadata": {"foo": "bar"},
             "hosts": ["host1, host2"]}
class FakeRequest(object):
    """Minimal request stand-in carrying an admin context in its environ."""
    environ = {"nova.context": context.get_admin_context()}
class AggregateTestCase(test.NoDBTestCase):
"""Test Case for aggregates admin api."""
    def setUp(self):
        """Build the controller under test and an admin request context."""
        super(AggregateTestCase, self).setUp()
        self.controller = aggregates.AggregateController()
        self.req = FakeRequest()
        self.context = self.req.environ['nova.context']
    def test_index(self):
        # index() passes the context through and returns the list unchanged.
        def stub_list_aggregates(context):
            if context is None:
                raise Exception()
            return AGGREGATE_LIST
        self.stubs.Set(self.controller.api, 'get_aggregate_list',
                       stub_list_aggregates)
        result = self.controller.index(self.req)
        self.assertEqual(AGGREGATE_LIST, result["aggregates"])
    def test_create(self):
        # create() forwards name and availability_zone to the compute API.
        def stub_create_aggregate(context, name, availability_zone):
            self.assertEqual(context, self.context, "context")
            self.assertEqual("test", name, "name")
            self.assertEqual("nova1", availability_zone, "availability_zone")
            return AGGREGATE
        self.stubs.Set(self.controller.api, "create_aggregate",
                       stub_create_aggregate)
        result = self.controller.create(self.req, {"aggregate":
                                          {"name": "test",
                                           "availability_zone": "nova1"}})
        self.assertEqual(AGGREGATE, result["aggregate"])
    def test_create_with_duplicate_aggregate_name(self):
        # A duplicate name surfaces as HTTP 409 Conflict.
        def stub_create_aggregate(context, name, availability_zone):
            raise exception.AggregateNameExists(aggregate_name=name)
        self.stubs.Set(self.controller.api, "create_aggregate",
                       stub_create_aggregate)
        self.assertRaises(exc.HTTPConflict, self.controller.create,
                          self.req, {"aggregate":
                                     {"name": "test",
                                      "availability_zone": "nova1"}})
    def test_create_with_incorrect_availability_zone(self):
        # An invalid zone propagates the InvalidAggregateAction unchanged.
        def stub_create_aggregate(context, name, availability_zone):
            raise exception.InvalidAggregateAction(action='create_aggregate',
                                                   aggregate_id="'N/A'",
                                                   reason='invalid zone')
        self.stubs.Set(self.controller.api, "create_aggregate",
                       stub_create_aggregate)
        self.assertRaises(exception.InvalidAggregateAction,
                          self.controller.create,
                          self.req, {"aggregate":
                                     {"name": "test",
                                      "availability_zone": "nova_bad"}})
    def test_create_with_no_aggregate(self):
        # Missing the top-level 'aggregate' key -> HTTP 400.
        self.assertRaises(exc.HTTPBadRequest, self.controller.create,
                          self.req, {"foo":
                                     {"name": "test",
                                      "availability_zone": "nova1"}})
    def test_create_with_no_name(self):
        # Missing the 'name' field -> HTTP 400.
        self.assertRaises(exc.HTTPBadRequest, self.controller.create,
                          self.req, {"aggregate":
                                     {"foo": "test",
                                      "availability_zone": "nova1"}})
    def test_create_with_no_availability_zone(self):
        # availability_zone is optional; the API receives None for it.
        def stub_create_aggregate(context, name, availability_zone):
            self.assertEqual(context, self.context, "context")
            self.assertEqual("test", name, "name")
            self.assertEqual(None, availability_zone, "availability_zone")
            return AGGREGATE
        self.stubs.Set(self.controller.api, "create_aggregate",
                       stub_create_aggregate)
        result = self.controller.create(self.req,
                                        {"aggregate": {"name": "test"}})
        self.assertEqual(AGGREGATE, result["aggregate"])
    def test_create_with_null_name(self):
        # Empty name string -> HTTP 400.
        self.assertRaises(exc.HTTPBadRequest, self.controller.create,
                          self.req, {"aggregate":
                                     {"name": "",
                                      "availability_zone": "nova1"}})
    def test_create_with_name_too_long(self):
        # Names longer than 255 characters -> HTTP 400.
        self.assertRaises(exc.HTTPBadRequest, self.controller.create,
                          self.req, {"aggregate":
                                     {"name": "x" * 256,
                                      "availability_zone": "nova1"}})
    def test_create_with_extra_invalid_arg(self):
        # Unknown extra fields in the body -> HTTP 400.
        self.assertRaises(exc.HTTPBadRequest, self.controller.create,
                          self.req, dict(name="test",
                                         availability_zone="nova1",
                                         foo='bar'))
    def test_show(self):
        # show() forwards the id and wraps the result under 'aggregate'.
        def stub_get_aggregate(context, id):
            self.assertEqual(context, self.context, "context")
            self.assertEqual("1", id, "id")
            return AGGREGATE
        self.stubs.Set(self.controller.api, 'get_aggregate',
                       stub_get_aggregate)
        aggregate = self.controller.show(self.req, "1")
        self.assertEqual(AGGREGATE, aggregate["aggregate"])
    def test_show_with_invalid_id(self):
        # An unknown aggregate id surfaces as HTTP 404.
        def stub_get_aggregate(context, id):
            raise exception.AggregateNotFound(aggregate_id=2)
        self.stubs.Set(self.controller.api, 'get_aggregate',
                       stub_get_aggregate)
        self.assertRaises(exc.HTTPNotFound,
                          self.controller.show, self.req, "2")
    def test_update(self):
        # update() forwards the body values dict to the compute API.
        body = {"aggregate": {"name": "new_name",
                              "availability_zone": "nova1"}}
        def stub_update_aggregate(context, aggregate, values):
            self.assertEqual(context, self.context, "context")
            self.assertEqual("1", aggregate, "aggregate")
            self.assertEqual(body["aggregate"], values, "values")
            return AGGREGATE
        self.stubs.Set(self.controller.api, "update_aggregate",
                       stub_update_aggregate)
        result = self.controller.update(self.req, "1", body=body)
        self.assertEqual(AGGREGATE, result["aggregate"])
    def test_update_with_only_name(self):
        # Updating just the name is allowed.
        body = {"aggregate": {"name": "new_name"}}
        def stub_update_aggregate(context, aggregate, values):
            return AGGREGATE
        self.stubs.Set(self.controller.api, "update_aggregate",
                       stub_update_aggregate)
        result = self.controller.update(self.req, "1", body=body)
        self.assertEqual(AGGREGATE, result["aggregate"])
    def test_update_with_only_availability_zone(self):
        # Updating just the availability zone is allowed.
        body = {"aggregate": {"availability_zone": "nova1"}}
        def stub_update_aggregate(context, aggregate, values):
            return AGGREGATE
        self.stubs.Set(self.controller.api, "update_aggregate",
                       stub_update_aggregate)
        result = self.controller.update(self.req, "1", body=body)
        self.assertEqual(AGGREGATE, result["aggregate"])
    def test_update_with_no_updates(self):
        # An empty update dict -> HTTP 400.
        test_metadata = {"aggregate": {}}
        self.assertRaises(exc.HTTPBadRequest, self.controller.update,
                          self.req, "2", body=test_metadata)
    def test_update_with_no_update_key(self):
        # Missing the 'aggregate' key -> HTTP 400.
        test_metadata = {"asdf": {}}
        self.assertRaises(exc.HTTPBadRequest, self.controller.update,
                          self.req, "2", body=test_metadata)
    def test_update_with_wrong_updates(self):
        # Unknown update fields -> HTTP 400.
        test_metadata = {"aggregate": {"status": "disable",
                                       "foo": "bar"}}
        self.assertRaises(exc.HTTPBadRequest, self.controller.update,
                          self.req, "2", body=test_metadata)
    def test_update_with_null_name(self):
        # Empty name string -> HTTP 400.
        test_metadata = {"aggregate": {"name": ""}}
        self.assertRaises(exc.HTTPBadRequest, self.controller.update,
                          self.req, "2", body=test_metadata)
    def test_update_with_name_too_long(self):
        # Names longer than 255 characters -> HTTP 400.
        test_metadata = {"aggregate": {"name": "x" * 256}}
        self.assertRaises(exc.HTTPBadRequest, self.controller.update,
                          self.req, "2", body=test_metadata)
    def test_update_with_bad_aggregate(self):
        # Updating an unknown aggregate -> HTTP 404.
        test_metadata = {"aggregate": {"name": "test_name"}}
        def stub_update_aggregate(context, aggregate, metadata):
            raise exception.AggregateNotFound(aggregate_id=2)
        self.stubs.Set(self.controller.api, "update_aggregate",
                       stub_update_aggregate)
        self.assertRaises(exc.HTTPNotFound, self.controller.update,
                          self.req, "2", body=test_metadata)
    def test_invalid_action(self):
        # Unrecognized action verbs -> HTTP 400.
        body = {"append_host": {"host": "host1"}}
        self.assertRaises(exc.HTTPBadRequest,
                          self.controller.action, self.req, "1", body=body)
    def test_add_host(self):
        # The add_host action forwards id and host to the compute API.
        def stub_add_host_to_aggregate(context, aggregate, host):
            self.assertEqual(context, self.context, "context")
            self.assertEqual("1", aggregate, "aggregate")
            self.assertEqual("host1", host, "host")
            return AGGREGATE
        self.stubs.Set(self.controller.api, "add_host_to_aggregate",
                       stub_add_host_to_aggregate)
        aggregate = self.controller.action(self.req, "1",
                                           body={"add_host": {"host":
                                                              "host1"}})
        self.assertEqual(aggregate["aggregate"], AGGREGATE)
    def test_add_host_with_already_added_host(self):
        # Re-adding a host -> HTTP 409 Conflict.
        def stub_add_host_to_aggregate(context, aggregate, host):
            raise exception.AggregateHostExists(aggregate_id=aggregate,
                                                host=host)
        self.stubs.Set(self.controller.api, "add_host_to_aggregate",
                       stub_add_host_to_aggregate)
        self.assertRaises(exc.HTTPConflict, self.controller.action,
                          self.req, "1",
                          body={"add_host": {"host": "host1"}})
    def test_add_host_with_bad_aggregate(self):
        # Unknown aggregate -> HTTP 404.
        def stub_add_host_to_aggregate(context, aggregate, host):
            raise exception.AggregateNotFound(aggregate_id=aggregate)
        self.stubs.Set(self.controller.api, "add_host_to_aggregate",
                       stub_add_host_to_aggregate)
        self.assertRaises(exc.HTTPNotFound, self.controller.action,
                          self.req, "bogus_aggregate",
                          body={"add_host": {"host": "host1"}})
    def test_add_host_with_bad_host(self):
        # Unknown compute host -> HTTP 404.
        def stub_add_host_to_aggregate(context, aggregate, host):
            raise exception.ComputeHostNotFound(host=host)
        self.stubs.Set(self.controller.api, "add_host_to_aggregate",
                       stub_add_host_to_aggregate)
        self.assertRaises(exc.HTTPNotFound, self.controller.action,
                          self.req, "1",
                          body={"add_host": {"host": "bogus_host"}})
    def test_add_host_with_missing_host(self):
        # Body without a 'host' key -> HTTP 400.
        self.assertRaises(exc.HTTPBadRequest, self.controller.action,
                          self.req, "1", body={"add_host": {"asdf": "asdf"}})
    def test_add_host_raises_key_error(self):
        def stub_add_host_to_aggregate(context, aggregate, host):
            raise KeyError
        self.stubs.Set(self.controller.api, "add_host_to_aggregate",
                       stub_add_host_to_aggregate)
        #NOTE(mtreinish) The check for a KeyError here is to ensure that
        # if add_host_to_aggregate() raises a KeyError it propagates. At
        # one point the api code would mask the error as a HTTPBadRequest.
        # This test is to ensure that this doesn't occur again.
        self.assertRaises(KeyError, self.controller.action, self.req, "1",
                          body={"add_host": {"host": "host1"}})
def test_remove_host(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertEqual("host1", host, "host")
stub_remove_host_from_aggregate.called = True
return {}
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.controller.action(self.req, "1",
body={"remove_host": {"host": "host1"}})
self.assertTrue(stub_remove_host_from_aggregate.called)
def test_remove_host_with_bad_aggregate(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
self.req, "bogus_aggregate",
body={"remove_host": {"host": "host1"}})
def test_remove_host_with_host_not_in_aggregate(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
raise exception.AggregateHostNotFound(aggregate_id=aggregate,
host=host)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
self.req, "1",
body={"remove_host": {"host": "host1"}})
def test_remove_host_with_bad_host(self):
def stub_remove_host_from_aggregate(context, aggregate, host):
raise exception.ComputeHostNotFound(host=host)
self.stubs.Set(self.controller.api,
"remove_host_from_aggregate",
stub_remove_host_from_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
self.req, "1", body={"remove_host": {"host": "bogushost"}})
def test_remove_host_with_missing_host(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body={"asdf": "asdf"})
def test_remove_host_with_extra_param(self):
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body={"remove_host": {"asdf": "asdf",
"host": "asdf"}})
def test_set_metadata(self):
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
def stub_update_aggregate(context, aggregate, values):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
self.assertThat(body["set_metadata"]['metadata'],
matchers.DictMatches(values))
return AGGREGATE
self.stubs.Set(self.controller.api,
"update_aggregate_metadata",
stub_update_aggregate)
result = self.controller.action(self.req, "1", body=body)
self.assertEqual(AGGREGATE, result["aggregate"])
def test_set_metadata_with_bad_aggregate(self):
body = {"set_metadata": {"metadata": {"foo": "bar"}}}
def stub_update_aggregate(context, aggregate, metadata):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api,
"update_aggregate_metadata",
stub_update_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.action,
self.req, "bad_aggregate", body=body)
def test_set_metadata_with_missing_metadata(self):
body = {"asdf": {"foo": "bar"}}
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body=body)
def test_set_metadata_with_extra_params(self):
body = {"metadata": {"foo": "bar"}, "asdf": {"foo": "bar"}}
self.assertRaises(exc.HTTPBadRequest, self.controller.action,
self.req, "1", body=body)
def test_delete_aggregate(self):
def stub_delete_aggregate(context, aggregate):
self.assertEqual(context, self.context, "context")
self.assertEqual("1", aggregate, "aggregate")
stub_delete_aggregate.called = True
self.stubs.Set(self.controller.api, "delete_aggregate",
stub_delete_aggregate)
self.controller.delete(self.req, "1")
self.assertTrue(stub_delete_aggregate.called)
def test_delete_aggregate_with_bad_aggregate(self):
def stub_delete_aggregate(context, aggregate):
raise exception.AggregateNotFound(aggregate_id=aggregate)
self.stubs.Set(self.controller.api, "delete_aggregate",
stub_delete_aggregate)
self.assertRaises(exc.HTTPNotFound, self.controller.delete,
self.req, "bogus_aggregate")
| OpenAcademy-OpenStack/nova-scheduler | nova/tests/api/openstack/compute/contrib/test_aggregates.py | Python | apache-2.0 | 18,751 |
# syft absolute
import syft as sy
from syft.lib.python.string import String
from syft.proto.lib.python.string_pb2 import String as String_PB
def test_string_serde() -> None:
    """Round-trip a String through its protobuf form, preserving the id."""
    original = String("Hello OpenMined")
    proto = original._object2proto()
    # Serialization must yield the String protobuf message type.
    assert isinstance(proto, String_PB)
    restored = String._proto2object(proto=proto)
    # Deserialization must yield a String carrying the same id.
    assert isinstance(restored, String)
    assert restored.id == original.id
def test_string_send(client: sy.VirtualMachineClient) -> None:
    """Sending a String yields a StringPointer whose get() returns it."""
    value = String("Hello OpenMined!")
    pointer = value.send(client)
    # The pointer must be of the String-specific pointer class.
    assert type(pointer).__name__ == "StringPointer"
    # Retrieving the remote object must return an equal String.
    assert pointer.get() == value
| OpenMined/PySyft | packages/syft/tests/syft/lib/python/string/string_serde_test.py | Python | apache-2.0 | 780 |
#
# Copyright 2013 Radware LTD.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api.v2 import attributes as attrs
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db.loadbalancer import loadbalancer_db as ldb
from neutron.db.loadbalancer import loadbalancer_db_mixin as lb_db
from neutron.db import servicetype_db as st_db
from neutron.extensions import loadbalancer
from neutron.extensions import lbaas_agentscheduler
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
from neutron.plugins.common import constants
from neutron.services.loadbalancer import agent_scheduler
from neutron.services import provider_configuration as pconf
from neutron.services import service_base
LOG = logging.getLogger(__name__)
class LoadBalancerPlugin(lb_db.LoadBalancer_db_mixin,
                         agent_scheduler.LbaasAgentSchedulerDbMixin):
    """Implementation of the Neutron Loadbalancer Service Plugin.
    This class manages the workflow of LBaaS request/response.
    Most DB related works are implemented in class
    loadbalancer_db.LoadBalancerPluginDb.
    """
    supported_extension_aliases = ["lbaas",
                                   "lbaas_agent_scheduler",
                                   "service-type"]
    # lbaas agent notifiers to handle agent update operations;
    # can be updated by plugin drivers while loading;
    # will be extracted by neutron manager when loading service plugins;
    agent_notifiers = {}
    def __init__(self):
        """Initialization for the loadbalancer service plugin."""
        self.service_type_manager = st_db.ServiceTypeManager.get_instance()
        self._load_drivers()
    def _load_drivers(self):
        """Loads plugin-drivers specified in configuration."""
        self.drivers, self.default_provider = service_base.load_drivers(
            constants.LOADBALANCER, self)
        # we're at the point when extensions are not loaded yet
        # so prevent policy from being loaded
        ctx = context.get_admin_context(load_admin_roles=False)
        # stop service in case provider was removed, but resources were not
        self._check_orphan_pool_associations(ctx, self.drivers.keys())
    def _check_orphan_pool_associations(self, context, provider_names):
        """Checks remaining associations between pools and providers.
        If admin has not undeployed resources with provider that was deleted
        from configuration, neutron service is stopped. Admin must delete
        resources prior to removing providers from configuration.
        """
        pools = self.get_pools(context)
        lost_providers = set([pool['provider'] for pool in pools
                              if pool['provider'] not in provider_names])
        # resources are left without provider - stop the service
        if lost_providers:
            msg = _("Delete associated loadbalancer pools before "
                    "removing providers %s") % list(lost_providers)
            LOG.exception(msg)
            raise SystemExit(1)
    def _get_driver_for_provider(self, provider):
        # Resolve a loaded driver object by its provider name.
        if provider in self.drivers:
            return self.drivers[provider]
        # raise if not associated (should never be reached)
        raise n_exc.Invalid(_("Error retrieving driver for provider %s") %
                            provider)
    def _get_driver_for_pool(self, context, pool_id):
        # Resolve the driver owning a pool via the pool's 'provider' field.
        pool = self.get_pool(context, pool_id)
        try:
            return self.drivers[pool['provider']]
        except KeyError:
            raise n_exc.Invalid(_("Error retrieving provider for pool %s") %
                                pool_id)
    def get_plugin_type(self):
        """Return the service type constant identifying this plugin."""
        return constants.LOADBALANCER
    def get_plugin_description(self):
        """Return a human-readable description of the plugin."""
        return "Neutron LoadBalancer Service Plugin"
    def create_vip(self, context, vip):
        """Persist a vip in the DB, then deploy it via the pool's driver."""
        v = super(LoadBalancerPlugin, self).create_vip(context, vip)
        driver = self._get_driver_for_pool(context, v['pool_id'])
        driver.create_vip(context, v)
        return v
    def update_vip(self, context, id, vip):
        """Update a vip in the DB, then notify the driver with old/new."""
        if 'status' not in vip['vip']:
            vip['vip']['status'] = constants.PENDING_UPDATE
        old_vip = self.get_vip(context, id)
        v = super(LoadBalancerPlugin, self).update_vip(context, id, vip)
        driver = self._get_driver_for_pool(context, v['pool_id'])
        driver.update_vip(context, old_vip, v)
        return v
    def _delete_db_vip(self, context, id):
        # proxy the call until plugin inherits from DBPlugin
        super(LoadBalancerPlugin, self).delete_vip(context, id)
    def delete_vip(self, context, id):
        """Mark the vip PENDING_DELETE and ask its driver to remove it."""
        self.update_status(context, ldb.Vip,
                           id, constants.PENDING_DELETE)
        v = self.get_vip(context, id)
        driver = self._get_driver_for_pool(context, v['pool_id'])
        driver.delete_vip(context, v)
    def _get_provider_name(self, context, pool):
        # Prefer an explicitly requested provider; otherwise fall back to
        # the configured default, raising if no default is available.
        if ('provider' in pool and
            pool['provider'] != attrs.ATTR_NOT_SPECIFIED):
            provider_name = pconf.normalize_provider_name(pool['provider'])
            self.validate_provider(provider_name)
            return provider_name
        else:
            if not self.default_provider:
                raise pconf.DefaultServiceProviderNotFound(
                    service_type=constants.LOADBALANCER)
            return self.default_provider
    def create_pool(self, context, pool):
        """Create a pool, record its provider association and deploy it."""
        provider_name = self._get_provider_name(context, pool['pool'])
        p = super(LoadBalancerPlugin, self).create_pool(context, pool)
        self.service_type_manager.add_resource_association(
            context,
            constants.LOADBALANCER,
            provider_name, p['id'])
        #need to add provider name to pool dict,
        #because provider was not known to db plugin at pool creation
        p['provider'] = provider_name
        driver = self.drivers[provider_name]
        try:
            driver.create_pool(context, p)
        except loadbalancer.NoEligibleBackend:
            # that should catch cases when backend of any kind
            # is not available (agent, appliance, etc)
            self.update_status(context, ldb.Pool,
                               p['id'], constants.ERROR,
                               "No eligible backend")
            raise loadbalancer.NoEligibleBackend(pool_id=p['id'])
        return p
    def update_pool(self, context, id, pool):
        """Update a pool in the DB, then notify its driver with old/new."""
        if 'status' not in pool['pool']:
            pool['pool']['status'] = constants.PENDING_UPDATE
        old_pool = self.get_pool(context, id)
        p = super(LoadBalancerPlugin, self).update_pool(context, id, pool)
        driver = self._get_driver_for_provider(p['provider'])
        driver.update_pool(context, old_pool, p)
        return p
    def _delete_db_pool(self, context, id):
        # proxy the call until plugin inherits from DBPlugin
        # rely on uuid uniqueness:
        try:
            with context.session.begin(subtransactions=True):
                self.service_type_manager.del_resource_associations(
                    context, [id])
                super(LoadBalancerPlugin, self).delete_pool(context, id)
        except Exception:
            # that should not happen
            # if it's still a case - something goes wrong
            # log the error and mark the pool as ERROR
            LOG.error(_('Failed to delete pool %s, putting it in ERROR state'),
                      id)
            with excutils.save_and_reraise_exception():
                self.update_status(context, ldb.Pool,
                                   id, constants.ERROR)
    def delete_pool(self, context, id):
        """Validate delete preconditions, then hand removal to the driver."""
        # check for delete conditions and update the status
        # within a transaction to avoid a race
        with context.session.begin(subtransactions=True):
            self.update_status(context, ldb.Pool,
                               id, constants.PENDING_DELETE)
            self._ensure_pool_delete_conditions(context, id)
        p = self.get_pool(context, id)
        driver = self._get_driver_for_provider(p['provider'])
        driver.delete_pool(context, p)
    def add_pool_to_lbaas_agent(self, context, agent_id, pool_id):
        """Schedule a pool onto a specific lbaas agent host."""
        agent_db = super(LoadBalancerPlugin, self).add_pool_to_lbaas_agent(
            context, agent_id, pool_id)
        pool = self.get_pool(context, pool_id)
        driver = self._get_driver_for_provider(pool['provider'])
        driver.add_pool_to_host(context, pool, agent_db.host)
    def remove_pool_from_lbaas_agent(self, context, agent_id, pool_id):
        """Unschedule a pool from an lbaas agent host."""
        super(LoadBalancerPlugin, self).remove_pool_from_lbaas_agent(
            context, agent_id, pool_id)
        pool = self.get_pool(context, pool_id)
        agent = self._get_agent(context, agent_id)
        driver = self._get_driver_for_provider(pool['provider'])
        driver.remove_pool_from_host(context, pool, agent.host)
    def create_member(self, context, member):
        """Create a member in the DB and deploy it via the pool's driver."""
        m = super(LoadBalancerPlugin, self).create_member(context, member)
        driver = self._get_driver_for_pool(context, m['pool_id'])
        driver.create_member(context, m)
        LOG.debug(_("create_member_is %s"),m)
        return m
    def update_member(self, context, id, member):
        """Update a member in the DB, then notify the driver with old/new."""
        if 'status' not in member['member']:
            member['member']['status'] = constants.PENDING_UPDATE
        old_member = self.get_member(context, id)
        m = super(LoadBalancerPlugin, self).update_member(context, id, member)
        driver = self._get_driver_for_pool(context, m['pool_id'])
        driver.update_member(context, old_member, m)
        return m
    def _get_pool(self,context,id):
        # Plain DB-layer pool lookup (no driver interaction).
        return super(LoadBalancerPlugin, self).get_pool(context, id)
    def _delete_db_member(self, context, id):
        # proxy the call until plugin inherits from DBPlugin
        super(LoadBalancerPlugin, self).delete_member(context, id)
    def delete_member(self, context, id):
        """Mark the member PENDING_DELETE and ask the driver to remove it."""
        self.update_status(context, ldb.Member,
                           id, constants.PENDING_DELETE)
        m = self.get_member(context, id)
        driver = self._get_driver_for_pool(context, m['pool_id'])
        driver.delete_member(context, m)
    def create_vip_listener(self, context, listener, vip_id=None):
        """Create a listener under a vip and deploy it via the driver."""
        l = super(LoadBalancerPlugin, self).create_vip_listener(context,
            listener, vip_id)
        v = self.get_vip(context, vip_id)
        driver = self._get_driver_for_pool(context, v['pool_id'])
        driver.create_vip_listener(context, l, v['pool_id'])
        LOG.debug(_("create_vip_listener_is %s"),l)
        return l
    def _delete_db_listener(self, context, id, vip_id=None):
        # proxy the call until plugin inherits from DBPlugin
        super(LoadBalancerPlugin, self).delete_vip_listener(context, id, vip_id)
    def delete_vip_listener(self, context, id, vip_id=None):
        """Mark the listener PENDING_DELETE and remove it via the driver."""
        self.update_status(context, lb_db.Listener,
                           id, constants.PENDING_DELETE)
        v = self.get_vip(context, vip_id)
        driver = self._get_driver_for_pool(context, v['pool_id'])
        driver.delete_vip_listener(context, vip_id, v['pool_id'], id)
    def _validate_hm_parameters(self, delay, timeout):
        # A probe interval shorter than its timeout can never succeed.
        if delay < timeout:
            raise loadbalancer.DelayOrTimeoutInvalid()
    def create_health_monitor(self, context, health_monitor):
        """Validate and persist a health monitor (no driver call here)."""
        new_hm = health_monitor['health_monitor']
        self._validate_hm_parameters(new_hm['delay'], new_hm['timeout'])
        hm = super(LoadBalancerPlugin, self).create_health_monitor(
            context,
            health_monitor
        )
        return hm
    def update_health_monitor(self, context, id, health_monitor):
        """Update a health monitor and notify drivers of associated pools."""
        new_hm = health_monitor['health_monitor']
        old_hm = self.get_health_monitor(context, id)
        delay = new_hm.get('delay', old_hm.get('delay'))
        timeout = new_hm.get('timeout', old_hm.get('timeout'))
        self._validate_hm_parameters(delay, timeout)
        hm = super(LoadBalancerPlugin, self).update_health_monitor(
            context,
            id,
            health_monitor
        )
        with context.session.begin(subtransactions=True):
            # Notify the driver of every pool associated with this monitor.
            qry = context.session.query(
                ldb.PoolMonitorAssociation
            ).filter_by(monitor_id=hm['id']).join(ldb.Pool)
            for assoc in qry:
                driver = self._get_driver_for_pool(context, assoc['pool_id'])
                driver.update_pool_health_monitor(context, old_hm,
                                                  hm, assoc['pool_id'])
        return hm
    def _delete_db_pool_health_monitor(self, context, hm_id, pool_id):
        # proxy the call until plugin inherits from DBPlugin
        super(LoadBalancerPlugin, self).delete_pool_health_monitor(context,
                                                                   hm_id,
                                                                   pool_id)
    def _delete_db_health_monitor(self, context, id):
        # proxy the call until plugin inherits from DBPlugin
        super(LoadBalancerPlugin, self).delete_health_monitor(context, id)
    def create_pool_health_monitor(self, context, health_monitor, pool_id):
        """Associate a health monitor with a pool and deploy it."""
        retval = super(LoadBalancerPlugin, self).create_pool_health_monitor(
            context,
            health_monitor,
            pool_id
        )
        monitor_id = health_monitor['health_monitor']['id']
        hm = self.get_health_monitor(context, monitor_id)
        driver = self._get_driver_for_pool(context, pool_id)
        driver.create_pool_health_monitor(context, hm, pool_id)
        return retval
    def delete_pool_health_monitor(self, context, id, pool_id):
        """Mark the association PENDING_DELETE and remove via the driver."""
        self.update_pool_health_monitor(context, id, pool_id,
                                        constants.PENDING_DELETE)
        hm = self.get_health_monitor(context, id)
        driver = self._get_driver_for_pool(context, pool_id)
        driver.delete_pool_health_monitor(context, hm, pool_id)
    def stats(self, context, pool_id):
        """Return pool statistics, refreshing the DB copy from the driver."""
        driver = self._get_driver_for_pool(context, pool_id)
        stats_data = driver.stats(context, pool_id)
        # if we get something from the driver -
        # update the db and return the value from db
        # else - return what we have in db
        if stats_data:
            super(LoadBalancerPlugin, self).update_pool_stats(
                context,
                pool_id,
                stats_data
            )
        return super(LoadBalancerPlugin, self).stats(context,
                                                     pool_id)
    def populate_vip_graph(self, context, vip):
        """Populate the vip with: pool, members, healthmonitors."""
        pool = self.get_pool(context, vip['pool_id'])
        vip['pool'] = pool
        vip['members'] = [self.get_member(context, member_id)
                          for member_id in pool['members']]
        vip['health_monitors'] = [self.get_health_monitor(context, hm_id)
                                  for hm_id in pool['health_monitors']]
        return vip
    def validate_provider(self, provider):
        """Raise if the named provider was not loaded from configuration."""
        if provider not in self.drivers:
            raise pconf.ServiceProviderNotFound(
                provider=provider, service_type=constants.LOADBALANCER)
| nash-x/hws | neutron/services/loadbalancer/plugin.py | Python | apache-2.0 | 15,871 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for TestIamPermissions
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-datacatalog
# [START datacatalog_v1beta1_generated_DataCatalog_TestIamPermissions_async]
from google.cloud import datacatalog_v1beta1
async def sample_test_iam_permissions():
    """Call Data Catalog TestIamPermissions and print the response.

    NOTE: this is generated sample code; the placeholder resource and
    permission values must be replaced before running in a real project.
    """
    # Create a client
    client = datacatalog_v1beta1.DataCatalogAsyncClient()
    # Initialize request argument(s)
    request = datacatalog_v1beta1.TestIamPermissionsRequest(
        resource="resource_value",
        permissions=['permissions_value_1', 'permissions_value_2'],
    )
    # Make the request
    response = await client.test_iam_permissions(request=request)
    # Handle the response
    print(response)
# [END datacatalog_v1beta1_generated_DataCatalog_TestIamPermissions_async]
| googleapis/python-datacatalog | samples/generated_samples/datacatalog_v1beta1_generated_data_catalog_test_iam_permissions_async.py | Python | apache-2.0 | 1,610 |