repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
jalexvig/tensorflow
refs/heads/master
tensorflow/contrib/lookup/lookup_ops_test.py
4
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tf.contrib.lookup.lookup.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import tempfile import numpy as np import six from tensorflow.contrib import lookup from tensorflow.python.client import session from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import lookup_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import saver from tensorflow.python.training import server_lib class HashTableOpTest(test.TestCase): def testHashTable(self): with self.test_session(): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) table = lookup.HashTable( lookup.KeyValueTensorInitializer(keys, values), default_val) table.init.run() self.assertAllEqual(3, table.size().eval()) input_string = 
constant_op.constant(["brain", "salad", "tank"]) output = table.lookup(input_string) self.assertAllEqual([3], output.get_shape()) result = output.eval() self.assertAllEqual([0, 1, -1], result) exported_keys_tensor, exported_values_tensor = table.export() self.assertItemsEqual([b"brain", b"salad", b"surgery"], exported_keys_tensor.eval()) self.assertItemsEqual([0, 1, 2], exported_values_tensor.eval()) def testHashTableFindHighRank(self): with self.test_session(): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) table = lookup.HashTable( lookup.KeyValueTensorInitializer(keys, values), default_val) table.init.run() self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant( [["brain", "salad"], ["tank", "tarkus"]]) output = table.lookup(input_string) result = output.eval() self.assertAllEqual([[0, 1], [-1, -1]], result) def testHashTableInitWithPythonArrays(self): with self.test_session(): default_val = -1 keys = ["brain", "salad", "surgery"] values = [0, 1, 2] table = lookup.HashTable( lookup.KeyValueTensorInitializer( keys, values, value_dtype=dtypes.int64), default_val) table.init.run() self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant(["brain", "salad", "tank"]) output = table.lookup(input_string) result = output.eval() self.assertAllEqual([0, 1, -1], result) def testHashTableInitWithNumPyArrays(self): with self.test_session(): default_val = -1 keys = np.array(["brain", "salad", "surgery"], dtype=np.str) values = np.array([0, 1, 2], dtype=np.int64) table = lookup.HashTable( lookup.KeyValueTensorInitializer(keys, values), default_val) table.init.run() self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant(["brain", "salad", "tank"]) output = table.lookup(input_string) result = output.eval() self.assertAllEqual([0, 1, -1], result) def testMultipleHashTables(self): with self.test_session() as sess: default_val = 
-1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) table1 = lookup.HashTable( lookup.KeyValueTensorInitializer(keys, values), default_val) table2 = lookup.HashTable( lookup.KeyValueTensorInitializer(keys, values), default_val) table3 = lookup.HashTable( lookup.KeyValueTensorInitializer(keys, values), default_val) lookup_ops.tables_initializer().run() self.assertAllEqual(3, table1.size().eval()) self.assertAllEqual(3, table2.size().eval()) self.assertAllEqual(3, table3.size().eval()) input_string = constant_op.constant(["brain", "salad", "tank"]) output1 = table1.lookup(input_string) output2 = table2.lookup(input_string) output3 = table3.lookup(input_string) out1, out2, out3 = sess.run([output1, output2, output3]) self.assertAllEqual([0, 1, -1], out1) self.assertAllEqual([0, 1, -1], out2) self.assertAllEqual([0, 1, -1], out3) def testHashTableWithTensorDefault(self): with self.test_session(): default_val = constant_op.constant(-1, dtypes.int64) keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) table = lookup.HashTable( lookup.KeyValueTensorInitializer(keys, values), default_val) table.init.run() input_string = constant_op.constant(["brain", "salad", "tank"]) output = table.lookup(input_string) result = output.eval() self.assertAllEqual([0, 1, -1], result) def testHashTableWithSparseTensorInput(self): with self.test_session() as sess: default_val = constant_op.constant(-1, dtypes.int64) keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) table = lookup.HashTable( lookup.KeyValueTensorInitializer(keys, values), default_val) table.init.run() sp_indices = [[0, 0], [0, 1], [1, 0]] sp_shape = [2, 2] input_tensor = sparse_tensor.SparseTensor( constant_op.constant(sp_indices, dtypes.int64), constant_op.constant(["brain", "salad", "tank"]), constant_op.constant(sp_shape, dtypes.int64)) 
output = table.lookup(input_tensor) out_indices, out_values, out_shape = sess.run(output) self.assertAllEqual([0, 1, -1], out_values) self.assertAllEqual(sp_indices, out_indices) self.assertAllEqual(sp_shape, out_shape) def testSignatureMismatch(self): with self.test_session(): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) table = lookup.HashTable( lookup.KeyValueTensorInitializer(keys, values), default_val) table.init.run() # Ref types do not produce a lookup signature mismatch. input_string_ref = variables.Variable("brain") variables.global_variables_initializer().run() self.assertEqual(0, table.lookup(input_string_ref).eval()) input_string = constant_op.constant([1, 2, 3], dtypes.int64) with self.assertRaises(TypeError): table.lookup(input_string) with self.assertRaises(TypeError): lookup.HashTable( lookup.KeyValueTensorInitializer(keys, values), "UNK") def testDTypes(self): with self.test_session(): default_val = -1 with self.assertRaises(TypeError): lookup.HashTable( lookup.KeyValueTensorInitializer(["a"], [1], [dtypes.string], dtypes.int64), default_val) def testNotInitialized(self): with self.test_session(): default_val = -1 table = lookup.HashTable( lookup.KeyValueTensorInitializer( ["a"], [1], value_dtype=dtypes.int64), default_val) input_string = constant_op.constant(["brain", "salad", "surgery"]) output = table.lookup(input_string) with self.assertRaisesOpError("Table not initialized"): output.eval() def testInitializeTwice(self): with self.test_session(): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) table = lookup.HashTable( lookup.KeyValueTensorInitializer(keys, values), default_val) table.init.run() with self.assertRaisesOpError("Table already initialized"): table.init.run() def testInitializationWithInvalidDimensions(self): with self.test_session(): default_val = -1 keys = 
constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2, 3, 4], dtypes.int64) with self.assertRaises(ValueError): lookup.HashTable( lookup.KeyValueTensorInitializer(keys, values), default_val) def testMultipleSessions(self): # Start a server server = server_lib.Server( { "local0": ["localhost:0"] }, protocol="grpc", start=True) # Create two sessions sharing the same state session1 = session.Session(server.target) session2 = session.Session(server.target) default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) table = lookup.HashTable( lookup.KeyValueTensorInitializer(keys, values), default_val, name="t1") # Init the table in the first session. with session1: table.init.run() self.assertAllEqual(3, table.size().eval()) # Init the table in the second session and verify that we do not get a # "Table already initialized" error. with session2: table.init.run() self.assertAllEqual(3, table.size().eval()) def testHashTableInt32String(self): with self.test_session(): default_val = "n/a" keys = constant_op.constant([0, 1, 2], dtypes.int32) values = constant_op.constant(["brain", "salad", "surgery"]) table = lookup.HashTable( lookup.KeyValueTensorInitializer(keys, values), default_val) table.init.run() input_tensor = constant_op.constant([0, 1, -1]) output = table.lookup(input_tensor) result = output.eval() self.assertAllEqual([b"brain", b"salad", b"n/a"], result) class MutableHashTableOpTest(test.TestCase): def testMutableHashTable(self): with self.test_session(): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant(["brain", "salad", "tank"]) output = 
table.lookup(input_string) self.assertAllEqual([3], output.get_shape()) result = output.eval() self.assertAllEqual([0, 1, -1], result) exported_keys, exported_values = table.export() self.assertAllEqual([None], exported_keys.get_shape().as_list()) self.assertAllEqual([None], exported_values.get_shape().as_list()) # exported data is in the order of the internal map, i.e. undefined sorted_keys = np.sort(exported_keys.eval()) sorted_values = np.sort(exported_values.eval()) self.assertAllEqual([b"brain", b"salad", b"surgery"], sorted_keys) self.assertAllEqual([0, 1, 2], sorted_values) def testSaveRestore(self): save_dir = os.path.join(self.get_temp_dir(), "save_restore") save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash") with self.test_session(graph=ops.Graph()) as sess: v0 = variables.Variable(10.0, name="v0") v1 = variables.Variable(20.0, name="v1") default_val = -1 keys = constant_op.constant(["b", "c", "d"], dtypes.string) values = constant_op.constant([0, 1, 2], dtypes.int64) table = lookup.MutableHashTable( dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True) save = saver.Saver() variables.global_variables_initializer().run() # Check that the parameter nodes have been initialized. 
self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) val = save.save(sess, save_path) self.assertTrue(isinstance(val, six.string_types)) self.assertEqual(save_path, val) with self.test_session(graph=ops.Graph()) as sess: v0 = variables.Variable(-1.0, name="v0") v1 = variables.Variable(-1.0, name="v1") default_val = -1 table = lookup.MutableHashTable( dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True) table.insert( constant_op.constant(["a", "c"], dtypes.string), constant_op.constant([12, 24], dtypes.int64)).run() self.assertAllEqual(2, table.size().eval()) save = saver.Saver() # Restore the saved values in the parameter nodes. save.restore(sess, save_path) # Check that the parameter nodes have been restored. self.assertEqual(10.0, v0.eval()) self.assertEqual(20.0, v1.eval()) self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant(["a", "b", "c", "d", "e"], dtypes.string) output = table.lookup(input_string) self.assertAllEqual([-1, 0, 1, 2, -1], output.eval()) def testSharing(self): # Start a server to store the table state server = server_lib.Server( { "local0": ["localhost:0"] }, protocol="grpc", start=True) # Create two sessions sharing the same state session1 = session.Session(server.target) session2 = session.Session(server.target) table = lookup.MutableHashTable( dtypes.int64, dtypes.string, "-", name="t1") # Populate the table in the first session with session1: self.assertAllEqual(0, table.size().eval()) keys = constant_op.constant([11, 12], dtypes.int64) values = constant_op.constant(["a", "b"]) table.insert(keys, values).run() self.assertAllEqual(2, table.size().eval()) output = table.lookup(constant_op.constant([11, 12, 13], dtypes.int64)) self.assertAllEqual([b"a", b"b", b"-"], output.eval()) # Verify that we can access the shared data from the second session with session2: 
self.assertAllEqual(2, table.size().eval()) output = table.lookup(constant_op.constant([10, 11, 12], dtypes.int64)) self.assertAllEqual([b"-", b"a", b"b"], output.eval()) def testMutableHashTableOfTensors(self): with self.test_session(): default_val = constant_op.constant([-1, -1], dtypes.int64) keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64) table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant(["brain", "salad", "tank"]) output = table.lookup(input_string) self.assertAllEqual([3, 2], output.get_shape()) result = output.eval() self.assertAllEqual([[0, 1], [2, 3], [-1, -1]], result) exported_keys, exported_values = table.export() self.assertAllEqual([None], exported_keys.get_shape().as_list()) self.assertAllEqual([None, 2], exported_values.get_shape().as_list()) # exported data is in the order of the internal map, i.e. 
undefined sorted_keys = np.sort(exported_keys.eval()) sorted_values = np.sort(exported_values.eval()) self.assertAllEqual([b"brain", b"salad", b"surgery"], sorted_keys) self.assertAllEqual([[4, 5], [2, 3], [0, 1]], sorted_values) def testMutableHashTableExportInsert(self): with self.test_session(): default_val = constant_op.constant([-1, -1], dtypes.int64) keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64) table1 = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) self.assertAllEqual(0, table1.size().eval()) table1.insert(keys, values).run() self.assertAllEqual(3, table1.size().eval()) input_string = constant_op.constant(["brain", "salad", "tank"]) expected_output = [[0, 1], [2, 3], [-1, -1]] output1 = table1.lookup(input_string) self.assertAllEqual(expected_output, output1.eval()) exported_keys, exported_values = table1.export() self.assertAllEqual(3, exported_keys.eval().size) self.assertAllEqual(6, exported_values.eval().size) # Populate a second table from the exported data table2 = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) self.assertAllEqual(0, table2.size().eval()) table2.insert(exported_keys, exported_values).run() self.assertAllEqual(3, table2.size().eval()) # Verify lookup result is still the same output2 = table2.lookup(input_string) self.assertAllEqual(expected_output, output2.eval()) def testMutableHashTableOfTensorsInvalidShape(self): with self.test_session(): default_val = constant_op.constant([-1, -1], dtypes.int64) keys = constant_op.constant(["brain", "salad", "surgery"]) table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) # Shape [6] instead of [3, 2] values = constant_op.constant([0, 1, 2, 3, 4, 5], dtypes.int64) with self.assertRaisesOpError("Expected shape"): table.insert(keys, values).run() # Shape [2,3] instead of [3, 2] values = constant_op.constant([[0, 1, 2], [3, 4, 5]], dtypes.int64) with 
self.assertRaisesOpError("Expected shape"): table.insert(keys, values).run() # Shape [2, 2] instead of [3, 2] values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64) with self.assertRaisesOpError("Expected shape"): table.insert(keys, values).run() # Shape [3, 1] instead of [3, 2] values = constant_op.constant([[0], [2], [4]], dtypes.int64) with self.assertRaisesOpError("Expected shape"): table.insert(keys, values).run() # Valid Insert values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) def testMutableHashTableInvalidDefaultValue(self): with self.test_session(): default_val = constant_op.constant([[-1, -1]], dtypes.int64) table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) with self.assertRaisesOpError("Default value must be a vector"): self.assertAllEqual(0, table.size().eval()) def testMutableHashTableDuplicateInsert(self): with self.test_session(): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery", "brain"]) values = constant_op.constant([0, 1, 2, 3], dtypes.int64) table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant(["brain", "salad", "tank"]) output = table.lookup(input_string) result = output.eval() self.assertAllEqual([3, 1, -1], result) def testMutableHashTableFindHighRank(self): with self.test_session(): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant( [["brain", "salad"], ["tank", "tarkus"]]) output = table.lookup(input_string) self.assertAllEqual([2, 2], 
output.get_shape()) result = output.eval() self.assertAllEqual([[0, 1], [-1, -1]], result) def testMutableHashTableInsertHighRank(self): with self.test_session(): default_val = -1 keys = constant_op.constant([["brain", "salad"], ["surgery", "tank"]]) values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64) table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) table.insert(keys, values).run() self.assertAllEqual(4, table.size().eval()) input_string = constant_op.constant(["brain", "salad", "tank", "tarkus"]) output = table.lookup(input_string) result = output.eval() self.assertAllEqual([0, 1, 3, -1], result) def testMutableHashTableOfTensorsFindHighRank(self): with self.test_session(): default_val = constant_op.constant([-1, -1, -1], dtypes.int64) keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([[0, 1, 2], [2, 3, 4], [4, 5, 6]], dtypes.int64) table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant( [["brain", "salad"], ["tank", "tarkus"]]) output = table.lookup(input_string) self.assertAllEqual([2, 2, 3], output.get_shape()) result = output.eval() self.assertAllEqual( [[[0, 1, 2], [2, 3, 4]], [[-1, -1, -1], [-1, -1, -1]]], result) def testMultipleMutableHashTables(self): with self.test_session() as sess: default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) table1 = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) table2 = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) table3 = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) table1.insert(keys, values).run() table2.insert(keys, values).run() table3.insert(keys, values).run() self.assertAllEqual(3, table1.size().eval()) self.assertAllEqual(3, table2.size().eval()) self.assertAllEqual(3, 
table3.size().eval()) input_string = constant_op.constant(["brain", "salad", "tank"]) output1 = table1.lookup(input_string) output2 = table2.lookup(input_string) output3 = table3.lookup(input_string) out1, out2, out3 = sess.run([output1, output2, output3]) self.assertAllEqual([0, 1, -1], out1) self.assertAllEqual([0, 1, -1], out2) self.assertAllEqual([0, 1, -1], out3) def testMutableHashTableWithTensorDefault(self): with self.test_session(): default_val = constant_op.constant(-1, dtypes.int64) keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant(["brain", "salad", "tank"]) output = table.lookup(input_string) result = output.eval() self.assertAllEqual([0, 1, -1], result) def testSignatureMismatch(self): with self.test_session(): default_val = -1 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1, 2], dtypes.int64) table = lookup.MutableHashTable(dtypes.string, dtypes.int64, default_val) # insert with keys of the wrong type with self.assertRaises(TypeError): table.insert(constant_op.constant([4, 5, 6]), values).run() # insert with values of the wrong type with self.assertRaises(TypeError): table.insert(keys, constant_op.constant(["a", "b", "c"])).run() self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) input_string_ref = variables.Variable("brain") input_int64_ref = variables.Variable(-1, dtype=dtypes.int64) variables.global_variables_initializer().run() # Ref types do not produce an insert signature mismatch. table.insert(input_string_ref, input_int64_ref).run() self.assertAllEqual(3, table.size().eval()) # Ref types do not produce a lookup signature mismatch. 
self.assertEqual(-1, table.lookup(input_string_ref).eval()) # lookup with keys of the wrong type input_string = constant_op.constant([1, 2, 3], dtypes.int64) with self.assertRaises(TypeError): table.lookup(input_string).eval() # default value of the wrong type with self.assertRaises(TypeError): lookup.MutableHashTable(dtypes.string, dtypes.int64, "UNK") def testMutableHashTableStringFloat(self): with self.test_session(): default_val = -1.5 keys = constant_op.constant(["brain", "salad", "surgery"]) values = constant_op.constant([0, 1.1, 2.2], dtypes.float32) table = lookup.MutableHashTable(dtypes.string, dtypes.float32, default_val) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant(["brain", "salad", "tank"]) output = table.lookup(input_string) result = output.eval() self.assertAllClose([0, 1.1, default_val], result) def testMutableHashTableIntFloat(self): with self.test_session(): default_val = -1.0 keys = constant_op.constant([3, 7, 0], dtypes.int64) values = constant_op.constant([7.5, -1.2, 9.9], dtypes.float32) table = lookup.MutableHashTable(dtypes.int64, dtypes.float32, default_val) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant([7, 0, 11], dtypes.int64) output = table.lookup(input_string) result = output.eval() self.assertAllClose([-1.2, 9.9, default_val], result) def testMutableHashTableInt64String(self): with self.test_session(): default_val = "n/a" keys = constant_op.constant([0, 1, 2], dtypes.int64) values = constant_op.constant(["brain", "salad", "surgery"]) table = lookup.MutableHashTable(dtypes.int64, dtypes.string, default_val) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant([0, 1, 3], dtypes.int64) output = 
table.lookup(input_string) result = output.eval() self.assertAllEqual((b"brain", b"salad", b"n/a"), result) class MutableDenseHashTableOpTest(test.TestCase): def testBasic(self): with self.test_session(): keys = constant_op.constant([11, 12, 13], dtypes.int64) values = constant_op.constant([0, 1, 2], dtypes.int64) table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=-1, empty_key=0) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant([11, 12, 15], dtypes.int64) output = table.lookup(input_string) self.assertAllEqual([3], output.get_shape()) result = output.eval() self.assertAllEqual([0, 1, -1], result) def testBasicBool(self): with self.test_session(): keys = constant_op.constant([11, 12, 13], dtypes.int64) values = constant_op.constant([True, True, True], dtypes.bool) table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.bool, default_value=False, empty_key=0) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant([11, 12, 15], dtypes.int64) output = table.lookup(input_string) self.assertAllEqual([3], output.get_shape()) result = output.eval() self.assertAllEqual([True, True, False], result) def testLookupUnknownShape(self): with self.test_session(): keys = constant_op.constant([11, 12, 13], dtypes.int64) values = constant_op.constant([0, 1, 2], dtypes.int64) table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=-1, empty_key=0) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) placeholder_keys = array_ops.placeholder(dtypes.int64) output = table.lookup(placeholder_keys) self.assertAllEqual(None, output.get_shape()) result = output.eval({placeholder_keys: [11, 12, 15]}) self.assertAllEqual([0, 1, -1], result) def testMapStringToFloat(self): with self.test_session(): keys = 
constant_op.constant(["a", "b", "c"], dtypes.string) values = constant_op.constant([0.0, 1.1, 2.2], dtypes.float32) default_value = constant_op.constant(-1.5, dtypes.float32) table = lookup.MutableDenseHashTable( dtypes.string, dtypes.float32, default_value=default_value, empty_key="") self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant(["a", "b", "d"], dtypes.string) output = table.lookup(input_string) self.assertAllEqual([3], output.get_shape()) result = output.eval() self.assertAllClose([0, 1.1, -1.5], result) def testMapInt64ToFloat(self): for float_dtype in [dtypes.float32, dtypes.float64]: with self.test_session(): keys = constant_op.constant([11, 12, 13], dtypes.int64) values = constant_op.constant([0.0, 1.1, 2.2], float_dtype) default_value = constant_op.constant(-1.5, float_dtype) table = lookup.MutableDenseHashTable( dtypes.int64, float_dtype, default_value=default_value, empty_key=0) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) input_string = constant_op.constant([11, 12, 15], dtypes.int64) output = table.lookup(input_string) self.assertAllEqual([3], output.get_shape()) result = output.eval() self.assertAllClose([0, 1.1, -1.5], result) def testVectorValues(self): with self.test_session(): keys = constant_op.constant([11, 12, 13], dtypes.int64) values = constant_op.constant([[0, 1, 2, 3], [3, 4, 5, 6], [6, 7, 8, 9]], dtypes.int64) default_value = constant_op.constant([-1, -2, -3, -4], dtypes.int64) table = lookup.MutableDenseHashTable( dtypes.int64, dtypes.int64, default_value=default_value, empty_key=0, initial_num_buckets=4) self.assertAllEqual(0, table.size().eval()) table.insert(keys, values).run() self.assertAllEqual(3, table.size().eval()) self.assertAllEqual(4, len(table.export()[0].eval())) table.insert( constant_op.constant([14], dtypes.int64), 
          constant_op.constant([[2, 3, 4, 5]], dtypes.int64)).run()
      self.assertAllEqual(4, table.size().eval())
      self.assertAllEqual(8, len(table.export()[0].eval()))

      input_string = constant_op.constant([11, 12, 15], dtypes.int64)
      output = table.lookup(input_string)
      self.assertAllEqual([3, 4], output.get_shape())

      result = output.eval()
      self.assertAllEqual([[0, 1, 2, 3], [3, 4, 5, 6], [-1, -2, -3, -4]],
                          result)

  def testVectorKeys(self):
    # Keys are rank-2 (each key is itself a vector); values are scalars.
    with self.test_session():
      keys = constant_op.constant([[0, 1], [1, 2], [1, 3]], dtypes.int64)
      values = constant_op.constant([10, 11, 12], dtypes.int64)
      empty_key = constant_op.constant([0, 3], dtypes.int64)
      default_value = constant_op.constant(-1, dtypes.int64)
      table = lookup.MutableDenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=default_value,
          empty_key=empty_key,
          initial_num_buckets=8)
      self.assertAllEqual(0, table.size().eval())

      table.insert(keys, values).run()
      self.assertAllEqual(3, table.size().eval())

      table.insert(
          constant_op.constant([[0, 0]], dtypes.int64),
          constant_op.constant([13], dtypes.int64)).run()
      self.assertAllEqual(4, table.size().eval())
      self.assertAllEqual(8, len(table.export()[0].eval()))

      input_string = constant_op.constant([[0, 1], [1, 2], [0, 2]],
                                          dtypes.int64)
      output = table.lookup(input_string)
      self.assertAllEqual([3], output.get_shape())

      result = output.eval()
      self.assertAllEqual([10, 11, -1], result)

  def testResize(self):
    # Inserting past the load factor must grow the bucket count (4 -> 16).
    with self.test_session():
      keys = constant_op.constant([11, 12, 13], dtypes.int64)
      values = constant_op.constant([0, 1, 2], dtypes.int64)
      table = lookup.MutableDenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=-1,
          empty_key=0,
          initial_num_buckets=4)
      self.assertAllEqual(0, table.size().eval())

      table.insert(keys, values).run()
      self.assertAllEqual(3, table.size().eval())
      self.assertAllEqual(4, len(table.export()[0].eval()))

      keys2 = constant_op.constant([13, 14, 15, 16, 17], dtypes.int64)
      values2 = constant_op.constant([3, 4, 5, 6, 7], dtypes.int64)
      table.insert(keys2, values2).run()
      self.assertAllEqual(7, table.size().eval())
      self.assertAllEqual(16, len(table.export()[0].eval()))

      keys3 = constant_op.constant([10, 11, 12, 13, 14, 15, 16, 17, 18],
                                   dtypes.int64)
      output = table.lookup(keys3)
      self.assertAllEqual([-1, 0, 1, 3, 4, 5, 6, 7, -1], output.eval())

  def testExport(self):
    with self.test_session():
      keys = constant_op.constant([11, 12, 13], dtypes.int64)
      values = constant_op.constant([1, 2, 3], dtypes.int64)
      table = lookup.MutableDenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=-1,
          empty_key=100,
          initial_num_buckets=8)
      self.assertAllEqual(0, table.size().eval())

      table.insert(keys, values).run()
      self.assertAllEqual(3, table.size().eval())

      exported_keys, exported_values = table.export()
      self.assertAllEqual([None], exported_keys.get_shape().as_list())
      self.assertAllEqual([None], exported_values.get_shape().as_list())

      np_keys = exported_keys.eval()
      np_values = exported_values.eval()

      self.assertAllEqual(8, len(np_keys))
      self.assertAllEqual(8, len(np_values))

      # pair up keys and values, drop extra added dimension
      pairs = np.dstack((np_keys.flatten(), np_values.flatten()))[0]
      # sort by key
      pairs = pairs[pairs[:, 0].argsort()]
      self.assertAllEqual([[11, 1], [12, 2], [13, 3], [100, 0], [100, 0],
                           [100, 0], [100, 0], [100, 0]], pairs)

  def testSaveRestore(self):
    # Checkpoint a 32-bucket table and restore it into a table created with
    # 64 buckets and conflicting entries; restore must win (back to 32).
    save_dir = os.path.join(self.get_temp_dir(), "save_restore")
    save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")

    with self.test_session(graph=ops.Graph()) as sess:
      default_value = -1
      empty_key = 0
      keys = constant_op.constant([11, 12, 13], dtypes.int64)
      values = constant_op.constant([0, 1, 2], dtypes.int64)
      table = lookup.MutableDenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=default_value,
          empty_key=empty_key,
          name="t1",
          checkpoint=True,
          initial_num_buckets=32)
      save = saver.Saver()

      self.assertAllEqual(0, table.size().eval())
      table.insert(keys, values).run()
      self.assertAllEqual(3, table.size().eval())
      self.assertAllEqual(32, len(table.export()[0].eval()))

      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path, val)

    with self.test_session(graph=ops.Graph()) as sess:
      table = lookup.MutableDenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=default_value,
          empty_key=empty_key,
          name="t1",
          checkpoint=True,
          initial_num_buckets=64)
      table.insert(
          constant_op.constant([11, 14], dtypes.int64),
          constant_op.constant([12, 24], dtypes.int64)).run()
      self.assertAllEqual(2, table.size().eval())
      self.assertAllEqual(64, len(table.export()[0].eval()))

      save = saver.Saver()

      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)

      self.assertAllEqual(3, table.size().eval())
      self.assertAllEqual(32, len(table.export()[0].eval()))

      input_string = constant_op.constant([10, 11, 12, 13, 14], dtypes.int64)
      output = table.lookup(input_string)
      self.assertAllEqual([-1, 0, 1, 2, -1], output.eval())

  def testVectorSaveRestore(self):
    # Same round trip as testSaveRestore but with vector keys and values.
    save_dir = os.path.join(self.get_temp_dir(), "vector_save_restore")
    save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")

    with self.test_session(graph=ops.Graph()) as sess:
      empty_key = constant_op.constant([11, 13], dtypes.int64)
      default_value = constant_op.constant([-1, -2], dtypes.int64)
      keys = constant_op.constant([[11, 12], [11, 14], [13, 14]], dtypes.int64)
      values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64)
      table = lookup.MutableDenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=default_value,
          empty_key=empty_key,
          name="t1",
          checkpoint=True,
          initial_num_buckets=32)
      save = saver.Saver()

      self.assertAllEqual(0, table.size().eval())
      table.insert(keys, values).run()
      self.assertAllEqual(3, table.size().eval())
      self.assertAllEqual(32, len(table.export()[0].eval()))

      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path, val)

    with self.test_session(graph=ops.Graph()) as sess:
      empty_key = constant_op.constant([11, 13], dtypes.int64)
      default_value = constant_op.constant([-1, -2], dtypes.int64)
      table = lookup.MutableDenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=default_value,
          empty_key=empty_key,
          name="t1",
          checkpoint=True,
          initial_num_buckets=64)
      table.insert(
          constant_op.constant([[11, 12], [13, 15]], dtypes.int64),
          constant_op.constant([[21, 22], [23, 24]], dtypes.int64)).run()
      self.assertAllEqual(2, table.size().eval())
      self.assertAllEqual(64, len(table.export()[0].eval()))

      save = saver.Saver()

      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)

      self.assertAllEqual(3, table.size().eval())
      self.assertAllEqual(32, len(table.export()[0].eval()))

      input_string = constant_op.constant(
          [[11, 12], [11, 14], [11, 15], [13, 14], [13, 15]], dtypes.int64)
      output = table.lookup(input_string)
      self.assertAllEqual([[0, 1], [2, 3], [-1, -2], [4, 5], [-1, -2]],
                          output.eval())

  def testVectorScalarSaveRestore(self):
    # Vector keys with scalar values round-tripped through a checkpoint.
    save_dir = os.path.join(self.get_temp_dir(), "vector_scalar_save_restore")
    save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")

    with self.test_session(graph=ops.Graph()) as sess:
      empty_key = constant_op.constant([11, 13], dtypes.int64)
      default_value = constant_op.constant(-1, dtypes.int64)
      keys = constant_op.constant([[11, 12], [11, 14], [13, 14]], dtypes.int64)
      values = constant_op.constant([0, 1, 2], dtypes.int64)
      table = lookup.MutableDenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=default_value,
          empty_key=empty_key,
          name="t2",
          checkpoint=True,
          initial_num_buckets=32)
      save = saver.Saver()

      self.assertAllEqual(0, table.size().eval())
      table.insert(keys, values).run()
      self.assertAllEqual(3, table.size().eval())
      self.assertAllEqual(32, len(table.export()[0].eval()))

      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path, val)

    with self.test_session(graph=ops.Graph()) as sess:
      empty_key = constant_op.constant([11, 13], dtypes.int64)
      default_value = constant_op.constant(-1, dtypes.int64)
      table = lookup.MutableDenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=default_value,
          empty_key=empty_key,
          name="t2",
          checkpoint=True,
          initial_num_buckets=64)
      table.insert(
          constant_op.constant([[11, 12], [13, 15]], dtypes.int64),
          constant_op.constant([3, 4], dtypes.int64)).run()
      self.assertAllEqual(2, table.size().eval())
      self.assertAllEqual(64, len(table.export()[0].eval()))

      save = saver.Saver()

      # Restore the saved values in the parameter nodes.
      save.restore(sess, save_path)

      self.assertAllEqual(3, table.size().eval())
      self.assertAllEqual(32, len(table.export()[0].eval()))

      input_string = constant_op.constant(
          [[11, 12], [11, 14], [11, 15], [13, 14], [13, 15]], dtypes.int64)
      output = table.lookup(input_string)
      self.assertAllEqual([0, 1, -1, 2, -1], output.eval())

  def testReprobe(self):
    with self.test_session():
      # Insert 6 keys into a table with 8 buckets.
      # The values are chosen to make sure collisions occur when using GCC STL
      keys = constant_op.constant([11, 12, 13, 19, 20, 21], dtypes.int64)
      values = constant_op.constant([51, 52, 53, 54, 55, 56], dtypes.int64)
      table = lookup.MutableDenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=-1,
          empty_key=0,
          initial_num_buckets=8)
      self.assertAllEqual(0, table.size().eval())

      table.insert(keys, values).run()
      self.assertAllEqual(6, table.size().eval())

      input_string = constant_op.constant([10, 11, 12, 13, 14, 19, 20, 21, 22],
                                          dtypes.int64)
      output = table.lookup(input_string)
      self.assertAllEqual([9], output.get_shape())

      result = output.eval()
      self.assertAllEqual([-1, 51, 52, 53, -1, 54, 55, 56, -1], result)

  def testCustomEmptyKey(self):
    # A non-default empty_key (12) lets 0 be used as a regular key.
    with self.test_session():
      keys = constant_op.constant([11, 0, 13], dtypes.int64)
      values = constant_op.constant([0, 1, 2], dtypes.int64)
      table = lookup.MutableDenseHashTable(
          dtypes.int64, dtypes.int64, default_value=-1, empty_key=12)
      self.assertAllEqual(0, table.size().eval())

      table.insert(keys, values).run()
      self.assertAllEqual(3, table.size().eval())

      input_string = constant_op.constant([11, 0, 15], dtypes.int64)
      output = table.lookup(input_string)
      self.assertAllEqual([3], output.get_shape())

      result = output.eval()
      self.assertAllEqual([0, 1, -1], result)

  def testErrors(self):
    with self.test_session():
      table = lookup.MutableDenseHashTable(
          dtypes.int64, dtypes.int64, default_value=-1, empty_key=0)

      # Inserting the empty key returns an error
      keys = constant_op.constant([11, 0], dtypes.int64)
      values = constant_op.constant([0, 1], dtypes.int64)
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "empty_key"):
        table.insert(keys, values).run()

      # Looking up the empty key returns an error
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "empty_key"):
        table.lookup(keys).eval()

      # Arbitrary tensors of keys are not supported
      keys = constant_op.constant([[11, 0], [12, 1]], dtypes.int64)
      values = constant_op.constant([[11, 0], [12, 1]], dtypes.int64)
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "Expected key shape"):
        table.lookup(keys).eval()
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "Expected key shape"):
        table.insert(keys, values).run()

      # Bucket counts that are not a power of two are rejected.
      table2 = lookup.MutableDenseHashTable(
          dtypes.int64,
          dtypes.int64,
          default_value=-1,
          empty_key=17,
          initial_num_buckets=12)
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   "Number of buckets must be"):
        self.assertAllEqual(0, table2.size().eval())


class IndexTableFromFile(test.TestCase):
  """Tests for lookup.index_table_from_file (string/int key -> id)."""

  def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
    # Writes one vocabulary entry per line and returns the file path.
    vocabulary_file = os.path.join(self.get_temp_dir(), basename)
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(values) + "\n")
    return vocabulary_file

  def test_string_index_table_from_file(self):
    vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
    with self.test_session():
      table = lookup.index_table_from_file(
          vocabulary_file=vocabulary_file, num_oov_buckets=1)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, 3), ids.eval())

  def test_string_index_table_from_file_tensor_filename(self):
    vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
    with self.test_session():
      vocabulary_file = constant_op.constant(vocabulary_file)
      table = lookup.index_table_from_file(
          vocabulary_file=vocabulary_file, num_oov_buckets=1)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, 3), ids.eval())
      # A constant filename is tracked as an asset.
      self.assertEqual(1,
                       len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))

  def test_string_index_table_from_file_placeholder_filename(self):
    vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
    with self.test_session():
      vocabulary_placeholder = array_ops.placeholder(dtypes.string, [])
      table = lookup.index_table_from_file(
          vocabulary_file=vocabulary_placeholder, num_oov_buckets=1)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))

      self.assertRaises(errors_impl.OpError, ids.eval)

      feed_dict = {vocabulary_placeholder.name: vocabulary_file}
      lookup_ops.tables_initializer().run(feed_dict=feed_dict)
      self.assertAllEqual((1, 2, 3), ids.eval())
      # A fed filename must NOT be tracked as an asset.
      self.assertEqual(0,
                       len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))

  def test_int32_index_table_from_file(self):
    vocabulary_file = self._createVocabFile(
        "f2i_vocab2.txt", values=("42", "1", "-1000"))
    with self.test_session():
      table = lookup.index_table_from_file(
          vocabulary_file=vocabulary_file,
          num_oov_buckets=1,
          key_dtype=dtypes.int32)
      ids = table.lookup(
          constant_op.constant((1, -1000, 11), dtype=dtypes.int32))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, 3), ids.eval())

  def test_int64_index_table_from_file(self):
    vocabulary_file = self._createVocabFile(
        "f2i_vocab3.txt", values=("42", "1", "-1000"))
    with self.test_session():
      table = lookup.index_table_from_file(
          vocabulary_file=vocabulary_file,
          num_oov_buckets=1,
          key_dtype=dtypes.int64)
      ids = table.lookup(
          constant_op.constant((1, -1000, 11), dtype=dtypes.int64))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, 3), ids.eval())

  def test_index_table_from_file_with_default_value(self):
    default_value = -42
    vocabulary_file = self._createVocabFile("f2i_vocab4.txt")
    with self.test_session():
      table = lookup.index_table_from_file(
          vocabulary_file=vocabulary_file, default_value=default_value)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, default_value), ids.eval())

  def test_index_table_from_file_with_oov_buckets(self):
    vocabulary_file = self._createVocabFile("f2i_vocab5.txt")
    with self.test_session():
      table = lookup.index_table_from_file(
          vocabulary_file=vocabulary_file, num_oov_buckets=1000)
      ids = table.lookup(
          constant_op.constant(["salad", "surgery", "tarkus", "toccata"]))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual(
          (
              1,  # From vocabulary file.
              2,  # From vocabulary file.
              867,  # 3 + fingerprint("tarkus") mod 300.
              860),  # 3 + fingerprint("toccata") mod 300.
          ids.eval())

  def test_index_table_from_file_fails_with_empty_vocabulary_file_name(self):
    self.assertRaises(
        ValueError, lookup.index_table_from_file, vocabulary_file="")

  def test_index_table_from_file_fails_with_empty_vocabulary(self):
    self.assertRaises(
        ValueError, lookup.index_table_from_file, vocabulary_file=None)

  def test_index_table_from_file_with_vocab_size_too_small(self):
    vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
    with self.test_session():
      table = lookup.index_table_from_file(
          vocabulary_file=vocabulary_file, vocab_size=2)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, -1, -1), ids.eval())
      self.assertEqual(2, table.size().eval())

  def test_index_table_from_file_with_vocab_size_too_large(self):
    vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
    with self.test_session():
      table = lookup.index_table_from_file(
          vocabulary_file=vocabulary_file, vocab_size=4)
      self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                              "Invalid vocab_size", table.init.run)

  def test_index_table_from_file_with_vocab_size(self):
    vocabulary_file = self._createVocabFile("f2i_vocab8.txt")

    self.assertRaises(
        ValueError,
        lookup.index_table_from_file,
        vocabulary_file=vocabulary_file,
        vocab_size=0)

    with self.test_session():
      table = lookup.index_table_from_file(
          vocabulary_file=vocabulary_file, vocab_size=3)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, -1), ids.eval())
      self.assertEqual(3, table.size().eval())

  def test_index_table_from_file_with_invalid_hashers(self):
    vocabulary_file = self._createVocabFile("invalid_hasher.txt")
    with self.test_session():
      with self.assertRaises(TypeError):
        lookup.index_table_from_file(
            vocabulary_file=vocabulary_file,
            vocab_size=3,
            num_oov_buckets=1,
            hasher_spec=1)

      table = lookup.index_table_from_file(
          vocabulary_file=vocabulary_file,
          vocab_size=3,
          num_oov_buckets=1,
          hasher_spec=lookup.HasherSpec("my-awesome-hash", None))

      self.assertRaises(ValueError, table.lookup,
                        constant_op.constant(["salad", "surgery", "tarkus"]))


class KeyValueTensorInitializerTest(test.TestCase):
  """Tests for initializing a HashTable from in-graph key/value tensors."""

  def test_string(self):
    with ops.Graph().as_default(), self.test_session():
      init = lookup.KeyValueTensorInitializer(
          ("brain", "salad", "surgery"), (0, 1, 2), dtypes.string,
          dtypes.int64)
      table = lookup.HashTable(init, default_value=-1)
      table.init.run()

  def test_int64(self):
    with ops.Graph().as_default(), self.test_session():
      init = lookup.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),
                                              dtypes.int64, dtypes.int64)
      table = lookup.HashTable(init, default_value=-1)
      table.init.run()

  def test_int32(self):
    # int32 keys have no registered kernel for this table.
    with ops.Graph().as_default(), self.test_session():
      init = lookup.KeyValueTensorInitializer((42, 1, -1000), (0, 1, 2),
                                              dtypes.int32, dtypes.int64)
      table = lookup.HashTable(init, default_value=-1)
      with self.assertRaisesRegexp(errors_impl.OpError,
                                   "No OpKernel was registered"):
        table.init.run()


class IndexTableFromTensor(test.TestCase):
  """Tests for lookup.index_table_from_tensor."""

  @test_util.run_in_graph_and_eager_modes
  def test_index_table_from_tensor_with_tensor_init(self):
    table = lookup.index_table_from_tensor(
        mapping=("brain", "salad", "surgery"), num_oov_buckets=1)

    if not context.executing_eagerly():
      with self.assertRaises(errors_impl.OpError):
        self.evaluate(table.lookup(
            constant_op.constant(("salad", "surgery", "tarkus"))))
    else:
      # Reinitializing a table in eager should work.
      table = lookup.index_table_from_tensor(
          mapping=("brain", "salad", "surgery"), num_oov_buckets=1)
    self.evaluate(lookup_ops.tables_initializer())
    ids = table.lookup(constant_op.constant(("salad", "surgery", "tarkus")))
    self.assertAllEqual((1, 2, 3), self.evaluate(ids))

  def test_int32_index_table_from_tensor_with_tensor_init(self):
    with self.test_session():
      table = lookup.index_table_from_tensor(
          mapping=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int32)
      ids = table.lookup(
          constant_op.constant((1, -1000, 11), dtype=dtypes.int32))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, 3), ids.eval())

  def test_int64_index_table_from_tensor_with_tensor_init(self):
    with self.test_session():
      table = lookup.index_table_from_tensor(
          mapping=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int64)
      ids = table.lookup(
          constant_op.constant((1, -1000, 11), dtype=dtypes.int64))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, 3), ids.eval())

  def test_index_table_from_tensor_with_default_value(self):
    default_value = -42
    with self.test_session():
      table = lookup.index_table_from_tensor(
          mapping=["brain", "salad", "surgery"], default_value=default_value)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))

      self.assertRaises(errors_impl.OpError, ids.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, default_value), ids.eval())

  def test_index_table_from_tensor_missing_mapping(self):
    with self.test_session():
      with self.assertRaisesRegexp(ValueError, "mapping must be specified"):
        lookup.index_table_from_tensor(mapping=None, num_oov_buckets=1)

  def test_index_table_from_tensor_empty_mapping(self):
    with self.test_session():
      table = lookup.index_table_from_tensor(
          mapping=np.array([], dtype=np.str_), num_oov_buckets=1)
      ids = table.lookup(constant_op.constant(["salad", "surgery", "brain"]))
      self.assertRaises(errors_impl.OpError, ids.eval)
      with self.assertRaisesRegexp(
          errors_impl.OpError, "keys and values cannot be empty"):
        lookup_ops.tables_initializer().run()

  def test_index_table_from_tensor_with_invalid_hashers(self):
    with self.test_session():
      with self.assertRaises(TypeError):
        lookup.index_table_from_tensor(
            mapping=["brain", "salad", "surgery"],
            num_oov_buckets=1,
            hasher_spec=1)

      table = lookup.index_table_from_tensor(
          mapping=["brain", "salad", "surgery"],
          num_oov_buckets=1,
          hasher_spec=lookup.HasherSpec("my-awesome-hash", None))

      self.assertRaises(ValueError, table.lookup,
                        constant_op.constant(["salad", "surgery", "tarkus"]))


class StringToIndexTest(test.TestCase):
  """Tests for the deprecated lookup.string_to_index helper."""

  def test_string_to_index(self):
    with self.test_session():
      mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
      feats = constant_op.constant(["salad", "surgery", "tarkus"])
      indices = lookup.string_to_index(feats, mapping=mapping_strings)

      self.assertRaises(errors_impl.OpError, indices.eval)
      lookup_ops.tables_initializer().run()

      self.assertAllEqual((1, 2, -1), indices.eval())

  def test_duplicate_entries(self):
    with self.test_session():
      mapping_strings = constant_op.constant(["hello", "hello"])
      feats = constant_op.constant(["hello", "hola"])
      _ = lookup.string_to_index(feats, mapping=mapping_strings)

      self.assertRaises(errors_impl.OpError,
                        lookup_ops.tables_initializer().run)

  def test_string_to_index_with_default_value(self):
    default_value = -42
    with self.test_session():
      mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
      feats = constant_op.constant(["salad", "surgery", "tarkus"])
      indices = lookup.string_to_index(
          feats, mapping=mapping_strings, default_value=default_value)
      self.assertRaises(errors_impl.OpError, indices.eval)

      lookup_ops.tables_initializer().run()
      self.assertAllEqual((1, 2, default_value), indices.eval())


class IndexToStringTableFromFileTest(test.TestCase):
  """Tests for lookup.index_to_string_table_from_file (id -> string)."""

  def _createVocabFile(self, basename):
    # Writes a fixed three-entry vocabulary and returns the file path.
    vocabulary_file = os.path.join(self.get_temp_dir(), basename)
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(["brain", "salad", "surgery"]) + "\n")
    return vocabulary_file

  def test_index_to_string_table(self):
    vocabulary_file = self._createVocabFile("i2f_vocab1.txt")
    with self.test_session():
      table = lookup.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file)
      features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
      self.assertRaises(errors_impl.OpError, features.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
                          features.eval())

  def test_index_to_string_table_with_default_value(self):
    default_value = b"NONE"
    vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
    with self.test_session():
      table = lookup.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file, default_value=default_value)
      features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
      self.assertRaises(errors_impl.OpError, features.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"salad", b"surgery", default_value),
                          features.eval())

  def test_index_to_string_table_with_vocab_size_too_small(self):
    default_value = b"NONE"
    vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
    with self.test_session():
      table = lookup.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file,
          vocab_size=2,
          default_value=default_value)
      features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
      self.assertRaises(errors_impl.OpError, features.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"salad", default_value, default_value),
                          features.eval())

  def test_index_to_string_table_with_vocab_size_too_large(self):
    vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
    with self.test_session():
      table = lookup.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file, vocab_size=4)
      features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
      self.assertRaises(errors_impl.OpError, features.eval)
      init = lookup_ops.tables_initializer()
      self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                              "Invalid vocab_size", init.run)

  def test_index_to_string_table_with_vocab_size(self):
    vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
    with self.test_session():
      table = lookup.index_to_string_table_from_file(
          vocabulary_file=vocabulary_file, vocab_size=3)
      features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))

      self.assertRaises(errors_impl.OpError, features.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"salad", b"surgery", b"UNK"), features.eval())


class IndexToStringTableFromTensorTest(test.TestCase):
  """Tests for lookup.index_to_string_table_from_tensor."""

  def test_index_to_string_table_from_tensor(self):
    with self.test_session():
      mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
      table = lookup.index_to_string_table_from_tensor(
          mapping=mapping_strings)

      indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
      features = table.lookup(indices)
      self.assertRaises(errors_impl.OpError, features.eval)
      lookup_ops.tables_initializer().run()

      self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
                          features.eval())

  def test_duplicate_entries(self):
    with self.test_session():
      mapping_strings = constant_op.constant(["hello", "hello"])
      table = lookup.index_to_string_table_from_tensor(
          mapping=mapping_strings)
      indices = constant_op.constant([0, 1, 4], dtypes.int64)
      features = table.lookup(indices)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"hello", b"hello", b"UNK"), features.eval())

  def test_index_to_string_with_default_value(self):
    default_value = b"NONE"
    with self.test_session():
      mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
      table = lookup.index_to_string_table_from_tensor(
          mapping=mapping_strings, default_value=default_value)
      indices = constant_op.constant([1, 2, 4], dtypes.int64)
      features = table.lookup(indices)
      self.assertRaises(errors_impl.OpError, features.eval)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"salad", b"surgery", default_value),
                          features.eval())


class IndexToStringTest(test.TestCase):
  """Tests for the deprecated lookup.index_to_string helper."""

  def test_index_to_string(self):
    with self.test_session():
      mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
      indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
      feats = lookup.index_to_string(indices, mapping=mapping_strings)

      self.assertRaises(errors_impl.OpError, feats.eval)
      lookup_ops.tables_initializer().run()

      self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
                          feats.eval())

  def test_duplicate_entries(self):
    with self.test_session():
      mapping_strings = constant_op.constant(["hello", "hello"])
      indices = constant_op.constant([0, 1, 4], dtypes.int64)
      feats = lookup.index_to_string(indices, mapping=mapping_strings)
      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"hello", b"hello", b"UNK"), feats.eval())

      self.assertRaises(errors_impl.OpError,
                        lookup_ops.tables_initializer().run)

  def test_index_to_string_with_default_value(self):
    default_value = b"NONE"
    with self.test_session():
      mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
      indices = constant_op.constant([1, 2, 4], dtypes.int64)
      feats = lookup.index_to_string(
          indices, mapping=mapping_strings, default_value=default_value)
      self.assertRaises(errors_impl.OpError, feats.eval)

      lookup_ops.tables_initializer().run()
      self.assertAllEqual((b"salad", b"surgery", default_value), feats.eval())


class InitializeTableFromFileOpTest(test.TestCase):
  """Tests for initializing HashTables from text files."""

  def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
    # Writes one vocabulary entry per line and returns the file path.
    vocabulary_file = os.path.join(self.get_temp_dir(), basename)
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(values) + "\n")
    return vocabulary_file

  @test_util.run_in_graph_and_eager_modes
  def testInitializeStringTable(self):
    vocabulary_file = self._createVocabFile("one_column_1.txt")
    default_value = -1
    table = lookup.HashTable(
        lookup.TextFileInitializer(vocabulary_file, dtypes.string,
                                   lookup.TextFileIndex.WHOLE_LINE,
                                   dtypes.int64,
                                   lookup.TextFileIndex.LINE_NUMBER),
        default_value)
    self.evaluate(table.init)

    output = table.lookup(constant_op.constant(["brain", "salad", "tank"]))

    result = self.evaluate(output)
    self.assertAllEqual([0, 1, -1], result)

  def testInitializeInt64Table(self):
    vocabulary_file = self._createVocabFile(
        "one_column_int64.txt", values=("42", "1", "-1000"))

    with self.test_session():
      default_value = -1
      table = lookup.HashTable(
          lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
                                     lookup.TextFileIndex.WHOLE_LINE,
                                     dtypes.int64,
                                     lookup.TextFileIndex.LINE_NUMBER),
          default_value)
      table.init.run()

      output = table.lookup(
          constant_op.constant((42, 1, 11), dtype=dtypes.int64))

      result = output.eval()
      self.assertAllEqual([0, 1, -1], result)

  def testInitializeIndexTable(self):
    # Reverse mapping: line number is the key, line content is the value.
    vocabulary_file = self._createVocabFile("one_column_2.txt")

    with self.test_session():
      default_value = "UNK"
      key_index = lookup.TextFileIndex.LINE_NUMBER
      value_index = lookup.TextFileIndex.WHOLE_LINE
      table = lookup.HashTable(
          lookup.TextFileInitializer(vocabulary_file, dtypes.int64, key_index,
                                     dtypes.string, value_index),
          default_value)
      table.init.run()

      input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
      output = table.lookup(input_values)

      result = output.eval()
      self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], result)

  def testMultiColumn(self):
    # Keys and values come from specific tab-separated columns.
    vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")

    with self.test_session():
      default_value = -1
      key_index = 1
      value_index = 2

      table = lookup.HashTable(
          lookup.TextFileInitializer(vocabulary_file, dtypes.string, key_index,
                                     dtypes.int64, value_index),
          default_value)
      table.init.run()

      input_string = constant_op.constant(["brain", "salad", "surgery"])
      output = table.lookup(input_string)

      result = output.eval()
      self.assertAllEqual([1, 5, 6], result)

  def testInvalidDataTypeInMultiColumn(self):
    # Column 2 holds strings, so parsing it as the int64 key must fail.
    vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")

    with self.test_session():
      default_value = -1
      key_index = 2
      value_index = 1
      table = lookup.HashTable(
          lookup.TextFileInitializer(vocabulary_file, dtypes.string, key_index,
                                     dtypes.int64, value_index),
          default_value)
      with self.assertRaisesOpError("is not a valid"):
        table.init.run()

  def testInvalidDataType(self):
    # WHOLE_LINE keys must be string; int64 is rejected at construction.
    vocabulary_file = self._createVocabFile("one_column_3.txt")

    with self.test_session():
      default_value = "UNK"
      key_index = lookup.TextFileIndex.WHOLE_LINE
      value_index = lookup.TextFileIndex.LINE_NUMBER

      with self.assertRaises(ValueError):
        lookup.HashTable(
            lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
                                       key_index, dtypes.string, value_index),
            default_value)

  def testInvalidIndex(self):
    vocabulary_file = self._createVocabFile("one_column_4.txt")
    with self.test_session():
      default_value = -1
      key_index = 1  # second column of the line
      value_index = lookup.TextFileIndex.LINE_NUMBER
      table = lookup.HashTable(
          lookup.TextFileInitializer(vocabulary_file, dtypes.string, key_index,
                                     dtypes.int64, value_index),
          default_value)

      with self.assertRaisesOpError("Invalid number of columns"):
        table.init.run()

  def testInitializeSameTableWithMultipleNodes(self):
    # Multiple table nodes with the same shared_name alias one table.
    vocabulary_file = self._createVocabFile("one_column_5.txt")

    with self.test_session() as sess:
      shared_name = "shared-one-columm"
      default_value = -1
      table1 = lookup.HashTable(
          lookup.TextFileInitializer(vocabulary_file, dtypes.string,
                                     lookup.TextFileIndex.WHOLE_LINE,
                                     dtypes.int64,
                                     lookup.TextFileIndex.LINE_NUMBER),
          default_value,
          shared_name=shared_name)
      table2 = lookup.HashTable(
          lookup.TextFileInitializer(vocabulary_file, dtypes.string,
                                     lookup.TextFileIndex.WHOLE_LINE,
                                     dtypes.int64,
                                     lookup.TextFileIndex.LINE_NUMBER),
          default_value,
          shared_name=shared_name)
      table3 = lookup.HashTable(
          lookup.TextFileInitializer(vocabulary_file, dtypes.string,
                                     lookup.TextFileIndex.WHOLE_LINE,
                                     dtypes.int64,
                                     lookup.TextFileIndex.LINE_NUMBER),
          default_value,
          shared_name=shared_name)

      lookup_ops.tables_initializer().run()

      input_string = constant_op.constant(["brain", "salad", "tank"])

      output1 = table1.lookup(input_string)
      output2 = table2.lookup(input_string)
      output3 = table3.lookup(input_string)

      out1, out2, out3 = sess.run([output1, output2, output3])
      self.assertAllEqual([0, 1, -1], out1)
      self.assertAllEqual([0, 1, -1], out2)
      self.assertAllEqual([0, 1, -1], out3)

  def testInitializeTableWithNoFilename(self):
    with self.test_session():
      default_value = -1
      with self.assertRaises(ValueError):
        lookup.HashTable(
            lookup.TextFileInitializer(
                "", dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
                dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
            default_value)

  def testInitializeWithVocabSize(self):
    with self.test_session():
      default_value = -1
      vocab_size = 3
      vocabulary_file1 = self._createVocabFile("one_column6.txt")
      table1 = lookup.HashTable(
          lookup.TextFileInitializer(
              vocabulary_file1,
              dtypes.string,
              lookup.TextFileIndex.WHOLE_LINE,
              dtypes.int64,
              lookup.TextFileIndex.LINE_NUMBER,
              vocab_size=vocab_size),
          default_value)

      # Initialize from file.
      table1.init.run()
      self.assertEquals(vocab_size, table1.size().eval())

      vocabulary_file2 = self._createVocabFile("one_column7.txt")
      vocab_size = 5
      table2 = lookup.HashTable(
          lookup.TextFileInitializer(
              vocabulary_file2,
              dtypes.string,
              lookup.TextFileIndex.WHOLE_LINE,
              dtypes.int64,
              lookup.TextFileIndex.LINE_NUMBER,
              vocab_size=vocab_size),
          default_value)
      with self.assertRaisesOpError("Invalid vocab_size"):
        table2.init.run()

      vocab_size = 1
      vocabulary_file3 = self._createVocabFile("one_column3.txt")
      table3 = lookup.HashTable(
          lookup.TextFileInitializer(
              vocabulary_file3,
              dtypes.string,
              lookup.TextFileIndex.WHOLE_LINE,
              dtypes.int64,
              lookup.TextFileIndex.LINE_NUMBER,
              vocab_size=vocab_size),
          default_value)

      # Smaller vocab size reads only vocab_size records.
      table3.init.run()
      self.assertEquals(vocab_size, table3.size().eval())

  def testFeedVocabularyName(self):
    vocabulary_file = self._createVocabFile("feed_vocabulary.txt")

    with self.test_session():
      default_value = -1
      table = lookup.HashTable(
          lookup.TextFileInitializer("old_file.txt", dtypes.string,
                                     lookup.TextFileIndex.WHOLE_LINE,
                                     dtypes.int64,
                                     lookup.TextFileIndex.LINE_NUMBER),
          default_value)

      # Initialize with non existing file (old_file.txt) should fail.
      # TODO(yleon): Update message, which might change per FileSystem.
      with self.assertRaisesOpError("old_file.txt"):
        table.init.run()

      # Initialize the model feeding the vocabulary file.
      filenames = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
      table.init.run(feed_dict={filenames[0]: vocabulary_file})

      input_string = constant_op.constant(["brain", "salad", "tank"])
      output = table.lookup(input_string)

      result = output.eval()
      self.assertAllEqual([0, 1, -1], result)

  def testInvalidFilenames(self):
    vocabulary_file = self._createVocabFile("filename_shape.txt")

    with self.test_session():
      default_value = -1

      # Invalid data type
      other_type = constant_op.constant(1)
      with self.assertRaises(ValueError):
        lookup.HashTable(
            lookup.TextFileInitializer(
                other_type, dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
                dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
            default_value)

      # Non-scalar filename
      filenames = constant_op.constant([vocabulary_file, vocabulary_file])
      with self.assertRaises(ValueError):
        lookup.HashTable(
            lookup.TextFileInitializer(
                filenames, dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
                dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
            default_value)

  def testIdToStringTable(self):
    vocab_file = self._createVocabFile("feat_to_id_1.txt")
    with self.test_session():
      default_value = "UNK"
      vocab_size = 3
      table = lookup.HashTable(
          lookup.TextFileStringTableInitializer(
              vocab_file, vocab_size=vocab_size),
          default_value)

      table.init.run()

      input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)

      out = table.lookup(input_values)
      self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"],
                          out.eval())
      self.assertEquals(vocab_size, table.size().eval())

  def testStringToIdTable(self):
    vocab_file = self._createVocabFile("feat_to_id_2.txt")
    with self.test_session():
      default_value = -1
      vocab_size = 3
      table = lookup.HashTable(
          lookup.TextFileIdTableInitializer(
              vocab_file, vocab_size=vocab_size),
          default_value)
      table.init.run()

      input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])

      out = table.lookup(input_string)
      self.assertAllEqual([0, 1, 2, -1], out.eval())
      self.assertEquals(vocab_size, table.size().eval())

  def testInt64ToIdTable(self):
    vocab_file = self._createVocabFile(
        "feat_to_id_3.txt", values=("42", "1", "-1000"))
    with self.test_session():
      default_value = -1
      vocab_size = 3
      table = lookup.HashTable(
          lookup.TextFileIdTableInitializer(
              vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
          default_value)
      table.init.run()

      out = table.lookup(
          constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64))
      self.assertAllEqual((0, 1, 2, -1), out.eval())
      self.assertEquals(vocab_size, table.size().eval())


class IdTableWithHashBucketsTest(test.TestCase):
  """Tests for lookup.IdTableWithHashBuckets (vocab table + OOV buckets)."""

  def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
    # Writes one vocabulary entry per line and returns the file path.
    vocabulary_file = os.path.join(self.get_temp_dir(), basename)
    with open(vocabulary_file, "w") as f:
      f.write("\n".join(values) + "\n")
    return vocabulary_file

  def testStringIdTableWithHashBuckets(self):
    vocab_file = self._createVocabFile("feat_to_id_1.txt")
    with self.test_session():
      default_value = -1
      vocab_size = 3
      oov_buckets = 1
      table = lookup.IdTableWithHashBuckets(
          lookup.HashTable(
              lookup.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size),
              default_value),
          oov_buckets)

      table.init.run()

      input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])

      out = table.lookup(input_string)
      self.assertAllEqual([0, 1, 2, 3], out.eval())
      self.assertEquals(vocab_size + oov_buckets, table.size().eval())

  def testInt32IdTableWithHashBuckets(self):
    vocab_file = self._createVocabFile("feat_to_id_2.txt", ("42", "1", "-1000"))
    with self.test_session():
      default_value = -1
      vocab_size = 3
      oov_buckets = 1
      table = lookup.IdTableWithHashBuckets(
          lookup.HashTable(
              lookup.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
              default_value),
          oov_buckets,
          key_dtype=dtypes.int32)

      table.init.run()

      values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int32)

      out = table.lookup(values)
      self.assertAllEqual([0, 1, 2, 3], out.eval())
      self.assertEquals(vocab_size + oov_buckets, table.size().eval())

  def testInt64IdTableWithHashBuckets(self):
    vocab_file = self._createVocabFile("feat_to_id_3.txt", ("42", "1", "-1000"))
    with self.test_session():
      default_value = -1
      vocab_size = 3
      oov_buckets = 1
      table = lookup.IdTableWithHashBuckets(
          lookup.HashTable(
              lookup.TextFileIdTableInitializer(
                  vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
              default_value),
          oov_buckets)

      table.init.run()

      values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64)

      out = table.lookup(values)
      self.assertAllEqual([0, 1, 2, 3], out.eval())
      self.assertEquals(vocab_size + oov_buckets, table.size().eval())

  def testStringIdTableWithOnlyHashBucket(self):
    with self.test_session():
      oov_buckets = 5

      # Set a table that only uses hash buckets, for each input value returns
      # an id calculated by fingerprint("input") mod oov_buckets.
      table = lookup.IdTableWithHashBuckets(None, oov_buckets)
      table.init.run()

      values = constant_op.constant(("brain", "salad", "surgery"))

      out = table.lookup(values)
      self.assertAllEqual(
          [
              3,  # fingerprint("brain") mod 5.
              1,  # fingerprint("salad") mod 5.
              4  # fingerprint("surgery") mod 5
          ],
          out.eval())
      self.assertEquals(oov_buckets, table.size().eval())

  def testInt32IdTableWithOnlyHashBucket(self):
    with self.test_session():
      oov_buckets = 5

      # Set a table that only uses hash buckets, for each input value returns
      # an id calculated by fingerprint("input") mod oov_buckets.
      table = lookup.IdTableWithHashBuckets(
          None, oov_buckets, key_dtype=dtypes.int32)
      table.init.run()

      input_string = constant_op.constant([42, 1, -1000], dtype=dtypes.int32)

      out = table.lookup(input_string)
      self.assertAllEqual(
          [
              1,  # fingerprint("42") mod 5.
              4,  # fingerprint("1") mod 5.
2 # fingerprint("-1000") mod 5 ], out.eval()) self.assertEquals(oov_buckets, table.size().eval()) def testFloat64IdTableWithOnlyHashBucket(self): with self.test_session(): with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"): lookup.IdTableWithHashBuckets( None, num_oov_buckets=5, key_dtype=dtypes.float64) def testBoolIdTableWithOnlyHashBucket(self): with self.test_session(): with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"): lookup.IdTableWithHashBuckets( None, num_oov_buckets=5, key_dtype=dtypes.bool) def testIdTableWithHashBucketsWithMultipleInitializers(self): vocab_file = self._createVocabFile("feat_to_id_4.txt") with self.test_session() as sess: default_value = -1 vocab_size = 3 oov_buckets = 3 vocab_table = lookup.HashTable( lookup.TextFileIdTableInitializer( vocab_file, vocab_size=vocab_size), default_value) table1 = lookup.IdTableWithHashBuckets( vocab_table, oov_buckets, hasher_spec=lookup.FastHashSpec, name="table1") table2 = lookup.IdTableWithHashBuckets( vocab_table, oov_buckets, hasher_spec=lookup.StrongHashSpec((1, 2)), name="table2") lookup_ops.tables_initializer().run() input_string = constant_op.constant( ["fruit", "brain", "salad", "surgery", "UNK"]) out1 = table1.lookup(input_string) out2 = table2.lookup(input_string) out1, out2 = sess.run([out1, out2]) self.assertAllEqual([5, 0, 1, 2, 5], out1) self.assertAllEqual([5, 0, 1, 2, 3], out2) self.assertEquals(vocab_size + oov_buckets, table1.size().eval()) self.assertEquals(vocab_size + oov_buckets, table2.size().eval()) test_util.assert_ops_in_graph({ "table1_Lookup/hash_bucket": "StringToHashBucketFast", "table2_Lookup/hash_bucket": "StringToHashBucketStrong", }, sess.graph) def testIdTableWithHashBucketsInitializationAcrossSessions(self): vocab_file = self._createVocabFile("feat_to_id_5.txt") shared_name = "across-sessions" with self.test_session(): default_value = -1 vocab_size = 3 oov_buckets = 1 table1 = lookup.IdTableWithHashBuckets( lookup.HashTable( 
lookup.TextFileIdTableInitializer( vocab_file, vocab_size=vocab_size), default_value, shared_name=shared_name), oov_buckets) table1.init.run() input_string_1 = constant_op.constant( ["brain", "salad", "surgery", "UNK"]) out1 = table1.lookup(input_string_1) self.assertAllEqual([0, 1, 2, 3], out1.eval()) self.assertEquals(vocab_size + oov_buckets, table1.size().eval()) with self.test_session(): default_value = -1 vocab_size = 3 oov_buckets = 1 # Underlying lookup table already initialized in previous session. # No need to call table2.init.run() table2 = lookup.IdTableWithHashBuckets( lookup.HashTable( lookup.TextFileIdTableInitializer( vocab_file, vocab_size=vocab_size), default_value, shared_name=shared_name), oov_buckets) input_string_2 = constant_op.constant(["fruit", "salad", "UNK"]) out2 = table2.lookup(input_string_2) self.assertAllEqual([3, 1, 3], out2.eval()) self.assertEquals(vocab_size + oov_buckets, table2.size().eval()) def testIdTableWithHashBucketsWithMultipleInitializersDifferentDefault(self): vocab_file = self._createVocabFile("feat_to_id_6.txt") with self.test_session() as sess: default_value1 = -1 vocab_size = 3 oov_buckets = 0 table1 = lookup.IdTableWithHashBuckets( lookup.HashTable( lookup.TextFileIdTableInitializer( vocab_file, vocab_size=vocab_size), default_value1), oov_buckets) default_value2 = -2 table2 = lookup.IdTableWithHashBuckets( lookup.HashTable( lookup.TextFileIdTableInitializer( vocab_file, vocab_size=vocab_size), default_value2), oov_buckets) lookup_ops.tables_initializer().run() input_string_1 = constant_op.constant( ["brain", "salad", "surgery", "UNK"]) input_string_2 = constant_op.constant(["fruit", "salad", "UNK"]) out1 = table1.lookup(input_string_1) out2 = table2.lookup(input_string_2) out1, out2 = sess.run([out1, out2]) self.assertAllEqual([0, 1, 2, -1], out1) self.assertAllEqual([-2, 1, -2], out2) self.assertEquals(vocab_size + oov_buckets, table1.size().eval()) self.assertEquals(vocab_size + oov_buckets, 
table2.size().eval()) def testSparseTensor(self): vocab_file = self._createVocabFile("feat_to_id_7.txt") input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]] input_shape = [4, 4] with self.test_session() as sess: sp_features = sparse_tensor.SparseTensor( constant_op.constant(input_indices, dtypes.int64), constant_op.constant(["brain", "salad", "brain", "surgery", "tarkus"], dtypes.string), constant_op.constant(input_shape, dtypes.int64)) table = lookup.IdTableWithHashBuckets( lookup.HashTable( lookup.TextFileIdTableInitializer( vocab_file, vocab_size=3), -1), 1) table.init.run() sp_ids = table.lookup(sp_features) self.assertAllEqual([5], sp_ids.values._shape_as_list()) sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run( [sp_ids.indices, sp_ids.values, sp_ids.dense_shape]) self.assertAllEqual(input_indices, sp_ids_ind) self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val) self.assertAllEqual(input_shape, sp_ids_shape) def testInt32SparseTensor(self): input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]] input_shape = [4, 4] with self.test_session() as sess: sp_features = sparse_tensor.SparseTensor( constant_op.constant(input_indices, dtypes.int64), constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32), constant_op.constant(input_shape, dtypes.int64)) table = lookup.IdTableWithHashBuckets( lookup.HashTable( lookup.KeyValueTensorInitializer( (42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), -1), 1, key_dtype=dtypes.int32) table.init.run() sp_ids = table.lookup(sp_features) self.assertAllEqual([5], sp_ids.values._shape_as_list()) sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run( [sp_ids.indices, sp_ids.values, sp_ids.dense_shape]) self.assertAllEqual(input_indices, sp_ids_ind) self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val) self.assertAllEqual(input_shape, sp_ids_shape) def testInt64SparseTensor(self): input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]] input_shape = [4, 4] with self.test_session() as sess: sp_features = sparse_tensor.SparseTensor( 
constant_op.constant(input_indices, dtypes.int64), constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64), constant_op.constant(input_shape, dtypes.int64)) table = lookup.IdTableWithHashBuckets( lookup.HashTable( lookup.KeyValueTensorInitializer( (42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64), -1), 1, key_dtype=dtypes.int64) table.init.run() sp_ids = table.lookup(sp_features) self.assertAllEqual([5], sp_ids.values._shape_as_list()) sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run( [sp_ids.indices, sp_ids.values, sp_ids.dense_shape]) self.assertAllEqual(input_indices, sp_ids_ind) self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val) self.assertAllEqual(input_shape, sp_ids_shape) def testIdTableWithHashBucketsWithInvalidHashers(self): vocab_file = self._createVocabFile("feat_to_id_4.txt") with self.test_session(): default_value = -1 vocab_size = 3 oov_buckets = 1 lookup_table = lookup.HashTable( lookup.TextFileIdTableInitializer( vocab_file, vocab_size=vocab_size), default_value) with self.assertRaises(TypeError): lookup.IdTableWithHashBuckets( lookup_table, oov_buckets, hasher_spec=1) table = lookup.IdTableWithHashBuckets( lookup_table, oov_buckets, hasher_spec=lookup.HasherSpec("my-awesome-hash", None)) input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"]) with self.assertRaises(ValueError): table.lookup(input_string) with self.assertRaises(ValueError): table = lookup.IdTableWithHashBuckets( lookup_table, oov_buckets, hasher_spec=lookup.StrongHashSpec([])) with self.assertRaises(ValueError): table = lookup.IdTableWithHashBuckets( lookup_table, oov_buckets, hasher_spec=lookup.StrongHashSpec([1, 2, 3])) with self.assertRaises(TypeError): table = lookup.IdTableWithHashBuckets( lookup_table, oov_buckets, hasher_spec=lookup.StrongHashSpec([None, 2])) if __name__ == "__main__": test.main()
collective/cyn.in
refs/heads/master
products/Plone4ArtistsCalendar/pythonlib/p4a/subtyper/__init__.py
4
from zope import component
from p4a.subtyper.interfaces import ISubtyper
from Products.CMFCore import DirectoryView

DirectoryView.registerDirectory('skins', globals())


class activated(property):
    """Descriptor exposing whether a named subtype is applied to an object.

    Reading the descriptor reports whether the subtype ``desc_name`` is
    currently applied; assigning a truthy/falsy value applies or removes
    the subtype via the ``ISubtyper`` utility.  If ``attr`` is given, the
    subtype check targets ``getattr(instance, attr)`` instead of the
    instance itself.
    """

    def __init__(self, desc_name, attr=None):
        self.desc_name = desc_name
        self.attr = attr

    def get_obj(self, obj):
        # With no attr configured, the owning instance itself is the target.
        if self.attr:
            return getattr(obj, self.attr, None)
        return obj

    def __get__(self, obj, type=None):
        # Missing utility or missing target object both read as "not applied".
        util = component.queryUtility(ISubtyper)
        if util is not None:
            target = self.get_obj(obj)
            if target is not None:
                existing = util.existing_type(target)
                if existing is not None:
                    return existing.name == self.desc_name
        return False

    def __set__(self, obj, v):
        # getUtility (not queryUtility): assignment requires the utility.
        util = component.getUtility(ISubtyper)
        wanted = bool(v)
        currently = self.__get__(obj)
        target = self.get_obj(obj)
        if wanted == currently:
            return  # already in the requested state; nothing to do
        if wanted:
            util.change_type(target, self.desc_name)
        else:
            util.remove_type(target)

    def __delete__(self, obj):
        # Deleting the attribute is equivalent to switching the subtype off.
        self.__set__(obj, False)
richard-fisher/repository
refs/heads/master
xorg/library/libICE/actions.py
6
#!/usr/bin/python from pisi.actionsapi import shelltools, get, autotools, pisitools def setup(): autotools.configure("--disable-static") def build(): autotools.make() def install(): autotools.install()
swquinn/ronin
refs/heads/development
ronin/strategies/rsync.py
1
# Copyright (c) 2015 Sean Quinn
#
# Licensed under the MIT License (http://opensource.org/licenses/MIT)
#
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
# OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from . import FileSyncStrategy

from pprint import pprint

import logging
import os
import subprocess

#: The logging apparatus.
logger = logging.getLogger(__name__)


class RsyncStrategy(FileSyncStrategy):
    """File-sync strategy that shells out to ``rsync``.

    Builds the rsync command line from ``self.manifest`` (its ``args``,
    ``exclude`` and ``elevate`` settings) plus ``self.source`` and
    ``self.target``, then runs it via :func:`subprocess.call`.
    """

    def get_args(self):
        """Return the rsync argument list derived from the manifest.

        The order is: pass-through manifest arguments, then one
        ``--exclude=<pattern>`` option per exclusion, then the source
        path and finally the target path.
        """
        manifest = self.manifest

        # Pass-through arguments declared in the manifest file.
        args = [str(arg) for arg in manifest.args]

        # Each manifest exclusion becomes an --exclude=<pattern> option.
        args.extend("--exclude=" + str(exclusion)
                    for exclusion in manifest.exclude)

        args.append(self.source)
        args.append(self.target)
        return args

    def invoke(self):
        """Run rsync (under ``sudo`` when the manifest elevates) and
        return the subprocess exit code."""
        command = ["rsync"]
        if self.manifest.elevate:
            command.insert(0, "sudo")
        command += self.get_args()
        # Lazy %-formatting: the join only runs when debug logging is on.
        logger.debug("Running command: %s", " ".join(command))
        # List form (shell=False) avoids shell-injection via manifest args.
        return subprocess.call(command)
orion1024/Sick-Beard
refs/heads/master
lib/requests/packages/urllib3/filepost.py
64
# urllib3/filepost.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

import codecs
import mimetypes

from uuid import uuid4
from io import BytesIO

from .packages import six
from .packages.six import b

writer = codecs.lookup('utf-8')[3]


def choose_boundary():
    """
    Return a random hex string usable as a multipart boundary
    (a minimal stand-in for mimetools.choose_boundary).
    """
    return uuid4().hex


def get_content_type(filename):
    # Fall back to a generic binary type when the extension is unknown.
    guessed = mimetypes.guess_type(filename)[0]
    return guessed or 'application/octet-stream'


def iter_fields(fields):
    """
    Yield (name, value) pairs from ``fields``, which may be either a
    dict or an iterable of (name, value) tuples.
    """
    source = six.iteritems(fields) if isinstance(fields, dict) else fields
    return ((k, v) for k, v in source)


def encode_multipart_formdata(fields, boundary=None):
    """
    Serialize ``fields`` into a multipart/form-data request body.

    :param fields:
        Dict or list of (key, value) field tuples.  The key is the field
        name and the value the body of the form-data section.  A
        two-element tuple value is interpreted as (filename, data), which
        produces a file section with a guessed Content-Type.

        Field names and filenames must be unicode.

    :param boundary:
        Boundary string; a random one is generated when omitted.

    :returns: ``(body_bytes, content_type_header_value)``
    """
    if boundary is None:
        boundary = choose_boundary()

    body = BytesIO()
    # Text writer that UTF-8-encodes header lines into the byte buffer.
    text = writer(body)

    for name, value in iter_fields(fields):
        body.write(b('--%s\r\n' % (boundary)))

        if isinstance(value, tuple):
            # (filename, data) pair: emit a file part with a guessed type.
            filename, data = value
            text.write('Content-Disposition: form-data; name="%s"; '
                       'filename="%s"\r\n' % (name, filename))
            body.write(b('Content-Type: %s\r\n\r\n' %
                         (get_content_type(filename))))
        else:
            data = value
            text.write('Content-Disposition: form-data; name="%s"\r\n'
                       % (name))
            body.write(b'Content-Type: text/plain\r\n\r\n')

        if isinstance(data, int):
            data = str(data)  # Backwards compatibility

        # Unicode payloads are UTF-8 encoded; bytes pass through untouched.
        if isinstance(data, six.text_type):
            text.write(data)
        else:
            body.write(data)

        body.write(b'\r\n')

    body.write(b('--%s--\r\n' % (boundary)))

    content_type = b('multipart/form-data; boundary=%s' % boundary)

    return body.getvalue(), content_type
nisavid/bedframe
refs/heads/master
bedframe/auth/session/_connectors.py
1
"""Connectors""" __copyright__ = "Copyright (C) 2014 Ivan D Vasin" __docformat__ = "restructuredtext" from .. import _connectors class SessionSupplicant(_connectors.Supplicant): """An authentication supplicant for session-based HTTP authentication :param session_manager: The session-based authentication manager. :type session_manager: :class:`bedframe.auth.http.HttpSessionAuthManager \ <bedframe.auth.http._session._managers.HttpSessionAuthManager>` """ def __init__(self, session_manager, **kwargs): super(SessionSupplicant, self).__init__(**kwargs) self._session_manager = session_manager @property def session_manager(self): """The session-based authentication manager :type: :class:`bedframe.auth.http.HttpSessionAuthManager \ <bedframe.auth.http._session._managers.HttpSessionAuthManager>` """ return self._session_manager
jgmanzanas/CMNT_004_15
refs/heads/master
project-addons/asperience_edi/models/edi_fields.py
1
# -*- coding: utf-8 -*- ############################################################################## # # Copyright (C) 2015 Comunitea Servicios Tecnológicos All Rights Reserved # $Omar Castiñeira Saaevdra <omar@comunitea.com>$ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp import models, fields class SaleOrder(models.Model): _inherit = "sale.order" urgent = fields.Boolean("Urgent") top_date = fields.Date("Limit date") class ProductUom(models.Model): _inherit = "product.uom" edi_code = fields.Char("Edi code") class ResPartner(models.Model): _inherit = "res.partner" gln = fields.Char("GLN") class PaymentMode(models.Model): _inherit = "payment.mode" edi_code = fields.Char("Edi code")
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
refs/heads/master
orcid_api_v3/models/research_resources_v30_rc1.py
1
# coding: utf-8 """ ORCID Member No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501 OpenAPI spec version: Latest Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six from orcid_api_v3.models.last_modified_date_v30_rc1 import LastModifiedDateV30Rc1 # noqa: F401,E501 from orcid_api_v3.models.research_resource_group_v30_rc1 import ResearchResourceGroupV30Rc1 # noqa: F401,E501 class ResearchResourcesV30Rc1(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'last_modified_date': 'LastModifiedDateV30Rc1', 'group': 'list[ResearchResourceGroupV30Rc1]', 'path': 'str' } attribute_map = { 'last_modified_date': 'last-modified-date', 'group': 'group', 'path': 'path' } def __init__(self, last_modified_date=None, group=None, path=None): # noqa: E501 """ResearchResourcesV30Rc1 - a model defined in Swagger""" # noqa: E501 self._last_modified_date = None self._group = None self._path = None self.discriminator = None if last_modified_date is not None: self.last_modified_date = last_modified_date if group is not None: self.group = group if path is not None: self.path = path @property def last_modified_date(self): """Gets the last_modified_date of this ResearchResourcesV30Rc1. # noqa: E501 :return: The last_modified_date of this ResearchResourcesV30Rc1. # noqa: E501 :rtype: LastModifiedDateV30Rc1 """ return self._last_modified_date @last_modified_date.setter def last_modified_date(self, last_modified_date): """Sets the last_modified_date of this ResearchResourcesV30Rc1. :param last_modified_date: The last_modified_date of this ResearchResourcesV30Rc1. 
# noqa: E501 :type: LastModifiedDateV30Rc1 """ self._last_modified_date = last_modified_date @property def group(self): """Gets the group of this ResearchResourcesV30Rc1. # noqa: E501 :return: The group of this ResearchResourcesV30Rc1. # noqa: E501 :rtype: list[ResearchResourceGroupV30Rc1] """ return self._group @group.setter def group(self, group): """Sets the group of this ResearchResourcesV30Rc1. :param group: The group of this ResearchResourcesV30Rc1. # noqa: E501 :type: list[ResearchResourceGroupV30Rc1] """ self._group = group @property def path(self): """Gets the path of this ResearchResourcesV30Rc1. # noqa: E501 :return: The path of this ResearchResourcesV30Rc1. # noqa: E501 :rtype: str """ return self._path @path.setter def path(self, path): """Sets the path of this ResearchResourcesV30Rc1. :param path: The path of this ResearchResourcesV30Rc1. # noqa: E501 :type: str """ self._path = path def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(ResearchResourcesV30Rc1, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, ResearchResourcesV30Rc1): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
sup95/zulip
refs/heads/master
scripts/lib/setup_path_on_import.py
4
""" Use libraries from a virtualenv (by modifying sys.path) in production. Also add Zulip's root directory to sys.path """ import os from os.path import dirname, abspath import sys BASE_DIR = dirname(dirname(dirname(abspath(__file__)))) activate_this = os.path.join(BASE_DIR, "zulip-venv", "bin", "activate_this.py") if os.path.exists(activate_this): # this file will exist in production exec(open(activate_this).read(), {}, dict(__file__=activate_this)) sys.path.append(BASE_DIR)
patrickwind/My_Blog
refs/heads/master
venv/lib/python2.7/site-packages/werkzeug/local.py
147
# -*- coding: utf-8 -*- """ werkzeug.local ~~~~~~~~~~~~~~ This module implements context-local objects. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ from functools import update_wrapper from werkzeug.wsgi import ClosingIterator from werkzeug._compat import PY2, implements_bool # since each thread has its own greenlet we can just use those as identifiers # for the context. If greenlets are not available we fall back to the # current thread ident depending on where it is. try: from greenlet import getcurrent as get_ident except ImportError: try: from thread import get_ident except ImportError: from _thread import get_ident def release_local(local): """Releases the contents of the local for the current context. This makes it possible to use locals without a manager. Example:: >>> loc = Local() >>> loc.foo = 42 >>> release_local(loc) >>> hasattr(loc, 'foo') False With this function one can release :class:`Local` objects as well as :class:`LocalStack` objects. However it is not possible to release data held by proxies that way, one always has to retain a reference to the underlying local object in order to be able to release it. .. 
versionadded:: 0.6.1 """ local.__release_local__() class Local(object): __slots__ = ('__storage__', '__ident_func__') def __init__(self): object.__setattr__(self, '__storage__', {}) object.__setattr__(self, '__ident_func__', get_ident) def __iter__(self): return iter(self.__storage__.items()) def __call__(self, proxy): """Create a proxy for a name.""" return LocalProxy(self, proxy) def __release_local__(self): self.__storage__.pop(self.__ident_func__(), None) def __getattr__(self, name): try: return self.__storage__[self.__ident_func__()][name] except KeyError: raise AttributeError(name) def __setattr__(self, name, value): ident = self.__ident_func__() storage = self.__storage__ try: storage[ident][name] = value except KeyError: storage[ident] = {name: value} def __delattr__(self, name): try: del self.__storage__[self.__ident_func__()][name] except KeyError: raise AttributeError(name) class LocalStack(object): """This class works similar to a :class:`Local` but keeps a stack of objects instead. This is best explained with an example:: >>> ls = LocalStack() >>> ls.push(42) >>> ls.top 42 >>> ls.push(23) >>> ls.top 23 >>> ls.pop() 23 >>> ls.top 42 They can be force released by using a :class:`LocalManager` or with the :func:`release_local` function but the correct way is to pop the item from the stack after using. When the stack is empty it will no longer be bound to the current context (and as such released). By calling the stack without arguments it returns a proxy that resolves to the topmost item on the stack. .. 
versionadded:: 0.6.1 """ def __init__(self): self._local = Local() def __release_local__(self): self._local.__release_local__() def _get__ident_func__(self): return self._local.__ident_func__ def _set__ident_func__(self, value): object.__setattr__(self._local, '__ident_func__', value) __ident_func__ = property(_get__ident_func__, _set__ident_func__) del _get__ident_func__, _set__ident_func__ def __call__(self): def _lookup(): rv = self.top if rv is None: raise RuntimeError('object unbound') return rv return LocalProxy(_lookup) def push(self, obj): """Pushes a new item to the stack""" rv = getattr(self._local, 'stack', None) if rv is None: self._local.stack = rv = [] rv.append(obj) return rv def pop(self): """Removes the topmost item from the stack, will return the old value or `None` if the stack was already empty. """ stack = getattr(self._local, 'stack', None) if stack is None: return None elif len(stack) == 1: release_local(self._local) return stack[-1] else: return stack.pop() @property def top(self): """The topmost item on the stack. If the stack is empty, `None` is returned. """ try: return self._local.stack[-1] except (AttributeError, IndexError): return None class LocalManager(object): """Local objects cannot manage themselves. For that you need a local manager. You can pass a local manager multiple locals or add them later by appending them to `manager.locals`. Everytime the manager cleans up it, will clean up all the data left in the locals for this context. The `ident_func` parameter can be added to override the default ident function for the wrapped locals. .. versionchanged:: 0.6.1 Instead of a manager the :func:`release_local` function can be used as well. .. versionchanged:: 0.7 `ident_func` was added. 
""" def __init__(self, locals=None, ident_func=None): if locals is None: self.locals = [] elif isinstance(locals, Local): self.locals = [locals] else: self.locals = list(locals) if ident_func is not None: self.ident_func = ident_func for local in self.locals: object.__setattr__(local, '__ident_func__', ident_func) else: self.ident_func = get_ident def get_ident(self): """Return the context identifier the local objects use internally for this context. You cannot override this method to change the behavior but use it to link other context local objects (such as SQLAlchemy's scoped sessions) to the Werkzeug locals. .. versionchanged:: 0.7 Yu can pass a different ident function to the local manager that will then be propagated to all the locals passed to the constructor. """ return self.ident_func() def cleanup(self): """Manually clean up the data in the locals for this context. Call this at the end of the request or use `make_middleware()`. """ for local in self.locals: release_local(local) def make_middleware(self, app): """Wrap a WSGI application so that cleaning up happens after request end. """ def application(environ, start_response): return ClosingIterator(app(environ, start_response), self.cleanup) return application def middleware(self, func): """Like `make_middleware` but for decorating functions. Example usage:: @manager.middleware def application(environ, start_response): ... The difference to `make_middleware` is that the function passed will have all the arguments copied from the inner application (name, docstring, module). """ return update_wrapper(self.make_middleware(func), func) def __repr__(self): return '<%s storages: %d>' % ( self.__class__.__name__, len(self.locals) ) @implements_bool class LocalProxy(object): """Acts as a proxy for a werkzeug local. Forwards all operations to a proxied object. The only operations not supported for forwarding are right handed operands and any kind of assignment. 
Example usage:: from werkzeug.local import Local l = Local() # these are proxies request = l('request') user = l('user') from werkzeug.local import LocalStack _response_local = LocalStack() # this is a proxy response = _response_local() Whenever something is bound to l.user / l.request the proxy objects will forward all operations. If no object is bound a :exc:`RuntimeError` will be raised. To create proxies to :class:`Local` or :class:`LocalStack` objects, call the object as shown above. If you want to have a proxy to an object looked up by a function, you can (as of Werkzeug 0.6.1) pass a function to the :class:`LocalProxy` constructor:: session = LocalProxy(lambda: get_current_request().session) .. versionchanged:: 0.6.1 The class can be instanciated with a callable as well now. """ __slots__ = ('__local', '__dict__', '__name__') def __init__(self, local, name=None): object.__setattr__(self, '_LocalProxy__local', local) object.__setattr__(self, '__name__', name) def _get_current_object(self): """Return the current object. This is useful if you want the real object behind the proxy at a time for performance reasons or because you want to pass the object into a different context. 
""" if not hasattr(self.__local, '__release_local__'): return self.__local() try: return getattr(self.__local, self.__name__) except AttributeError: raise RuntimeError('no object bound to %s' % self.__name__) @property def __dict__(self): try: return self._get_current_object().__dict__ except RuntimeError: raise AttributeError('__dict__') def __repr__(self): try: obj = self._get_current_object() except RuntimeError: return '<%s unbound>' % self.__class__.__name__ return repr(obj) def __bool__(self): try: return bool(self._get_current_object()) except RuntimeError: return False def __unicode__(self): try: return unicode(self._get_current_object()) except RuntimeError: return repr(self) def __dir__(self): try: return dir(self._get_current_object()) except RuntimeError: return [] def __getattr__(self, name): if name == '__members__': return dir(self._get_current_object()) return getattr(self._get_current_object(), name) def __setitem__(self, key, value): self._get_current_object()[key] = value def __delitem__(self, key): del self._get_current_object()[key] if PY2: __getslice__ = lambda x, i, j: x._get_current_object()[i:j] def __setslice__(self, i, j, seq): self._get_current_object()[i:j] = seq def __delslice__(self, i, j): del self._get_current_object()[i:j] __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v) __delattr__ = lambda x, n: delattr(x._get_current_object(), n) __str__ = lambda x: str(x._get_current_object()) __lt__ = lambda x, o: x._get_current_object() < o __le__ = lambda x, o: x._get_current_object() <= o __eq__ = lambda x, o: x._get_current_object() == o __ne__ = lambda x, o: x._get_current_object() != o __gt__ = lambda x, o: x._get_current_object() > o __ge__ = lambda x, o: x._get_current_object() >= o __cmp__ = lambda x, o: cmp(x._get_current_object(), o) __hash__ = lambda x: hash(x._get_current_object()) __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw) __len__ = lambda x: len(x._get_current_object()) __getitem__ = 
lambda x, i: x._get_current_object()[i] __iter__ = lambda x: iter(x._get_current_object()) __contains__ = lambda x, i: i in x._get_current_object() __add__ = lambda x, o: x._get_current_object() + o __sub__ = lambda x, o: x._get_current_object() - o __mul__ = lambda x, o: x._get_current_object() * o __floordiv__ = lambda x, o: x._get_current_object() // o __mod__ = lambda x, o: x._get_current_object() % o __divmod__ = lambda x, o: x._get_current_object().__divmod__(o) __pow__ = lambda x, o: x._get_current_object() ** o __lshift__ = lambda x, o: x._get_current_object() << o __rshift__ = lambda x, o: x._get_current_object() >> o __and__ = lambda x, o: x._get_current_object() & o __xor__ = lambda x, o: x._get_current_object() ^ o __or__ = lambda x, o: x._get_current_object() | o __div__ = lambda x, o: x._get_current_object().__div__(o) __truediv__ = lambda x, o: x._get_current_object().__truediv__(o) __neg__ = lambda x: -(x._get_current_object()) __pos__ = lambda x: +(x._get_current_object()) __abs__ = lambda x: abs(x._get_current_object()) __invert__ = lambda x: ~(x._get_current_object()) __complex__ = lambda x: complex(x._get_current_object()) __int__ = lambda x: int(x._get_current_object()) __long__ = lambda x: long(x._get_current_object()) __float__ = lambda x: float(x._get_current_object()) __oct__ = lambda x: oct(x._get_current_object()) __hex__ = lambda x: hex(x._get_current_object()) __index__ = lambda x: x._get_current_object().__index__() __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o) __enter__ = lambda x: x._get_current_object().__enter__() __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw) __radd__ = lambda x, o: o + x._get_current_object() __rsub__ = lambda x, o: o - x._get_current_object() __rmul__ = lambda x, o: o * x._get_current_object() __rdiv__ = lambda x, o: o / x._get_current_object() if PY2: __rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o) else: __rtruediv__ = __rdiv__ __rfloordiv__ = 
lambda x, o: o // x._get_current_object() __rmod__ = lambda x, o: o % x._get_current_object() __rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o)
endlessm/chromium-browser
refs/heads/master
third_party/chromite/api/gen/device/model_id_pb2.py
2
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: device/model_id.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='device/model_id.proto', package='device', syntax='proto3', serialized_options=_b('Z0go.chromium.org/chromiumos/infra/proto/go/device'), serialized_pb=_b('\n\x15\x64\x65vice/model_id.proto\x12\x06\x64\x65vice\"\x18\n\x07ModelId\x12\r\n\x05value\x18\x01 \x01(\tB2Z0go.chromium.org/chromiumos/infra/proto/go/deviceb\x06proto3') ) _MODELID = _descriptor.Descriptor( name='ModelId', full_name='device.ModelId', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='value', full_name='device.ModelId.value', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=33, serialized_end=57, ) DESCRIPTOR.message_types_by_name['ModelId'] = _MODELID _sym_db.RegisterFileDescriptor(DESCRIPTOR) ModelId = _reflection.GeneratedProtocolMessageType('ModelId', (_message.Message,), dict( DESCRIPTOR = _MODELID, __module__ = 'device.model_id_pb2' # @@protoc_insertion_point(class_scope:device.ModelId) )) _sym_db.RegisterMessage(ModelId) DESCRIPTOR._options = None # @@protoc_insertion_point(module_scope)
Kaisuke5/chainer
refs/heads/master
chainer/functions/activation/sigmoid.py
13
"""Elementwise logistic sigmoid activation for chainer (CPU and GPU paths)."""

import ctypes

import numpy

from chainer import cuda
from chainer import function
from chainer.utils import type_check

# Bind cuDNN handles once at import time; `_mode` selects the built-in
# sigmoid activation kernel.
if cuda.cudnn_enabled:
    cudnn = cuda.cudnn
    libcudnn = cudnn.cudnn
    _mode = libcudnn.CUDNN_ACTIVATION_SIGMOID


class Sigmoid(function.Function):

    """Logistic sigmoid function.

    Computes ``y = 1 / (1 + exp(-x))`` elementwise.  The forward output is
    cached on ``self.y`` because the backward pass only needs ``y``
    (``dy/dx = y * (1 - y)``), not the original input.
    """

    def __init__(self, use_cudnn=True):
        # use_cudnn: prefer the cuDNN kernel on GPU when cuDNN is available.
        self.use_cudnn = use_cudnn

    def check_type_forward(self, in_types):
        # Exactly one input, and it must be float32.
        type_check.expect(in_types.size() == 1)
        type_check.expect(in_types[0].dtype == numpy.float32)

    def forward_cpu(self, x):
        # Cache the output for reuse in backward_cpu.
        self.y = 1 / (1 + numpy.exp(-x[0]))
        return self.y,

    def forward_gpu(self, x):
        if cuda.cudnn_enabled and self.use_cudnn:
            self.y = cuda.empty_like(x[0])
            handle = cudnn.get_handle()
            # Reshape to 4-D (N, C, 1, 1) — presumably because the cuDNN
            # tensor descriptor API expects 4-D layouts; TODO confirm against
            # the cuDNN version chainer targets here.
            x_mat = x[0].reshape(x[0].shape[0], -1, 1, 1)
            desc = cudnn.create_tensor_descriptor(x_mat)
            # alpha=1, beta=0: overwrite self.y with sigmoid(x).
            libcudnn.activationForward(
                handle, _mode, ctypes.c_float(1), desc.value, x_mat.data.ptr,
                ctypes.c_float(0), desc.value, self.y.data.ptr)
        else:
            # Fallback: hand-written elementwise CUDA kernel.
            self.y = cuda.elementwise(
                'T x', 'T y', 'y = 1 / (1 + exp(-x))',
                'sigmoid_fwd')(x[0])
        return self.y,

    def backward_cpu(self, x, gy):
        # d sigmoid / dx = y * (1 - y), using the cached forward output.
        return gy[0] * self.y * (1 - self.y),

    def backward_gpu(self, x, gy):
        if cuda.cudnn_enabled and self.use_cudnn:
            gx = cuda.empty_like(x[0])
            handle = cudnn.get_handle()
            y_mat = self.y.reshape(self.y.shape[0], -1, 1, 1)
            desc = cudnn.create_tensor_descriptor(y_mat)
            # cuDNN computes gx from (y, gy, x) with alpha=1, beta=0.
            libcudnn.activationBackward(
                handle, _mode, ctypes.c_float(1), desc.value,
                y_mat.data.ptr, desc.value, gy[0].data.ptr, desc.value,
                x[0].data.ptr, ctypes.c_float(0), desc.value, gx.data.ptr)
        else:
            gx = cuda.elementwise(
                'T y, T gy', 'T gx',
                'gx = gy * y * (1 - y)',
                'sigmoid_bwd')(self.y, gy[0])
        return gx,


def sigmoid(x, use_cudnn=True):
    """Elementwise sigmoid logistic function :math:`f(x)=(1 + \\exp(-x))^{-1}`.

    Args:
        x (~chainer.Variable): Input variable.
        use_cudnn (bool): If True and CuDNN is enabled, then this function
            uses CuDNN as the core implementation.

    Returns:
        ~chainer.Variable: Output variable.

    """
    return Sigmoid(use_cudnn)(x)
fernand/scipy
refs/heads/master
scipy/sparse/tests/test_sputils.py
21
"""unit tests for sparse utility functions""" from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import (TestCase, run_module_suite, assert_equal, assert_array_equal) from scipy.sparse import sputils class TestSparseUtils(TestCase): def test_upcast(self): assert_equal(sputils.upcast('intc'),np.intc) assert_equal(sputils.upcast('int32','float32'),np.float64) assert_equal(sputils.upcast('bool',complex,float),np.complex128) assert_equal(sputils.upcast('i','d'),np.float64) def test_getdtype(self): A = np.array([1],dtype='int8') assert_equal(sputils.getdtype(None,default=float),float) assert_equal(sputils.getdtype(None,a=A),np.int8) def test_isscalarlike(self): assert_equal(sputils.isscalarlike(3.0),True) assert_equal(sputils.isscalarlike(-4),True) assert_equal(sputils.isscalarlike(2.5),True) assert_equal(sputils.isscalarlike(1 + 3j),True) assert_equal(sputils.isscalarlike(np.array(3)),True) assert_equal(sputils.isscalarlike("16"), True) assert_equal(sputils.isscalarlike(np.array([3])), False) assert_equal(sputils.isscalarlike([[3]]), False) assert_equal(sputils.isscalarlike((1,)), False) assert_equal(sputils.isscalarlike((1,2)), False) def test_isintlike(self): assert_equal(sputils.isintlike(3.0),True) assert_equal(sputils.isintlike(-4),True) assert_equal(sputils.isintlike(np.array(3)),True) assert_equal(sputils.isintlike(np.array([3])), False) assert_equal(sputils.isintlike(2.5),False) assert_equal(sputils.isintlike(1 + 3j),False) assert_equal(sputils.isintlike((1,)), False) assert_equal(sputils.isintlike((1,2)), False) def test_isshape(self): assert_equal(sputils.isshape((1,2)),True) assert_equal(sputils.isshape((5,2)),True) assert_equal(sputils.isshape((1.5,2)),False) assert_equal(sputils.isshape((2,2,2)),False) assert_equal(sputils.isshape(([2],2)),False) def test_issequence(self): assert_equal(sputils.issequence((1,)),True) assert_equal(sputils.issequence((1,2,3)),True) assert_equal(sputils.issequence([1]),True) 
assert_equal(sputils.issequence([1,2,3]),True) assert_equal(sputils.issequence(np.array([1,2,3])),True) assert_equal(sputils.issequence(np.array([[1],[2],[3]])),False) assert_equal(sputils.issequence(3),False) def test_isdense(self): assert_equal(sputils.isdense(np.array([1])),True) assert_equal(sputils.isdense(np.matrix([1])),True) def test_compat_unique(self): x = np.array([0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 3, 3,3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6,6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9,9], dtype=np.int32) y, j1 = sputils._compat_unique_impl(x, return_index=True) j2 = np.array([0, 7, 14, 21, 28, 35, 42, 49, 56, 63]) assert_array_equal(j1, j2) if __name__ == "__main__": run_module_suite()
KarolAntczak/DeepModel
refs/heads/master
deepmodel/trainers/Batch.py
1
"""Mini-batch helpers: sequential batch extraction and multiplicative noise."""

import numpy as np


def get_next_batch(train_dataset, step, batch_size):
    """Return the mini-batch of `batch_size` rows for training step `step`.

    The offset advances by `batch_size` rows per step and wraps (modulo)
    before running off the end, so every returned slice is full-sized.

    Args:
        train_dataset: 2-D array of shape (num_samples, num_features).
        step: Zero-based training-step index.
        batch_size: Number of rows per batch; must be <= num_samples.

    Returns:
        A slice of `train_dataset` with `batch_size` consecutive rows.
    """
    span = train_dataset.shape[0] - batch_size
    # Guard span == 0 (batch_size == num_samples): the original modulo
    # raised ZeroDivisionError; the whole dataset is then the only batch.
    offset = (step * batch_size) % span if span > 0 else 0
    return train_dataset[offset:(offset + batch_size), :]


def get_next_batch_noised(train_dataset, step, batch_size, noise_ratio=0.5):
    """Return the next mini-batch with elements randomly zeroed out.

    Each element is replaced by 0 with probability `noise_ratio` and kept
    with probability 1 - noise_ratio (multiplicative binary corruption, as
    used for denoising autoencoders).

    Bug fix: the probabilities were previously inverted
    (``p=[1.0 - noise_ratio, noise_ratio]``), so `noise_ratio=0` destroyed
    every element and larger ratios corrupted *less*.  The default of 0.5
    is unaffected by the fix.

    Args:
        train_dataset: 2-D array of shape (num_samples, num_features).
        step: Zero-based training-step index.
        batch_size: Number of rows per batch.
        noise_ratio: Probability in [0, 1] that an element is zeroed.

    Returns:
        A new array of shape (batch_size, num_features).
    """
    batch = get_next_batch(train_dataset, step, batch_size)
    # mask entry is 0 with probability noise_ratio, 1 otherwise.
    mask = np.random.choice([0, 1], size=batch.shape,
                            p=[noise_ratio, 1.0 - noise_ratio])
    return batch * mask
CAAD-RWTH/ClockworkForDynamo
refs/heads/master
nodes/2.x/python/Regex.SplitByRegularExpression.py
4
# Dynamo (Clockwork package) Python node: split string(s) by a regular
# expression.  `IN` and `OUT` are injected by Dynamo's Python-script node.
import clr
import re

# IN[0] is presumably a compiled regular-expression object (it only needs to
# expose .split) and IN[1] is either a single string or a list of strings --
# TODO confirm against the node's input-port definitions.
if isinstance(IN[1], list):
    # List input: produce one list of fragments per input string.
    OUT = [IN[0].split(x) for x in IN[1]]
else:
    # Single string input: produce a single list of fragments.
    OUT = IN[0].split(IN[1])
computersalat/ansible
refs/heads/devel
test/support/integration/plugins/modules/lvg.py
72
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2013, Alexander Bulimov <lazywolf0@gmail.com> # Based on lvol module by Jeroen Hoekx <jeroen.hoekx@dsquare.be> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} DOCUMENTATION = r''' --- author: - Alexander Bulimov (@abulimov) module: lvg short_description: Configure LVM volume groups description: - This module creates, removes or resizes volume groups. version_added: "1.1" options: vg: description: - The name of the volume group. type: str required: true pvs: description: - List of comma-separated devices to use as physical devices in this volume group. - Required when creating or resizing volume group. - The module will take care of running pvcreate if needed. type: list pesize: description: - "The size of the physical extent. I(pesize) must be a power of 2 of at least 1 sector (where the sector size is the largest sector size of the PVs currently used in the VG), or at least 128KiB." - Since Ansible 2.6, pesize can be optionally suffixed by a UNIT (k/K/m/M/g/G), default unit is megabyte. type: str default: "4" pv_options: description: - Additional options to pass to C(pvcreate) when creating the volume group. type: str version_added: "2.4" vg_options: description: - Additional options to pass to C(vgcreate) when creating the volume group. type: str version_added: "1.6" state: description: - Control if the volume group exists. type: str choices: [ absent, present ] default: present force: description: - If C(yes), allows to remove volume group with logical volumes. type: bool default: no seealso: - module: filesystem - module: lvol - module: parted notes: - This module does not modify PE size for already present volume group. 
''' EXAMPLES = r''' - name: Create a volume group on top of /dev/sda1 with physical extent size = 32MB lvg: vg: vg.services pvs: /dev/sda1 pesize: 32 - name: Create a volume group on top of /dev/sdb with physical extent size = 128KiB lvg: vg: vg.services pvs: /dev/sdb pesize: 128K # If, for example, we already have VG vg.services on top of /dev/sdb1, # this VG will be extended by /dev/sdc5. Or if vg.services was created on # top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5, # and then reduce by /dev/sda5. - name: Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5. lvg: vg: vg.services pvs: /dev/sdb1,/dev/sdc5 - name: Remove a volume group with name vg.services lvg: vg: vg.services state: absent ''' import itertools import os from ansible.module_utils.basic import AnsibleModule def parse_vgs(data): vgs = [] for line in data.splitlines(): parts = line.strip().split(';') vgs.append({ 'name': parts[0], 'pv_count': int(parts[1]), 'lv_count': int(parts[2]), }) return vgs def find_mapper_device_name(module, dm_device): dmsetup_cmd = module.get_bin_path('dmsetup', True) mapper_prefix = '/dev/mapper/' rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device)) if rc != 0: module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err) mapper_device = mapper_prefix + dm_name.rstrip() return mapper_device def parse_pvs(module, data): pvs = [] dm_prefix = '/dev/dm-' for line in data.splitlines(): parts = line.strip().split(';') if parts[0].startswith(dm_prefix): parts[0] = find_mapper_device_name(module, parts[0]) pvs.append({ 'name': parts[0], 'vg_name': parts[1], }) return pvs def main(): module = AnsibleModule( argument_spec=dict( vg=dict(type='str', required=True), pvs=dict(type='list'), pesize=dict(type='str', default='4'), pv_options=dict(type='str', default=''), vg_options=dict(type='str', default=''), state=dict(type='str', default='present', choices=['absent', 'present']), 
force=dict(type='bool', default=False), ), supports_check_mode=True, ) vg = module.params['vg'] state = module.params['state'] force = module.boolean(module.params['force']) pesize = module.params['pesize'] pvoptions = module.params['pv_options'].split() vgoptions = module.params['vg_options'].split() dev_list = [] if module.params['pvs']: dev_list = list(module.params['pvs']) elif state == 'present': module.fail_json(msg="No physical volumes given.") # LVM always uses real paths not symlinks so replace symlinks with actual path for idx, dev in enumerate(dev_list): dev_list[idx] = os.path.realpath(dev) if state == 'present': # check given devices for test_dev in dev_list: if not os.path.exists(test_dev): module.fail_json(msg="Device %s not found." % test_dev) # get pv list pvs_cmd = module.get_bin_path('pvs', True) if dev_list: pvs_filter_pv_name = ' || '.join( 'pv_name = {0}'.format(x) for x in itertools.chain(dev_list, module.params['pvs']) ) pvs_filter_vg_name = 'vg_name = {0}'.format(vg) pvs_filter = "--select '{0} || {1}' ".format(pvs_filter_pv_name, pvs_filter_vg_name) else: pvs_filter = '' rc, current_pvs, err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';' %s" % (pvs_cmd, pvs_filter)) if rc != 0: module.fail_json(msg="Failed executing pvs command.", rc=rc, err=err) # check pv for devices pvs = parse_pvs(module, current_pvs) used_pvs = [pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg] if used_pvs: module.fail_json(msg="Device %s is already in %s volume group." 
% (used_pvs[0]['name'], used_pvs[0]['vg_name'])) vgs_cmd = module.get_bin_path('vgs', True) rc, current_vgs, err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd) if rc != 0: module.fail_json(msg="Failed executing vgs command.", rc=rc, err=err) changed = False vgs = parse_vgs(current_vgs) for test_vg in vgs: if test_vg['name'] == vg: this_vg = test_vg break else: this_vg = None if this_vg is None: if state == 'present': # create VG if module.check_mode: changed = True else: # create PV pvcreate_cmd = module.get_bin_path('pvcreate', True) for current_dev in dev_list: rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) if rc == 0: changed = True else: module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) vgcreate_cmd = module.get_bin_path('vgcreate') rc, _, err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', pesize, vg] + dev_list) if rc == 0: changed = True else: module.fail_json(msg="Creating volume group '%s' failed" % vg, rc=rc, err=err) else: if state == 'absent': if module.check_mode: module.exit_json(changed=True) else: if this_vg['lv_count'] == 0 or force: # remove VG vgremove_cmd = module.get_bin_path('vgremove', True) rc, _, err = module.run_command("%s --force %s" % (vgremove_cmd, vg)) if rc == 0: module.exit_json(changed=True) else: module.fail_json(msg="Failed to remove volume group %s" % (vg), rc=rc, err=err) else: module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes" % (vg)) # resize VG current_devs = [os.path.realpath(pv['name']) for pv in pvs if pv['vg_name'] == vg] devs_to_remove = list(set(current_devs) - set(dev_list)) devs_to_add = list(set(dev_list) - set(current_devs)) if devs_to_add or devs_to_remove: if module.check_mode: changed = True else: if devs_to_add: devs_to_add_string = ' '.join(devs_to_add) # create PV pvcreate_cmd = module.get_bin_path('pvcreate', True) for current_dev in 
devs_to_add: rc, _, err = module.run_command([pvcreate_cmd] + pvoptions + ['-f', str(current_dev)]) if rc == 0: changed = True else: module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) # add PV to our VG vgextend_cmd = module.get_bin_path('vgextend', True) rc, _, err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string)) if rc == 0: changed = True else: module.fail_json(msg="Unable to extend %s by %s." % (vg, devs_to_add_string), rc=rc, err=err) # remove some PV from our VG if devs_to_remove: devs_to_remove_string = ' '.join(devs_to_remove) vgreduce_cmd = module.get_bin_path('vgreduce', True) rc, _, err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string)) if rc == 0: changed = True else: module.fail_json(msg="Unable to reduce %s by %s." % (vg, devs_to_remove_string), rc=rc, err=err) module.exit_json(changed=changed) if __name__ == '__main__': main()
kartikp1995/gnuradio
refs/heads/master
gr-fec/python/fec/LDPC/Generate_LDPC_matrix.py
18
#!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

# NOTE(review): this script uses Python 2 print statements and a star
# import; it supplies LDPC_matrix, get_full_rank_H_matrix, get_best_matrix,
# write_alist_file and linalg from the functions module below.
from Generate_LDPC_matrix_functions import *

# This is an example of how to generate a parity check matrix for
# use with the LDPC Richardson Urbanke encoder. A significant amount
# of matrix manipulation is required, so this process should be done
# before using the encoder at run-time. This process can take quite
# a while, with more time required for larger matrices.

# Not all attempts to create a parity check matrix will be
# successful. The script will terminate and output error messages
# when the process fails. To increase verbosity, edit the verbose
# variable at the top of Generate_LDPC_matrix_functions.py.

# Because random number generation and shuffling methods are used, it
# is not possible to predict what starting conditions will result in
# success. It requires a bit of trial and error.

# ----------------------------------------------------------------- #

# First, generate a regular LDPC parity check matrix. Specify
# the properties desired. For example:
n = 200  # number of columns, corresponds to codeword length
p = 3    # column weight
q = 5    # row weight

parity_check_matrix = LDPC_matrix(n_p_q = [n,p,q])

# Richardson and Urbanke's preprocessing method requires a full rank
# matrix to start. The matrices generated by the
# regular_LDPC_code_contructor function will never be full rank. So,
# use the get_full_rank_H_matrix function.
newH = get_full_rank_H_matrix(parity_check_matrix.H)

# At this point, the matrix is no longer regular. (The row/column
# weights are not the same for all rows/columns.)

# Next, some preprocessing steps need to be performed as described in
# Richardson and Urbanke in Modern Coding Theory, Appendix A. This
# can take a while...  (the 100 below is the number of attempts --
# presumably; confirm against get_best_matrix's signature.)
[bestH,g] = get_best_matrix(newH,100)

# Print out some of the resulting properties.
n = bestH.shape[1]
k = n - bestH.shape[0]
print "Parity check matrix properties:"
print "\tSize :", bestH.shape
print "\tRank :", linalg.matrix_rank(bestH)
print "\tRate : %.3f" % ((k*1.0)/n)
print "\tn    :", n, " (codeword length)"
print "\tk    :", k, " (info word length)"
print "\tgap  : %i" % g

# Save the matrix to an alist file for future use:
alist_filename = "n_%04i_k_%04i_gap_%02i.alist" % (n,k,g)
write_alist_file(alist_filename,bestH)
print '\nMatrix saved to alist file:', alist_filename, "\n"
MobinRanjbar/hue
refs/heads/master
desktop/core/ext-py/Paste-2.0.1/paste/recursive.py
50
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org) # Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php """ Middleware to make internal requests and forward requests internally. When applied, several keys are added to the environment that will allow you to trigger recursive redirects and forwards. paste.recursive.include: When you call ``environ['paste.recursive.include'](new_path_info)`` a response will be returned. The response has a ``body`` attribute, a ``status`` attribute, and a ``headers`` attribute. paste.recursive.script_name: The ``SCRIPT_NAME`` at the point that recursive lives. Only paths underneath this path can be redirected to. paste.recursive.old_path_info: A list of previous ``PATH_INFO`` values from previous redirects. Raise ``ForwardRequestException(new_path_info)`` to do a forward (aborting the current request). """ import six import warnings from six.moves import cStringIO as StringIO __all__ = ['RecursiveMiddleware'] __pudge_all__ = ['RecursiveMiddleware', 'ForwardRequestException'] class RecursionLoop(AssertionError): # Subclasses AssertionError for legacy reasons """Raised when a recursion enters into a loop""" class CheckForRecursionMiddleware(object): def __init__(self, app, env): self.app = app self.env = env def __call__(self, environ, start_response): path_info = environ.get('PATH_INFO','') if path_info in self.env.get( 'paste.recursive.old_path_info', []): raise RecursionLoop( "Forwarding loop detected; %r visited twice (internal " "redirect path: %s)" % (path_info, self.env['paste.recursive.old_path_info'])) old_path_info = self.env.setdefault('paste.recursive.old_path_info', []) old_path_info.append(self.env.get('PATH_INFO', '')) return self.app(environ, start_response) class RecursiveMiddleware(object): """ A WSGI middleware that allows for recursive and forwarded calls. 
All these calls go to the same 'application', but presumably that application acts differently with different URLs. The forwarded URLs must be relative to this container. Interface is entirely through the ``paste.recursive.forward`` and ``paste.recursive.include`` environmental keys. """ def __init__(self, application, global_conf=None): self.application = application def __call__(self, environ, start_response): environ['paste.recursive.forward'] = Forwarder( self.application, environ, start_response) environ['paste.recursive.include'] = Includer( self.application, environ, start_response) environ['paste.recursive.include_app_iter'] = IncluderAppIter( self.application, environ, start_response) my_script_name = environ.get('SCRIPT_NAME', '') environ['paste.recursive.script_name'] = my_script_name try: return self.application(environ, start_response) except ForwardRequestException as e: middleware = CheckForRecursionMiddleware( e.factory(self), environ) return middleware(environ, start_response) class ForwardRequestException(Exception): """ Used to signal that a request should be forwarded to a different location. ``url`` The URL to forward to starting with a ``/`` and relative to ``RecursiveMiddleware``. URL fragments can also contain query strings so ``/error?code=404`` would be a valid URL fragment. ``environ`` An altertative WSGI environment dictionary to use for the forwarded request. If specified is used *instead* of the ``url_fragment`` ``factory`` If specifed ``factory`` is used instead of ``url`` or ``environ``. ``factory`` is a callable that takes a WSGI application object as the first argument and returns an initialised WSGI middleware which can alter the forwarded response. Basic usage (must have ``RecursiveMiddleware`` present) : .. 
code-block:: python from paste.recursive import ForwardRequestException def app(environ, start_response): if environ['PATH_INFO'] == '/hello': start_response("200 OK", [('Content-type', 'text/plain')]) return [b'Hello World!'] elif environ['PATH_INFO'] == '/error': start_response("404 Not Found", [('Content-type', 'text/plain')]) return [b'Page not found'] else: raise ForwardRequestException('/error') from paste.recursive import RecursiveMiddleware app = RecursiveMiddleware(app) If you ran this application and visited ``/hello`` you would get a ``Hello World!`` message. If you ran the application and visited ``/not_found`` a ``ForwardRequestException`` would be raised and the caught by the ``RecursiveMiddleware``. The ``RecursiveMiddleware`` would then return the headers and response from the ``/error`` URL but would display a ``404 Not found`` status message. You could also specify an ``environ`` dictionary instead of a url. Using the same example as before: .. code-block:: python def app(environ, start_response): ... same as previous example ... else: new_environ = environ.copy() new_environ['PATH_INFO'] = '/error' raise ForwardRequestException(environ=new_environ) Finally, if you want complete control over every aspect of the forward you can specify a middleware factory. For example to keep the old status code but use the headers and resposne body from the forwarded response you might do this: .. 
code-block:: python from paste.recursive import ForwardRequestException from paste.recursive import RecursiveMiddleware from paste.errordocument import StatusKeeper def app(environ, start_response): if environ['PATH_INFO'] == '/hello': start_response("200 OK", [('Content-type', 'text/plain')]) return [b'Hello World!'] elif environ['PATH_INFO'] == '/error': start_response("404 Not Found", [('Content-type', 'text/plain')]) return [b'Page not found'] else: def factory(app): return StatusKeeper(app, status='404 Not Found', url='/error') raise ForwardRequestException(factory=factory) app = RecursiveMiddleware(app) """ def __init__( self, url=None, environ={}, factory=None, path_info=None): # Check no incompatible options have been chosen if factory and url: raise TypeError( 'You cannot specify factory and a url in ' 'ForwardRequestException') elif factory and environ: raise TypeError( 'You cannot specify factory and environ in ' 'ForwardRequestException') if url and environ: raise TypeError( 'You cannot specify environ and url in ' 'ForwardRequestException') # set the path_info or warn about its use. if path_info: if not url: warnings.warn( "ForwardRequestException(path_info=...) has been deprecated; please " "use ForwardRequestException(url=...)", DeprecationWarning, 2) else: raise TypeError('You cannot use url and path_info in ForwardRequestException') self.path_info = path_info # If the url can be treated as a path_info do that if url and not '?' 
in str(url): self.path_info = url # Base middleware class ForwardRequestExceptionMiddleware(object): def __init__(self, app): self.app = app # Otherwise construct the appropriate middleware factory if hasattr(self, 'path_info'): p = self.path_info def factory_(app): class PathInfoForward(ForwardRequestExceptionMiddleware): def __call__(self, environ, start_response): environ['PATH_INFO'] = p return self.app(environ, start_response) return PathInfoForward(app) self.factory = factory_ elif url: def factory_(app): class URLForward(ForwardRequestExceptionMiddleware): def __call__(self, environ, start_response): environ['PATH_INFO'] = url.split('?')[0] environ['QUERY_STRING'] = url.split('?')[1] return self.app(environ, start_response) return URLForward(app) self.factory = factory_ elif environ: def factory_(app): class EnvironForward(ForwardRequestExceptionMiddleware): def __call__(self, environ_, start_response): return self.app(environ, start_response) return EnvironForward(app) self.factory = factory_ else: self.factory = factory class Recursive(object): def __init__(self, application, environ, start_response): self.application = application self.original_environ = environ.copy() self.previous_environ = environ self.start_response = start_response def __call__(self, path, extra_environ=None): """ `extra_environ` is an optional dictionary that is also added to the forwarded request. E.g., ``{'HTTP_HOST': 'new.host'}`` could be used to forward to a different virtual host. 
""" environ = self.original_environ.copy() if extra_environ: environ.update(extra_environ) environ['paste.recursive.previous_environ'] = self.previous_environ base_path = self.original_environ.get('SCRIPT_NAME') if path.startswith('/'): assert path.startswith(base_path), ( "You can only forward requests to resources under the " "path %r (not %r)" % (base_path, path)) path = path[len(base_path)+1:] assert not path.startswith('/') path_info = '/' + path environ['PATH_INFO'] = path_info environ['REQUEST_METHOD'] = 'GET' environ['CONTENT_LENGTH'] = '0' environ['CONTENT_TYPE'] = '' environ['wsgi.input'] = StringIO('') return self.activate(environ) def activate(self, environ): raise NotImplementedError def __repr__(self): return '<%s.%s from %s>' % ( self.__class__.__module__, self.__class__.__name__, self.original_environ.get('SCRIPT_NAME') or '/') class Forwarder(Recursive): """ The forwarder will try to restart the request, except with the new `path` (replacing ``PATH_INFO`` in the request). It must not be called after and headers have been returned. It returns an iterator that must be returned back up the call stack, so it must be used like: .. code-block:: python return environ['paste.recursive.forward'](path) Meaningful transformations cannot be done, since headers are sent directly to the server and cannot be inspected or rewritten. """ def activate(self, environ): warnings.warn( "recursive.Forwarder has been deprecated; please use " "ForwardRequestException", DeprecationWarning, 2) return self.application(environ, self.start_response) class Includer(Recursive): """ Starts another request with the given path and adding or overwriting any values in the `extra_environ` dictionary. Returns an IncludeResponse object. 
""" def activate(self, environ): response = IncludedResponse() def start_response(status, headers, exc_info=None): if exc_info: six.reraise(exc_info[0], exc_info[1], exc_info[2]) response.status = status response.headers = headers return response.write app_iter = self.application(environ, start_response) try: for s in app_iter: response.write(s) finally: if hasattr(app_iter, 'close'): app_iter.close() response.close() return response class IncludedResponse(object): def __init__(self): self.headers = None self.status = None self.output = StringIO() self.str = None def close(self): self.str = self.output.getvalue() self.output.close() self.output = None def write(self, s): assert self.output is not None, ( "This response has already been closed and no further data " "can be written.") self.output.write(s) def __str__(self): return self.body def body__get(self): if self.str is None: return self.output.getvalue() else: return self.str body = property(body__get) class IncluderAppIter(Recursive): """ Like Includer, but just stores the app_iter response (be sure to call close on the response!) """ def activate(self, environ): response = IncludedAppIterResponse() def start_response(status, headers, exc_info=None): if exc_info: six.reraise(exc_info[0], exc_info[1], exc_info[2]) response.status = status response.headers = headers return response.write app_iter = self.application(environ, start_response) response.app_iter = app_iter return response class IncludedAppIterResponse(object): def __init__(self): self.status = None self.headers = None self.accumulated = [] self.app_iter = None self._closed = False def close(self): assert not self._closed, ( "Tried to close twice") if hasattr(self.app_iter, 'close'): self.app_iter.close() def write(self, s): self.accumulated.append def make_recursive_middleware(app, global_conf): return RecursiveMiddleware(app) make_recursive_middleware.__doc__ = __doc__
martonw/phantomjs
refs/heads/master
src/breakpad/src/third_party/protobuf/protobuf/python/ez_setup.py
454
#!python # This file was obtained from: # http://peak.telecommunity.com/dist/ez_setup.py # on 2011/1/21. """Bootstrap setuptools installation If you want to use setuptools in your package's setup.py, just include this file in the same directory with it, and add this to the top of your setup.py:: from ez_setup import use_setuptools use_setuptools() If you want to require a specific version of setuptools, set a download mirror, or use an alternate download directory, you can do so by supplying the appropriate options to ``use_setuptools()``. This file can also be run as a script to install or upgrade setuptools. """ import sys DEFAULT_VERSION = "0.6c11" DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3] md5_data = { 'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca', 'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb', 'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b', 'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a', 'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618', 'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac', 'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5', 'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4', 'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c', 'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b', 'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090', 'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4', 'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7', 'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5', 'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de', 'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b', 'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2', 'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086', 'setuptools-0.6c2-py2.3.egg': 
'f0064bf6aa2b7d0f3ba0b43f20817c27', 'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277', 'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa', 'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e', 'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e', 'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f', 'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2', 'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc', 'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167', 'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64', 'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d', 'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20', 'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab', 'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53', 'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2', 'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e', 'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372', 'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902', 'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de', 'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b', 'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03', 'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a', 'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6', 'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a', } import sys, os try: from hashlib import md5 except ImportError: from md5 import md5 def _validate_md5(egg_name, data): if egg_name in md5_data: digest = md5(data).hexdigest() if digest != md5_data[egg_name]: print >>sys.stderr, ( "md5 validation of %s failed! 
(Possible download problem?)" % egg_name ) sys.exit(2) return data def use_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, download_delay=15 ): """Automatically find/download setuptools and make it available on sys.path `version` should be a valid setuptools version number that is available as an egg for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where setuptools will be downloaded, if it is not already available. If `download_delay` is specified, it should be the number of seconds that will be paused before initiating a download, should one be required. If an older version of setuptools is installed, this routine will print a message to ``sys.stderr`` and raise SystemExit in an attempt to abort the calling script. """ was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules def do_download(): egg = download_setuptools(version, download_base, to_dir, download_delay) sys.path.insert(0, egg) import setuptools; setuptools.bootstrap_install_from = egg try: import pkg_resources except ImportError: return do_download() try: pkg_resources.require("setuptools>="+version); return except pkg_resources.VersionConflict, e: if was_imported: print >>sys.stderr, ( "The required version of setuptools (>=%s) is not available, and\n" "can't be installed while this script is running. Please install\n" " a more recent version first, using 'easy_install -U setuptools'." 
"\n\n(Currently using %r)" ) % (version, e.args[0]) sys.exit(2) except pkg_resources.DistributionNotFound: pass del pkg_resources, sys.modules['pkg_resources'] # reload ok return do_download() def download_setuptools( version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir, delay = 15 ): """Download setuptools from a specified location and return its filename `version` should be a valid setuptools version number that is available as an egg for download under the `download_base` URL (which should end with a '/'). `to_dir` is the directory where the egg will be downloaded. `delay` is the number of seconds to pause before an actual download attempt. """ import urllib2, shutil egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3]) url = download_base + egg_name saveto = os.path.join(to_dir, egg_name) src = dst = None if not os.path.exists(saveto): # Avoid repeated downloads try: from distutils import log if delay: log.warn(""" --------------------------------------------------------------------------- This script requires setuptools version %s to run (even to display help). I will attempt to download it for you (from %s), but you may need to enable firewall access for this script first. I will start the download in %d seconds. (Note: if this machine does not have network access, please obtain the file %s and place it in this directory before rerunning this script.) ---------------------------------------------------------------------------""", version, download_base, delay, url ); from time import sleep; sleep(delay) log.warn("Downloading %s", url) src = urllib2.urlopen(url) # Read/write all in one block, so we don't create a corrupt file # if the download is interrupted. 
data = _validate_md5(egg_name, src.read()) dst = open(saveto,"wb"); dst.write(data) finally: if src: src.close() if dst: dst.close() return os.path.realpath(saveto) def main(argv, version=DEFAULT_VERSION): """Install or upgrade setuptools and EasyInstall""" try: import setuptools except ImportError: egg = None try: egg = download_setuptools(version, delay=0) sys.path.insert(0,egg) from setuptools.command.easy_install import main return main(list(argv)+[egg]) # we're done here finally: if egg and os.path.exists(egg): os.unlink(egg) else: if setuptools.__version__ == '0.0.1': print >>sys.stderr, ( "You have an obsolete version of setuptools installed. Please\n" "remove it from your system entirely before rerunning this script." ) sys.exit(2) req = "setuptools>="+version import pkg_resources try: pkg_resources.require(req) except pkg_resources.VersionConflict: try: from setuptools.command.easy_install import main except ImportError: from easy_install import main main(list(argv)+[download_setuptools(delay=0)]) sys.exit(0) # try to force an exit else: if argv: from setuptools.command.easy_install import main main(argv) else: print "Setuptools version",version,"or greater has been installed." print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)' def update_md5(filenames): """Update our built-in md5 registry""" import re for name in filenames: base = os.path.basename(name) f = open(name,'rb') md5_data[base] = md5(f.read()).hexdigest() f.close() data = [" %r: %r,\n" % it for it in md5_data.items()] data.sort() repl = "".join(data) import inspect srcfile = inspect.getsourcefile(sys.modules[__name__]) f = open(srcfile, 'rb'); src = f.read(); f.close() match = re.search("\nmd5_data = {\n([^}]+)}", src) if not match: print >>sys.stderr, "Internal error!" 
sys.exit(2) src = src[:match.start(1)] + repl + src[match.end(1):] f = open(srcfile,'w') f.write(src) f.close() if __name__=='__main__': if len(sys.argv)>2 and sys.argv[1]=='--md5update': update_md5(sys.argv[2:]) else: main(sys.argv[1:])
dednal/chromium.src
refs/heads/nw12
build/android/pylib/base/base_test_result_unittest.py
134
# Copyright (c) 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Unittests for TestRunResults.""" import unittest from pylib.base.base_test_result import BaseTestResult from pylib.base.base_test_result import TestRunResults from pylib.base.base_test_result import ResultType class TestTestRunResults(unittest.TestCase): def setUp(self): self.p1 = BaseTestResult('p1', ResultType.PASS, log='pass1') other_p1 = BaseTestResult('p1', ResultType.PASS) self.p2 = BaseTestResult('p2', ResultType.PASS) self.f1 = BaseTestResult('f1', ResultType.FAIL, log='failure1') self.c1 = BaseTestResult('c1', ResultType.CRASH, log='crash1') self.u1 = BaseTestResult('u1', ResultType.UNKNOWN) self.tr = TestRunResults() self.tr.AddResult(self.p1) self.tr.AddResult(other_p1) self.tr.AddResult(self.p2) self.tr.AddResults(set([self.f1, self.c1, self.u1])) def testGetAll(self): self.assertFalse( self.tr.GetAll().symmetric_difference( [self.p1, self.p2, self.f1, self.c1, self.u1])) def testGetPass(self): self.assertFalse(self.tr.GetPass().symmetric_difference( [self.p1, self.p2])) def testGetNotPass(self): self.assertFalse(self.tr.GetNotPass().symmetric_difference( [self.f1, self.c1, self.u1])) def testGetAddTestRunResults(self): tr2 = TestRunResults() other_p1 = BaseTestResult('p1', ResultType.PASS) f2 = BaseTestResult('f2', ResultType.FAIL) tr2.AddResult(other_p1) tr2.AddResult(f2) tr2.AddTestRunResults(self.tr) self.assertFalse( tr2.GetAll().symmetric_difference( [self.p1, self.p2, self.f1, self.c1, self.u1, f2])) def testGetLogs(self): log_print = ('[FAIL] f1:\n' 'failure1\n' '[CRASH] c1:\n' 'crash1') self.assertEqual(self.tr.GetLogs(), log_print) def testGetShortForm(self): short_print = ('ALL: 5 PASS: 2 FAIL: 1 ' 'CRASH: 1 TIMEOUT: 0 UNKNOWN: 1 ') self.assertEqual(self.tr.GetShortForm(), short_print) def testGetGtestForm(self): gtest_print = ('[==========] 5 tests ran.\n' '[ PASSED ] 2 
tests.\n' '[ FAILED ] 3 tests, listed below:\n' '[ FAILED ] f1\n' '[ FAILED ] c1 (CRASHED)\n' '[ FAILED ] u1 (UNKNOWN)\n' '\n' '3 FAILED TESTS') self.assertEqual(gtest_print, self.tr.GetGtestForm()) def testRunPassed(self): self.assertFalse(self.tr.DidRunPass()) tr2 = TestRunResults() self.assertTrue(tr2.DidRunPass()) if __name__ == '__main__': unittest.main()
SnappleCap/oh-mainline
refs/heads/master
vendor/packages/docutils/docutils/parsers/rst/languages/pt_br.py
128
# $Id: pt_br.py 7119 2011-09-02 13:00:23Z milde $ # Author: David Goodger <goodger@python.org> # Copyright: This module has been placed in the public domain. # New language mappings are welcome. Before doing a new translation, please # read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be # translated for each language: one in docutils/languages, the other in # docutils/parsers/rst/languages. """ Brazilian Portuguese-language mappings for language-dependent features of reStructuredText. """ __docformat__ = 'reStructuredText' directives = { # language-dependent: fixed u'aten\u00E7\u00E3o': 'attention', 'cuidado': 'caution', u'code (translation required)': 'code', 'perigo': 'danger', 'erro': 'error', u'sugest\u00E3o': 'hint', 'importante': 'important', 'nota': 'note', 'dica': 'tip', 'aviso': 'warning', u'exorta\u00E7\u00E3o': 'admonition', 'barra-lateral': 'sidebar', u't\u00F3pico': 'topic', 'bloco-de-linhas': 'line-block', 'literal-interpretado': 'parsed-literal', 'rubrica': 'rubric', u'ep\u00EDgrafo': 'epigraph', 'destaques': 'highlights', u'cita\u00E7\u00E3o-destacada': 'pull-quote', u'compound (translation required)': 'compound', u'container (translation required)': 'container', #'perguntas': 'questions', #'qa': 'questions', #'faq': 'questions', u'table (translation required)': 'table', u'csv-table (translation required)': 'csv-table', u'list-table (translation required)': 'list-table', 'meta': 'meta', 'math (translation required)': 'math', #'imagemap': 'imagemap', 'imagem': 'image', 'figura': 'figure', u'inclus\u00E3o': 'include', 'cru': 'raw', u'substitui\u00E7\u00E3o': 'replace', 'unicode': 'unicode', 'data': 'date', 'classe': 'class', 'role (translation required)': 'role', u'default-role (translation required)': 'default-role', u'title (translation required)': 'title', u'\u00EDndice': 'contents', 'numsec': 'sectnum', u'numera\u00E7\u00E3o-de-se\u00E7\u00F5es': 'sectnum', u'header (translation required)': 'header', u'footer (translation 
required)': 'footer', #u'notas-de-rorap\u00E9': 'footnotes', #u'cita\u00E7\u00F5es': 'citations', u'links-no-rodap\u00E9': 'target-notes', 'restructuredtext-test-directive': 'restructuredtext-test-directive'} """Brazilian Portuguese name to registered (in directives/__init__.py) directive name mapping.""" roles = { # language-dependent: fixed u'abbrevia\u00E7\u00E3o': 'abbreviation', 'ab': 'abbreviation', u'acr\u00F4nimo': 'acronym', 'ac': 'acronym', u'code (translation required)': 'code', u'\u00EDndice-remissivo': 'index', 'i': 'index', 'subscrito': 'subscript', 'sub': 'subscript', 'sobrescrito': 'superscript', 'sob': 'superscript', u'refer\u00EAncia-a-t\u00EDtulo': 'title-reference', u't\u00EDtulo': 'title-reference', 't': 'title-reference', u'refer\u00EAncia-a-pep': 'pep-reference', 'pep': 'pep-reference', u'refer\u00EAncia-a-rfc': 'rfc-reference', 'rfc': 'rfc-reference', u'\u00EAnfase': 'emphasis', 'forte': 'strong', 'literal': 'literal', 'math (translation required)': 'math', # translation required? u'refer\u00EAncia-por-nome': 'named-reference', u'refer\u00EAncia-an\u00F4nima': 'anonymous-reference', u'refer\u00EAncia-a-nota-de-rodap\u00E9': 'footnote-reference', u'refer\u00EAncia-a-cita\u00E7\u00E3o': 'citation-reference', u'refer\u00EAncia-a-substitui\u00E7\u00E3o': 'substitution-reference', 'alvo': 'target', u'refer\u00EAncia-a-uri': 'uri-reference', 'uri': 'uri-reference', 'url': 'uri-reference', 'cru': 'raw',} """Mapping of Brazilian Portuguese role names to canonical role names for interpreted text."""
timrichardson/google_charts_plugin
refs/heads/master
controllers/appadmin.py
9
# -*- coding: utf-8 -*- # ########################################################## # ## make sure administrator is on localhost # ########################################################### import os import socket import datetime import copy import gluon.contenttype import gluon.fileutils try: import pygraphviz as pgv except ImportError: pgv = None # ## critical --- make a copy of the environment global_env = copy.copy(globals()) global_env['datetime'] = datetime http_host = request.env.http_host.split(':')[0] remote_addr = request.env.remote_addr try: hosts = (http_host, socket.gethostname(), socket.gethostbyname(http_host), '::1', '127.0.0.1', '::ffff:127.0.0.1') except: hosts = (http_host, ) if request.env.http_x_forwarded_for or request.is_https: session.secure() elif (remote_addr not in hosts) and (remote_addr != "127.0.0.1") and \ (request.function != 'manage'): raise HTTP(200, T('appadmin is disabled because insecure channel')) if request.function == 'manage': if not 'auth' in globals() or not request.args: redirect(URL(request.controller, 'index')) manager_action = auth.settings.manager_actions.get(request.args(0), None) if manager_action is None and request.args(0) == 'auth': manager_action = dict(role=auth.settings.auth_manager_role, heading=T('Manage Access Control'), tables=[auth.table_user(), auth.table_group(), auth.table_permission()]) manager_role = manager_action.get('role', None) if manager_action else None auth.requires_membership(manager_role)(lambda: None)() menu = False elif (request.application == 'admin' and not session.authorized) or \ (request.application != 'admin' and not gluon.fileutils.check_credentials(request)): redirect(URL('admin', 'default', 'index', vars=dict(send=URL(args=request.args, vars=request.vars)))) else: response.subtitle = 'Database Administration (appadmin)' menu = True ignore_rw = True response.view = 'appadmin.html' if menu: response.menu = [[T('design'), False, URL('admin', 'default', 'design', 
args=[request.application])], [T('db'), False, URL('index')], [T('state'), False, URL('state')], [T('cache'), False, URL('ccache')]] # ########################################################## # ## auxiliary functions # ########################################################### if False and request.tickets_db: from gluon.restricted import TicketStorage ts = TicketStorage() ts._get_table(request.tickets_db, ts.tablename, request.application) def get_databases(request): dbs = {} for (key, value) in global_env.items(): cond = False try: cond = isinstance(value, GQLDB) except: cond = isinstance(value, SQLDB) if cond: dbs[key] = value return dbs databases = get_databases(None) def eval_in_global_env(text): exec ('_ret=%s' % text, {}, global_env) return global_env['_ret'] def get_database(request): if request.args and request.args[0] in databases: return eval_in_global_env(request.args[0]) else: session.flash = T('invalid request') redirect(URL('index')) def get_table(request): db = get_database(request) if len(request.args) > 1 and request.args[1] in db.tables: return (db, request.args[1]) else: session.flash = T('invalid request') redirect(URL('index')) def get_query(request): try: return eval_in_global_env(request.vars.query) except Exception: return None def query_by_table_type(tablename, db, request=request): keyed = hasattr(db[tablename], '_primarykey') if keyed: firstkey = db[tablename][db[tablename]._primarykey[0]] cond = '>0' if firstkey.type in ['string', 'text']: cond = '!=""' qry = '%s.%s.%s%s' % ( request.args[0], request.args[1], firstkey.name, cond) else: qry = '%s.%s.id>0' % tuple(request.args[:2]) return qry # ########################################################## # ## list all databases and tables # ########################################################### def index(): return dict(databases=databases) # ########################################################## # ## insert a new record # 
########################################################### def insert(): (db, table) = get_table(request) form = SQLFORM(db[table], ignore_rw=ignore_rw) if form.accepts(request.vars, session): response.flash = T('new record inserted') return dict(form=form, table=db[table]) # ########################################################## # ## list all records in table and insert new record # ########################################################### def download(): import os db = get_database(request) return response.download(request, db) def csv(): import gluon.contenttype response.headers['Content-Type'] = \ gluon.contenttype.contenttype('.csv') db = get_database(request) query = get_query(request) if not query: return None response.headers['Content-disposition'] = 'attachment; filename=%s_%s.csv'\ % tuple(request.vars.query.split('.')[:2]) return str(db(query, ignore_common_filters=True).select()) def import_csv(table, file): table.import_from_csv_file(file) def select(): import re db = get_database(request) dbname = request.args[0] regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>\d+)') if len(request.args) > 1 and hasattr(db[request.args[1]], '_primarykey'): regex = re.compile('(?P<table>\w+)\.(?P<field>\w+)=(?P<value>.+)') if request.vars.query: match = regex.match(request.vars.query) if match: request.vars.query = '%s.%s.%s==%s' % (request.args[0], match.group('table'), match.group('field'), match.group('value')) else: request.vars.query = session.last_query query = get_query(request) if request.vars.start: start = int(request.vars.start) else: start = 0 nrows = 0 stop = start + 100 table = None rows = [] orderby = request.vars.orderby if orderby: orderby = dbname + '.' 
+ orderby if orderby == session.last_orderby: if orderby[0] == '~': orderby = orderby[1:] else: orderby = '~' + orderby session.last_orderby = orderby session.last_query = request.vars.query form = FORM(TABLE(TR(T('Query:'), '', INPUT(_style='width:400px', _name='query', _value=request.vars.query or '', requires=IS_NOT_EMPTY( error_message=T("Cannot be empty")))), TR(T('Update:'), INPUT(_name='update_check', _type='checkbox', value=False), INPUT(_style='width:400px', _name='update_fields', _value=request.vars.update_fields or '')), TR(T('Delete:'), INPUT(_name='delete_check', _class='delete', _type='checkbox', value=False), ''), TR('', '', INPUT(_type='submit', _value=T('submit')))), _action=URL(r=request, args=request.args)) tb = None if form.accepts(request.vars, formname=None): regex = re.compile(request.args[0] + '\.(?P<table>\w+)\..+') match = regex.match(form.vars.query.strip()) if match: table = match.group('table') try: nrows = db(query).count() if form.vars.update_check and form.vars.update_fields: db(query).update(**eval_in_global_env('dict(%s)' % form.vars.update_fields)) response.flash = T('%s %%{row} updated', nrows) elif form.vars.delete_check: db(query).delete() response.flash = T('%s %%{row} deleted', nrows) nrows = db(query).count() if orderby: rows = db(query, ignore_common_filters=True).select(limitby=( start, stop), orderby=eval_in_global_env(orderby)) else: rows = db(query, ignore_common_filters=True).select( limitby=(start, stop)) except Exception, e: import traceback tb = traceback.format_exc() (rows, nrows) = ([], 0) response.flash = DIV(T('Invalid Query'), PRE(str(e))) # begin handle upload csv csv_table = table or request.vars.table if csv_table: formcsv = FORM(str(T('or import from csv file')) + " ", INPUT(_type='file', _name='csvfile'), INPUT(_type='hidden', _value=csv_table, _name='table'), INPUT(_type='submit', _value=T('import'))) else: formcsv = None if formcsv and formcsv.process().accepted: try: import_csv(db[request.vars.table], 
request.vars.csvfile.file) response.flash = T('data uploaded') except Exception, e: response.flash = DIV(T('unable to parse csv file'), PRE(str(e))) # end handle upload csv return dict( form=form, table=table, start=start, stop=stop, nrows=nrows, rows=rows, query=request.vars.query, formcsv=formcsv, tb=tb, ) # ########################################################## # ## edit delete one record # ########################################################### def update(): (db, table) = get_table(request) keyed = hasattr(db[table], '_primarykey') record = None db[table]._common_filter = None if keyed: key = [f for f in request.vars if f in db[table]._primarykey] if key: record = db(db[table][key[0]] == request.vars[key[ 0]]).select().first() else: record = db(db[table].id == request.args( 2)).select().first() if not record: qry = query_by_table_type(table, db) session.flash = T('record does not exist') redirect(URL('select', args=request.args[:1], vars=dict(query=qry))) if keyed: for k in db[table]._primarykey: db[table][k].writable = False form = SQLFORM( db[table], record, deletable=True, delete_label=T('Check to delete'), ignore_rw=ignore_rw and not keyed, linkto=URL('select', args=request.args[:1]), upload=URL(r=request, f='download', args=request.args[:1])) if form.accepts(request.vars, session): session.flash = T('done!') qry = query_by_table_type(table, db) redirect(URL('select', args=request.args[:1], vars=dict(query=qry))) return dict(form=form, table=db[table]) # ########################################################## # ## get global variables # ########################################################### def state(): return dict() def ccache(): cache.ram.initialize() cache.disk.initialize() form = FORM( P(TAG.BUTTON( T("Clear CACHE?"), _type="submit", _name="yes", _value="yes")), P(TAG.BUTTON( T("Clear RAM"), _type="submit", _name="ram", _value="ram")), P(TAG.BUTTON( T("Clear DISK"), _type="submit", _name="disk", _value="disk")), ) if 
form.accepts(request.vars, session): clear_ram = False clear_disk = False session.flash = "" if request.vars.yes: clear_ram = clear_disk = True if request.vars.ram: clear_ram = True if request.vars.disk: clear_disk = True if clear_ram: cache.ram.clear() session.flash += T("Ram Cleared") if clear_disk: cache.disk.clear() session.flash += T("Disk Cleared") redirect(URL(r=request)) try: from guppy import hpy hp = hpy() except ImportError: hp = False import shelve import os import copy import time import math from gluon import portalocker ram = { 'entries': 0, 'bytes': 0, 'objects': 0, 'hits': 0, 'misses': 0, 'ratio': 0, 'oldest': time.time(), 'keys': [] } disk = copy.copy(ram) total = copy.copy(ram) disk['keys'] = [] total['keys'] = [] def GetInHMS(seconds): hours = math.floor(seconds / 3600) seconds -= hours * 3600 minutes = math.floor(seconds / 60) seconds -= minutes * 60 seconds = math.floor(seconds) return (hours, minutes, seconds) for key, value in cache.ram.storage.iteritems(): if isinstance(value, dict): ram['hits'] = value['hit_total'] - value['misses'] ram['misses'] = value['misses'] try: ram['ratio'] = ram['hits'] * 100 / value['hit_total'] except (KeyError, ZeroDivisionError): ram['ratio'] = 0 else: if hp: ram['bytes'] += hp.iso(value[1]).size ram['objects'] += hp.iso(value[1]).count ram['entries'] += 1 if value[0] < ram['oldest']: ram['oldest'] = value[0] ram['keys'].append((key, GetInHMS(time.time() - value[0]))) folder = os.path.join(request.folder,'cache') if not os.path.exists(folder): os.mkdir(folder) locker = open(os.path.join(folder, 'cache.lock'), 'a') portalocker.lock(locker, portalocker.LOCK_EX) disk_storage = shelve.open( os.path.join(folder, 'cache.shelve')) try: for key, value in disk_storage.items(): if isinstance(value, dict): disk['hits'] = value['hit_total'] - value['misses'] disk['misses'] = value['misses'] try: disk['ratio'] = disk['hits'] * 100 / value['hit_total'] except (KeyError, ZeroDivisionError): disk['ratio'] = 0 else: if hp: 
disk['bytes'] += hp.iso(value[1]).size disk['objects'] += hp.iso(value[1]).count disk['entries'] += 1 if value[0] < disk['oldest']: disk['oldest'] = value[0] disk['keys'].append((key, GetInHMS(time.time() - value[0]))) finally: portalocker.unlock(locker) locker.close() disk_storage.close() total['entries'] = ram['entries'] + disk['entries'] total['bytes'] = ram['bytes'] + disk['bytes'] total['objects'] = ram['objects'] + disk['objects'] total['hits'] = ram['hits'] + disk['hits'] total['misses'] = ram['misses'] + disk['misses'] total['keys'] = ram['keys'] + disk['keys'] try: total['ratio'] = total['hits'] * 100 / (total['hits'] + total['misses']) except (KeyError, ZeroDivisionError): total['ratio'] = 0 if disk['oldest'] < ram['oldest']: total['oldest'] = disk['oldest'] else: total['oldest'] = ram['oldest'] ram['oldest'] = GetInHMS(time.time() - ram['oldest']) disk['oldest'] = GetInHMS(time.time() - disk['oldest']) total['oldest'] = GetInHMS(time.time() - total['oldest']) def key_table(keys): return TABLE( TR(TD(B(T('Key'))), TD(B(T('Time in Cache (h:m:s)')))), *[TR(TD(k[0]), TD('%02d:%02d:%02d' % k[1])) for k in keys], **dict(_class='cache-keys', _style="border-collapse: separate; border-spacing: .5em;")) ram['keys'] = key_table(ram['keys']) disk['keys'] = key_table(disk['keys']) total['keys'] = key_table(total['keys']) return dict(form=form, total=total, ram=ram, disk=disk, object_stats=hp != False) def table_template(table): from gluon.html import TR, TD, TABLE, TAG def FONT(*args, **kwargs): return TAG.font(*args, **kwargs) def types(field): f_type = field.type if not isinstance(f_type,str): return ' ' elif f_type == 'string': return field.length elif f_type == 'id': return B('pk') elif f_type.startswith('reference') or \ f_type.startswith('list:reference'): return B('fk') else: return ' ' # This is horribe HTML but the only one graphiz understands rows = [] cellpadding = 4 color = "#000000" bgcolor = "#FFFFFF" face = "Helvetica" face_bold = "Helvetica Bold" 
border = 0 rows.append(TR(TD(FONT(table, _face=face_bold, _color=bgcolor), _colspan=3, _cellpadding=cellpadding, _align="center", _bgcolor=color))) for row in db[table]: rows.append(TR(TD(FONT(row.name, _color=color, _face=face_bold), _align="left", _cellpadding=cellpadding, _border=border), TD(FONT(row.type, _color=color, _face=face), _align="left", _cellpadding=cellpadding, _border=border), TD(FONT(types(row), _color=color, _face=face), _align="center", _cellpadding=cellpadding, _border=border))) return "< %s >" % TABLE(*rows, **dict(_bgcolor=bgcolor, _border=1, _cellborder=0, _cellspacing=0) ).xml() def bg_graph_model(): graph = pgv.AGraph(layout='dot', directed=True, strict=False, rankdir='LR') subgraphs = dict() for tablename in db.tables: if hasattr(db[tablename],'_meta_graphmodel'): meta_graphmodel = db[tablename]._meta_graphmodel else: meta_graphmodel = dict(group='Undefined', color='#ECECEC') group = meta_graphmodel['group'].replace(' ', '') if not subgraphs.has_key(group): subgraphs[group] = dict(meta=meta_graphmodel, tables=[]) subgraphs[group]['tables'].append(tablename) else: subgraphs[group]['tables'].append(tablename) graph.add_node(tablename, name=tablename, shape='plaintext', label=table_template(tablename)) for n, key in enumerate(subgraphs.iterkeys()): graph.subgraph(nbunch=subgraphs[key]['tables'], name='cluster%d' % n, style='filled', color=subgraphs[key]['meta']['color'], label=subgraphs[key]['meta']['group']) for tablename in db.tables: for field in db[tablename]: f_type = field.type if isinstance(f_type,str) and ( f_type.startswith('reference') or f_type.startswith('list:reference')): referenced_table = f_type.split()[1].split('.')[0] n1 = graph.get_node(tablename) n2 = graph.get_node(referenced_table) graph.add_edge(n1, n2, color="#4C4C4C", label='') graph.layout() #return graph.draw(format='png', prog='dot') if not request.args: return graph.draw(format='png', prog='dot') else: 
response.headers['Content-Disposition']='attachment;filename=graph.%s'%request.args(0) if request.args(0) == 'dot': return graph.string() else: return graph.draw(format=request.args(0), prog='dot') def graph_model(): return dict(databases=databases, pgv=pgv) def manage(): tables = manager_action['tables'] if isinstance(tables[0], str): db = manager_action.get('db', auth.db) db = globals()[db] if isinstance(db, str) else db tables = [db[table] for table in tables] if request.args(0) == 'auth': auth.table_user()._plural = T('Users') auth.table_group()._plural = T('Roles') auth.table_membership()._plural = T('Memberships') auth.table_permission()._plural = T('Permissions') if request.extension != 'load': return dict(heading=manager_action.get('heading', T('Manage %(action)s') % dict(action=request.args(0).replace('_', ' ').title())), tablenames=[table._tablename for table in tables], labels=[table._plural.title() for table in tables]) table = tables[request.args(1, cast=int)] formname = '%s_grid' % table._tablename linked_tables = orderby = None if request.args(0) == 'auth': auth.table_group()._id.readable = \ auth.table_membership()._id.readable = \ auth.table_permission()._id.readable = False auth.table_membership().user_id.label = T('User') auth.table_membership().group_id.label = T('Role') auth.table_permission().group_id.label = T('Role') auth.table_permission().name.label = T('Permission') if table == auth.table_user(): linked_tables=[auth.settings.table_membership_name] elif table == auth.table_group(): orderby = 'role' if not request.args(3) or '.group_id' not in request.args(3) else None elif table == auth.table_permission(): orderby = 'group_id' kwargs = dict(user_signature=True, maxtextlength=1000, orderby=orderby, linked_tables=linked_tables) smartgrid_args = manager_action.get('smartgrid_args', {}) kwargs.update(**smartgrid_args.get('DEFAULT', {})) kwargs.update(**smartgrid_args.get(table._tablename, {})) grid = SQLFORM.smartgrid(table, 
args=request.args[:2], formname=formname, **kwargs) return grid
RJPercival/certificate-transparency
refs/heads/master
python/utilities/log_list/java_generator.py
30
import datetime
import textwrap
import base64


def _write_java_header(output, package):
    """Write the Apache 2.0 license header and the Java package declaration.

    output: writable file-like object receiving the Java source.
    package: dotted Java package name to declare.
    """
    year = datetime.date.today().year
    output.write(
        "/*\n"
        " * Copyright (C) %(year)d The Android Open Source Project\n"
        " *\n"
        " * Licensed under the Apache License, Version 2.0 (the \"License\");\n"
        " * you may not use this file except in compliance with the License.\n"
        " * You may obtain a copy of the License at\n"
        " *\n"
        " * http://www.apache.org/licenses/LICENSE-2.0\n"
        " *\n"
        " * Unless required by applicable law or agreed to in writing, software\n"
        " * distributed under the License is distributed on an \"AS IS\" BASIS,\n"
        " * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n"
        " * See the License for the specific language governing permissions and\n"
        " * limitations under the License.\n"
        " */\n\n"
        "/* This file is generated by print_log_list.py\n"
        " * https://github.com/google/certificate-transparency/blob/master/python/utilities/log_list/print_log_list.py */\n\n"
        "package %(package)s;\n\n" % {"year": year, "package": package})


def _encode_key(description, key):
    """Render a raw log key as a Java signed-byte array literal.

    description: human-readable log description (emitted as a comment).
    key: the decoded key, as bytes (Python 3) or a byte string (Python 2).
    """
    # BUG FIX: iterating bytes yields ints on Python 3 but 1-char strings
    # on Python 2; the original unconditionally applied ord() and broke on
    # Python 3.  Normalise both cases to unsigned ints first.
    unsigned = (ord(c) if isinstance(c, str) else c for c in key)
    # Java's byte is signed, so map 128..255 onto -128..-1.
    signed = (u - 256 if u > 127 else u for u in unsigned)
    array = textwrap.fill(", ".join("%d" % s for s in signed),
                          width=85,
                          initial_indent=" " * 3,
                          subsequent_indent=" " * 3)
    return (" // %(description)s\n"
            " new byte[] {\n"
            "%(array)s\n"
            " },\n" % {"description": description, "array": array})


def _write_java_class(output, logs, class_name):
    """Write the generated Java class holding log descriptions, URLs and keys.

    output: writable file-like object.
    logs: list of dicts with "description", "url" and base64 "key" entries.
    class_name: simple (unqualified) Java class name.
    """
    descriptions = (' "%s",\n' % log["description"] for log in logs)
    urls = (' "%s",\n' % log["url"] for log in logs)
    # BUG FIX: base64.decodestring was deprecated and removed in Python 3.9;
    # b64decode is the supported equivalent on both Python 2 and 3.
    keys = (_encode_key(log["description"], base64.b64decode(log["key"]))
            for log in logs)
    output.write(
        "public final class %(class_name)s {\n"
        " public static final int LOG_COUNT = %(count)d;\n"
        " public static final String[] LOG_DESCRIPTIONS = new String[] {\n"
        "%(descriptions)s"
        " };\n"
        " public static final String[] LOG_URLS = new String[] {\n"
        "%(urls)s"
        " };\n"
        " public static final byte[][] LOG_KEYS = new byte[][] {\n"
        "%(keys)s"
        " };\n"
        "}\n" % {
            "class_name": class_name,
            "count": len(logs),
            "descriptions": "".join(descriptions),
            "urls": "".join(urls),
            "keys": "".join(keys)})


def generate_java_source(json_log_list, output_path, class_name):
    """Generate a Java source file embedding the CT log list.

    json_log_list: parsed log-list JSON (must contain a "logs" array).
    output_path: file path to write the .java source to.
    class_name: fully-qualified class name, e.g. "com.example.LogList".
    """
    with open(output_path, "w") as output:
        logs = json_log_list["logs"]
        [pkg, cls] = class_name.rsplit('.', 1)
        _write_java_header(output, pkg)
        _write_java_class(output, logs, cls)
WandyYing/robotframework-selenium2library
refs/heads/master
test/run_tests.py
14
#!/usr/bin/env python import env import os import sys from subprocess import Popen, call from tempfile import TemporaryFile from run_unit_tests import run_unit_tests ROBOT_ARGS = [ '--doc', 'SeleniumSPacceptanceSPtestsSPwithSP%(browser)s', '--outputdir', '%(outdir)s', '--variable', 'browser:%(browser)s', '--escape', 'space:SP', '--report', 'none', '--log', 'none', #'--suite', 'Acceptance.Keywords.Textfields', '--loglevel', 'DEBUG', '--pythonpath', '%(pythonpath)s', ] REBOT_ARGS = [ '--outputdir', '%(outdir)s', '--name', '%(browser)sSPAcceptanceSPTests', '--escape', 'space:SP', '--critical', 'regression', '--noncritical', 'inprogress', ] ARG_VALUES = {'outdir': env.RESULTS_DIR, 'pythonpath': env.SRC_DIR} def acceptance_tests(interpreter, browser, args): ARG_VALUES['browser'] = browser.replace('*', '') start_http_server() runner = {'python': 'pybot', 'jython': 'jybot', 'ipy': 'ipybot'}[interpreter] if os.sep == '\\': runner += '.bat' execute_tests(runner, args) stop_http_server() return process_output(args) def start_http_server(): server_output = TemporaryFile() Popen(['python', env.HTTP_SERVER_FILE ,'start'], stdout=server_output, stderr=server_output) def execute_tests(runner, args): if not os.path.exists(env.RESULTS_DIR): os.mkdir(env.RESULTS_DIR) command = [runner] + [arg % ARG_VALUES for arg in ROBOT_ARGS] + args + [env.ACCEPTANCE_TEST_DIR] print '' print 'Starting test execution with command:\n' + ' '.join(command) syslog = os.path.join(env.RESULTS_DIR, 'syslog.txt') call(command, shell=os.sep=='\\', env=dict(os.environ, ROBOT_SYSLOG_FILE=syslog)) def stop_http_server(): call(['python', env.HTTP_SERVER_FILE, 'stop']) def process_output(args): print if _has_robot_27(): call(['python', os.path.join(env.RESOURCES_DIR, 'statuschecker.py'), os.path.join(env.RESULTS_DIR, 'output.xml')]) rebot = 'rebot' if os.sep == '/' else 'rebot.bat' rebot_cmd = [rebot] + [ arg % ARG_VALUES for arg in REBOT_ARGS ] + args + \ [os.path.join(ARG_VALUES['outdir'], 'output.xml') ] rc = 
call(rebot_cmd, env=os.environ) if rc == 0: print 'All critical tests passed' else: print '%d critical test%s failed' % (rc, 's' if rc != 1 else '') return rc def _has_robot_27(): try: from robot.result import ExecutionResult except: return False return True def _exit(rc): sys.exit(rc) def _help(): print 'Usage: python run_tests.py python|jython browser [options]' print print 'See README.txt for details.' return 255 def _run_unit_tests(): print 'Running unit tests' failures = run_unit_tests() if failures != 0: print '\n%d unit tests failed - not running acceptance tests!' % failures else: print 'All unit tests passed' return failures if __name__ == '__main__': if not len(sys.argv) > 2: _exit(_help()) unit_failures = _run_unit_tests() if unit_failures: _exit(unit_failures) interpreter = sys.argv[1] browser = sys.argv[2].lower() args = sys.argv[3:] if browser != 'unit': _exit(acceptance_tests(interpreter, browser, args))
jiajiax/crosswalk-test-suite
refs/heads/master
webapi/tct-csp-w3c-tests/csp-py/csp_font-src_cross-origin_multi_blocked-manual.py
25
def main(request, response):
    """wptserve handler for the font-src cross-origin multi-source CSP test.

    Reads the test server host/ports from config.json, attaches the
    Content-Security-Policy header (plus the legacy X-Content-Security-Policy
    and X-WebKit-CSP variants), and returns the HTML test page whose
    cross-origin @font-face load should be blocked.
    """
    # BUG FIX: the original used the Python-2-only `file()` builtin, never
    # closed the handle, and depended on third-party simplejson; stdlib json
    # inside a `with` block is equivalent and leak-free.
    import json
    with open('config.json') as config_file:
        s = json.load(config_file)
    url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
    # url2 is unused here but computed in the original (parity with the
    # sibling csp-py handlers); kept so behaviour/errors stay identical.
    url2 = "http://" + s['host'] + ":" + str(s['ports']['http'][0])
    response.headers.set(
        "Content-Security-Policy",
        "font-src http://www.tizen.com http://tizen.org; style-src 'unsafe-inline'")
    response.headers.set(
        "X-Content-Security-Policy",
        "font-src http://www.tizen.com http://tizen.org; style-src 'unsafe-inline'")
    response.headers.set(
        "X-WebKit-CSP",
        "font-src http://www.tizen.com http://tizen.org; style-src 'unsafe-inline'")
    return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of works must retain the original copyright notice, this list
  of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
  may be used to endorse or promote products derived from this work without
  specific prior written permission.

THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.

Authors:
        Hao, Yunfei <yunfeix.hao@intel.com>

-->

<html>
  <head>
    <title>CSP Test: csp_font-src_cross-origin_multi_blocked</title>
    <link rel="author" title="Intel" href="http://www.intel.com"/>
    <link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#font-src"/>
    <meta name="flags" content=""/>
    <meta charset="utf-8"/>
    <style>
      @font-face {
        font-family: Canvas;
        src: url('""" + url1 + """/tests/csp/support/w3c/csstest-basic-bold.ttf');
      }
      #test {
        font-family: Canvas;
      }
    </style>
  </head>
  <body>
    <p>Test passes if the two lines are same in font</p>
    <div id="test">1234 ABCD</div>
    <div>1234 ABCD</div>
  </body>
</html> """
dannyboi104/SickRage
refs/heads/master
sickbeard/show_name_helpers.py
3
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage.  If not, see <http://www.gnu.org/licenses/>.

import fnmatch
import os
import re
import datetime
from functools import partial

import sickbeard
from sickbeard import common
from sickbeard.helpers import sanitizeSceneName
from sickbeard.scene_exceptions import get_scene_exceptions
from sickbeard import logger
from sickbeard import db
from sickbeard import encodingKludge as ek
from name_parser.parser import NameParser, InvalidNameException, InvalidShowException

# Regex fragments marking releases we never want (foreign subs, samples,
# fixes, extras, dubs).
resultFilters = ["sub(bed|ed|pack|s)",
                 "(dk|fin|heb|kor|nor|nordic|pl|swe)sub(bed|ed|s)?",
                 "(dir|sample|sub|nfo)fix",
                 "sample",
                 "(dvd)?extras",
                 "dub(bed)?"]


def containsAtLeastOneWord(name, words):
    """
    Filters out results based on filter_words

    name: name to check
    words : string of words separated by a ',' or list of words

    Returns: False if the name doesn't contain any word of words list, or the found word from the list.
    """
    if isinstance(words, basestring):
        words = words.split(',')
    # Match each word as a whole token (bounded by non-word chars or the
    # string ends), case-insensitively.
    items = [(re.compile('(^|[\W_])%s($|[\W_])' % re.escape(word.strip()), re.I), word.strip())
             for word in words]
    for regexp, word in items:
        if regexp.search(name):
            return word
    return False


def filterBadReleases(name, parse=True):
    """
    Filters out non-english and just all-around stupid releases by comparing them
    to the resultFilters contents.

    name: the release name to check

    Returns: True if the release name is OK, False if it's bad.
    """
    try:
        if parse:
            NameParser().parse(name)
    except InvalidNameException:
        logger.log(u"Unable to parse the filename " + name + " into a valid episode", logger.DEBUG)
        return False
    except InvalidShowException:
        pass
        # logger.log(u"Unable to parse the filename " + name + " into a valid show", logger.DEBUG)
        # return False

    # if any of the bad strings are in the name then say no
    ignore_words = list(resultFilters)
    if sickbeard.IGNORE_WORDS:
        ignore_words.extend(sickbeard.IGNORE_WORDS.split(','))
    word = containsAtLeastOneWord(name, ignore_words)
    if word:
        logger.log(u"Invalid scene release: " + name + " contains " + word + ", ignoring it", logger.DEBUG)
        return False

    # if any of the good strings aren't in the name then say no
    if sickbeard.REQUIRE_WORDS:
        require_words = sickbeard.REQUIRE_WORDS
        if not containsAtLeastOneWord(name, require_words):
            logger.log(u"Invalid scene release: " + name + " doesn't contain any of " + sickbeard.REQUIRE_WORDS +
                       ", ignoring it", logger.DEBUG)
            return False

    return True


def sceneToNormalShowNames(name):
    """
    Takes a show name from a scene dirname and converts it to a more "human-readable" format.

    name: The show name to convert

    Returns: a list of all the possible "normal" names
    """
    if not name:
        return []

    name_list = [name]

    # use both and and &
    # BUG FIX: the original passed re.I as re.sub's 4th positional argument,
    # which is `count` (so only 2 substitutions were ever made); the (?i)
    # inline flag already makes the pattern case-insensitive.
    new_name = re.sub('(?i)([\. ])and([\. ])', '\\1&\\2', name)
    if new_name not in name_list:
        name_list.append(new_name)

    # Loop-invariant: compute the country alternation once, not per name.
    country_match_str = '|'.join(common.countryList.values())

    results = []
    for cur_name in name_list:
        # add brackets around the year
        results.append(re.sub('(\D)(\d{4})$', '\\1(\\2)', cur_name))
        # add brackets around the country
        results.append(re.sub('(?i)([. _-])(' + country_match_str + ')$', '\\1(\\2)', cur_name))

    results += name_list
    return list(set(results))


def makeSceneShowSearchStrings(show, season=-1, anime=False):
    """Return scene-sanitized variants of every possible name for `show`."""
    showNames = allPossibleShowNames(show, season=season)

    # scenify the names
    if anime:
        sanitizeSceneNameAnime = partial(sanitizeSceneName, anime=True)
        return map(sanitizeSceneNameAnime, showNames)
    else:
        return map(sanitizeSceneName, showNames)


def makeSceneSeasonSearchString(show, ep_obj, extraSearchType=None):
    """Build provider search strings for a whole season of `show`."""
    if show.air_by_date or show.sports:
        numseasons = 0
        # the search string for air by date shows is just the year
        seasonStrings = [str(ep_obj.airdate).split('-')[0]]
    elif show.is_anime:
        numseasons = 0
        seasonEps = show.getAllEpisodes(ep_obj.season)

        # get show qualities
        anyQualities, bestQualities = common.Quality.splitQuality(show.quality)

        # compile a list of all the episode numbers we need in this 'season'
        seasonStrings = []
        for episode in seasonEps:
            # get quality of the episode
            curCompositeStatus = episode.status
            curStatus, curQuality = common.Quality.splitCompositeStatus(curCompositeStatus)

            if bestQualities:
                highestBestQuality = max(bestQualities)
            else:
                highestBestQuality = 0

            # if we need a better one then add it to the list of episodes to fetch
            if (curStatus in (common.DOWNLOADED, common.SNATCHED) and
                    curQuality < highestBestQuality) or curStatus == common.WANTED:
                ab_number = episode.scene_absolute_number
                if ab_number > 0:
                    seasonStrings.append("%02d" % ab_number)
    else:
        myDB = db.DBConnection()
        numseasonsSQlResult = myDB.select(
            "SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and season != 0",
            [show.indexerid])

        numseasons = int(numseasonsSQlResult[0][0])
        seasonStrings = ["S%02d" % int(ep_obj.scene_season)]

    showNames = set(makeSceneShowSearchStrings(show, ep_obj.scene_season))

    toReturn = []

    # search each show name
    for curShow in showNames:
        # most providers all work the same way
        if not extraSearchType:
            # if there's only one season then we can just use the show name straight up
            if numseasons == 1:
                toReturn.append(curShow)
            # for providers that don't allow multiple searches in one request we only search for Sxx style stuff
            else:
                for cur_season in seasonStrings:
                    if ep_obj.show.is_anime:
                        if ep_obj.show.release_groups is not None:
                            if len(show.release_groups.whitelist) > 0:
                                for keyword in show.release_groups.whitelist:
                                    toReturn.append(keyword + '.' + curShow + "." + cur_season)
                    else:
                        toReturn.append(curShow + "." + cur_season)

    return toReturn


def makeSceneSearchString(show, ep_obj):
    """Build provider search strings for a single episode of `show`."""
    myDB = db.DBConnection()
    numseasonsSQlResult = myDB.select(
        "SELECT COUNT(DISTINCT season) as numseasons FROM tv_episodes WHERE showid = ? and season != 0",
        [show.indexerid])
    numseasons = int(numseasonsSQlResult[0][0])

    # see if we should use dates instead of episodes
    if (show.air_by_date or show.sports) and ep_obj.airdate != datetime.date.fromordinal(1):
        epStrings = [str(ep_obj.airdate)]
    elif show.is_anime:
        epStrings = ["%02i" % int(ep_obj.scene_absolute_number
                                  if ep_obj.scene_absolute_number > 0 else ep_obj.scene_episode)]
    else:
        epStrings = ["S%02iE%02i" % (int(ep_obj.scene_season), int(ep_obj.scene_episode)),
                     "%ix%02i" % (int(ep_obj.scene_season), int(ep_obj.scene_episode))]

    # for single-season shows just search for the show name -- if total ep count (exclude s0) is less than 11
    # due to the amount of qualities and releases, it is easy to go over the 50 result limit on rss feeds otherwise
    if numseasons == 1 and not ep_obj.show.is_anime:
        epStrings = ['']

    showNames = set(makeSceneShowSearchStrings(show, ep_obj.scene_season))

    toReturn = []

    for curShow in showNames:
        for curEpString in epStrings:
            if ep_obj.show.is_anime:
                if ep_obj.show.release_groups is not None:
                    if len(ep_obj.show.release_groups.whitelist) > 0:
                        for keyword in ep_obj.show.release_groups.whitelist:
                            toReturn.append(keyword + '.' + curShow + '.' + curEpString)
            else:
                toReturn.append(curShow + '.' + curEpString)

    return toReturn


def isGoodResult(name, show, log=True, season=-1):
    """
    Use an automatically-created regex to make sure the result actually is the show it claims to be
    """
    all_show_names = allPossibleShowNames(show, season=season)
    showNames = map(sanitizeSceneName, all_show_names) + all_show_names
    showNames += map(ek.ss, all_show_names)

    for curName in set(showNames):
        if not show.is_anime:
            # Turn escaped separators into a flexible non-word-char match.
            escaped_name = re.sub('\\\\[\\s.-]', '\W+', re.escape(curName))
            if show.startyear:
                escaped_name += "(?:\W+" + str(show.startyear) + ")?"
            curRegex = '^' + escaped_name + '\W+(?:(?:S\d[\dE._ -])|(?:\d\d?x)|(?:\d{4}\W\d\d\W\d\d)|(?:(?:part|pt)[\._ -]?(\d|[ivx]))|Season\W+\d+\W+|E\d+\W+|(?:\d{1,3}.+\d{1,}[a-zA-Z]{2}\W+[a-zA-Z]{3,}\W+\d{4}.+))'
        else:
            escaped_name = re.sub('\\\\[\\s.-]', '[\W_]+', re.escape(curName))
            # FIXME: find a "automatically-created" regex for anime releases
            # test at http://regexr.com?2uon3
            curRegex = '^((\[.*?\])|(\d+[\.-]))*[ _\.]*' + escaped_name + '(([ ._-]+\d+)|([ ._-]+s\d{2})).*'

        if log:
            logger.log(u"Checking if show " + name + " matches " + curRegex, logger.DEBUG)

        match = re.search(curRegex, name, re.I)
        if match:
            logger.log(u"Matched " + curRegex + " to " + name, logger.DEBUG)
            return True

    if log:
        logger.log(
            u"Provider gave result " + name + " but that doesn't seem like a valid result for " + show.name + " so I'm ignoring it")
    return False


def allPossibleShowNames(show, season=-1):
    """
    Figures out every possible variation of the name for a particular show. Includes TVDB name, TVRage name,
    country codes on the end, eg. "Show Name (AU)", and any scene exception names.

    show: a TVShow object that we should get the names of

    Returns: a list of all the possible show names
    """
    showNames = get_scene_exceptions(show.indexerid, season=season)[:]
    if not showNames:  # if we dont have any season specific exceptions fallback to generic exceptions
        season = -1
        showNames = get_scene_exceptions(show.indexerid, season=season)[:]

    if season in [-1, 1]:
        showNames.append(show.name)

    if not show.is_anime:
        newShowNames = []
        # BUG FIX: copy countryList before adding the reverse mapping; the
        # original aliased common.countryList and update()d it in place,
        # permanently polluting the shared dict for every other caller.
        country_list = dict(common.countryList)
        country_list.update(dict(zip(common.countryList.values(), common.countryList.keys())))
        for curName in set(showNames):
            if not curName:
                continue

            # if we have "Show Name Australia" or "Show Name (Australia)" this will add "Show Name (AU)" for
            # any countries defined in common.countryList
            # (and vice versa)
            for curCountry in country_list:
                if curName.endswith(' ' + curCountry):
                    newShowNames.append(curName.replace(' ' + curCountry, ' (' + country_list[curCountry] + ')'))
                elif curName.endswith(' (' + curCountry + ')'):
                    newShowNames.append(curName.replace(' (' + curCountry + ')', ' (' + country_list[curCountry] + ')'))

            # if we have "Show Name (2013)" this will strip the (2013) show year from the show name
            #newShowNames.append(re.sub('\(\d{4}\)','',curName))

        showNames += newShowNames

    return showNames


def determineReleaseName(dir_name=None, nzb_name=None):
    """Determine a release name from an nzb and/or folder name"""
    if nzb_name is not None:
        logger.log(u"Using nzb_name for release name.")
        return nzb_name.rpartition('.')[0]

    if dir_name is None:
        return None

    # try to get the release name from nzb/nfo
    file_types = ["*.nzb", "*.nfo"]

    for search in file_types:
        reg_expr = re.compile(fnmatch.translate(search), re.IGNORECASE)
        files = [file_name for file_name in ek.ek(os.listdir, dir_name)
                 if ek.ek(os.path.isfile, ek.ek(os.path.join, dir_name, file_name))]
        results = filter(reg_expr.search, files)

        if len(results) == 1:
            found_file = ek.ek(os.path.basename, results[0])
            found_file = found_file.rpartition('.')[0]
            if filterBadReleases(found_file):
                logger.log(u"Release name (" + found_file + ") found from file (" + results[0] + ")")
                # NOTE(review): this strips a second '.segment' from the
                # already extension-less name — looks deliberate upstream,
                # but verify against callers before changing.
                return found_file.rpartition('.')[0]

    # If that fails, we try the folder
    folder = ek.ek(os.path.basename, dir_name)
    if filterBadReleases(folder):
        # NOTE: Multiple failed downloads will change the folder name.
        # (e.g., appending #s) Should we handle that?
        logger.log(u"Folder name (" + folder + ") appears to be a valid release name. Using it.", logger.DEBUG)
        return folder

    return None
ammarkhann/FinalSeniorCode
refs/heads/master
lib/python2.7/site-packages/numpy/doc/glossary.py
40
""" ======== Glossary ======== .. glossary:: along an axis Axes are defined for arrays with more than one dimension. A 2-dimensional array has two corresponding axes: the first running vertically downwards across rows (axis 0), and the second running horizontally across columns (axis 1). Many operation can take place along one of these axes. For example, we can sum each row of an array, in which case we operate along columns, or axis 1:: >>> x = np.arange(12).reshape((3,4)) >>> x array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> x.sum(axis=1) array([ 6, 22, 38]) array A homogeneous container of numerical elements. Each element in the array occupies a fixed amount of memory (hence homogeneous), and can be a numerical element of a single type (such as float, int or complex) or a combination (such as ``(float, int, float)``). Each array has an associated data-type (or ``dtype``), which describes the numerical type of its elements:: >>> x = np.array([1, 2, 3], float) >>> x array([ 1., 2., 3.]) >>> x.dtype # floating point number, 64 bits of memory per element dtype('float64') # More complicated data type: each array element is a combination of # and integer and a floating point number >>> np.array([(1, 2.0), (3, 4.0)], dtype=[('x', int), ('y', float)]) array([(1, 2.0), (3, 4.0)], dtype=[('x', '<i4'), ('y', '<f8')]) Fast element-wise operations, called `ufuncs`_, operate on arrays. array_like Any sequence that can be interpreted as an ndarray. This includes nested lists, tuples, scalars and existing arrays. 
attribute A property of an object that can be accessed using ``obj.attribute``, e.g., ``shape`` is an attribute of an array:: >>> x = np.array([1, 2, 3]) >>> x.shape (3,) BLAS `Basic Linear Algebra Subprograms <http://en.wikipedia.org/wiki/BLAS>`_ broadcast NumPy can do operations on arrays whose shapes are mismatched:: >>> x = np.array([1, 2]) >>> y = np.array([[3], [4]]) >>> x array([1, 2]) >>> y array([[3], [4]]) >>> x + y array([[4, 5], [5, 6]]) See `doc.broadcasting`_ for more information. C order See `row-major` column-major A way to represent items in a N-dimensional array in the 1-dimensional computer memory. In column-major order, the leftmost index "varies the fastest": for example the array:: [[1, 2, 3], [4, 5, 6]] is represented in the column-major order as:: [1, 4, 2, 5, 3, 6] Column-major order is also known as the Fortran order, as the Fortran programming language uses it. decorator An operator that transforms a function. For example, a ``log`` decorator may be defined to print debugging information upon function execution:: >>> def log(f): ... def new_logging_func(*args, **kwargs): ... print("Logging call with parameters:", args, kwargs) ... return f(*args, **kwargs) ... ... return new_logging_func Now, when we define a function, we can "decorate" it using ``log``:: >>> @log ... def add(a, b): ... return a + b Calling ``add`` then yields: >>> add(1, 2) Logging call with parameters: (1, 2) {} 3 dictionary Resembling a language dictionary, which provides a mapping between words and descriptions thereof, a Python dictionary is a mapping between two objects:: >>> x = {1: 'one', 'two': [1, 2]} Here, `x` is a dictionary mapping keys to values, in this case the integer 1 to the string "one", and the string "two" to the list ``[1, 2]``. The values may be accessed using their corresponding keys:: >>> x[1] 'one' >>> x['two'] [1, 2] Note that dictionaries are not stored in any specific order. 
Also, most mutable (see *immutable* below) objects, such as lists, may not be used as keys. For more information on dictionaries, read the `Python tutorial <http://docs.python.org/tut>`_. Fortran order See `column-major` flattened Collapsed to a one-dimensional array. See `ndarray.flatten`_ for details. immutable An object that cannot be modified after execution is called immutable. Two common examples are strings and tuples. instance A class definition gives the blueprint for constructing an object:: >>> class House(object): ... wall_colour = 'white' Yet, we have to *build* a house before it exists:: >>> h = House() # build a house Now, ``h`` is called a ``House`` instance. An instance is therefore a specific realisation of a class. iterable A sequence that allows "walking" (iterating) over items, typically using a loop such as:: >>> x = [1, 2, 3] >>> [item**2 for item in x] [1, 4, 9] It is often used in combination with ``enumerate``:: >>> keys = ['a','b','c'] >>> for n, k in enumerate(keys): ... print("Key %d: %s" % (n, k)) ... Key 0: a Key 1: b Key 2: c list A Python container that can hold any number of objects or items. The items do not have to be of the same type, and can even be lists themselves:: >>> x = [2, 2.0, "two", [2, 2.0]] The list `x` contains 4 items, each which can be accessed individually:: >>> x[2] # the string 'two' 'two' >>> x[3] # a list, containing an integer 2 and a float 2.0 [2, 2.0] It is also possible to select more than one item at a time, using *slicing*:: >>> x[0:2] # or, equivalently, x[:2] [2, 2.0] In code, arrays are often conveniently expressed as nested lists:: >>> np.array([[1, 2], [3, 4]]) array([[1, 2], [3, 4]]) For more information, read the section on lists in the `Python tutorial <http://docs.python.org/tut>`_. For a mapping type (key-value), see *dictionary*. 
mask A boolean array, used to select only certain elements for an operation:: >>> x = np.arange(5) >>> x array([0, 1, 2, 3, 4]) >>> mask = (x > 2) >>> mask array([False, False, False, True, True], dtype=bool) >>> x[mask] = -1 >>> x array([ 0, 1, 2, -1, -1]) masked array Array that suppresses values indicated by a mask:: >>> x = np.ma.masked_array([np.nan, 2, np.nan], [True, False, True]) >>> x masked_array(data = [-- 2.0 --], mask = [ True False True], fill_value = 1e+20) <BLANKLINE> >>> x + [1, 2, 3] masked_array(data = [-- 4.0 --], mask = [ True False True], fill_value = 1e+20) <BLANKLINE> Masked arrays are often used when operating on arrays containing missing or invalid entries. matrix A 2-dimensional ndarray that preserves its two-dimensional nature throughout operations. It has certain special operations, such as ``*`` (matrix multiplication) and ``**`` (matrix power), defined:: >>> x = np.mat([[1, 2], [3, 4]]) >>> x matrix([[1, 2], [3, 4]]) >>> x**2 matrix([[ 7, 10], [15, 22]]) method A function associated with an object. For example, each ndarray has a method called ``repeat``:: >>> x = np.array([1, 2, 3]) >>> x.repeat(2) array([1, 1, 2, 2, 3, 3]) ndarray See *array*. record array An `ndarray`_ with `structured data type`_ which has been subclassed as np.recarray and whose dtype is of type np.record, making the fields of its data type to be accessible by attribute. reference If ``a`` is a reference to ``b``, then ``(a is b) == True``. Therefore, ``a`` and ``b`` are different names for the same Python object. row-major A way to represent items in an N-dimensional array in the 1-dimensional computer memory. In row-major order, the rightmost index "varies the fastest": for example the array:: [[1, 2, 3], [4, 5, 6]] is represented in the row-major order as:: [1, 2, 3, 4, 5, 6] Row-major order is also known as the C order, as the C programming language uses it. New NumPy arrays are by default in row-major order. 
self Often seen in method signatures, ``self`` refers to the instance of the associated class. For example: >>> class Paintbrush(object): ... color = 'blue' ... ... def paint(self): ... print("Painting the city %s!" % self.color) ... >>> p = Paintbrush() >>> p.color = 'red' >>> p.paint() # self refers to 'p' Painting the city red! slice Used to select only certain elements from a sequence:: >>> x = range(5) >>> x [0, 1, 2, 3, 4] >>> x[1:3] # slice from 1 to 3 (excluding 3 itself) [1, 2] >>> x[1:5:2] # slice from 1 to 5, but skipping every second element [1, 3] >>> x[::-1] # slice a sequence in reverse [4, 3, 2, 1, 0] Arrays may have more than one dimension, each which can be sliced individually:: >>> x = np.array([[1, 2], [3, 4]]) >>> x array([[1, 2], [3, 4]]) >>> x[:, 1] array([2, 4]) structured data type A data type composed of other datatypes tuple A sequence that may contain a variable number of types of any kind. A tuple is immutable, i.e., once constructed it cannot be changed. Similar to a list, it can be indexed and sliced:: >>> x = (1, 'one', [1, 2]) >>> x (1, 'one', [1, 2]) >>> x[0] 1 >>> x[:2] (1, 'one') A useful concept is "tuple unpacking", which allows variables to be assigned to the contents of a tuple:: >>> x, y = (1, 2) >>> x, y = 1, 2 This is often used when a function returns multiple values: >>> def return_many(): ... return 1, 'alpha', None >>> a, b, c = return_many() >>> a, b, c (1, 'alpha', None) >>> a 1 >>> b 'alpha' ufunc Universal function. A fast element-wise array operation. Examples include ``add``, ``sin`` and ``logical_or``. view An array that does not own its data, but refers to another array's data instead. 
For example, we may create a view that only shows every second element of another array:: >>> x = np.arange(5) >>> x array([0, 1, 2, 3, 4]) >>> y = x[::2] >>> y array([0, 2, 4]) >>> x[0] = 3 # changing x changes y as well, since y is a view on x >>> y array([3, 2, 4]) wrapper Python is a high-level (highly abstracted, or English-like) language. This abstraction comes at a price in execution speed, and sometimes it becomes necessary to use lower level languages to do fast computations. A wrapper is code that provides a bridge between high and the low level languages, allowing, e.g., Python to execute code written in C or Fortran. Examples include ctypes, SWIG and Cython (which wraps C and C++) and f2py (which wraps Fortran). """ from __future__ import division, absolute_import, print_function
npalermo10/auto_choice_assay_train-test
refs/heads/master
venv/lib/python2.7/site-packages/pip/utils/logging.py
83
from __future__ import absolute_import import contextlib import logging import logging.handlers import os try: import threading except ImportError: import dummy_threading as threading from pip.compat import WINDOWS from pip.utils import ensure_dir try: from pip._vendor import colorama # Lots of different errors can come from this, including SystemError and # ImportError. except Exception: colorama = None _log_state = threading.local() _log_state.indentation = 0 @contextlib.contextmanager def indent_log(num=2): """ A context manager which will cause the log output to be indented for any log messages emited inside it. """ _log_state.indentation += num try: yield finally: _log_state.indentation -= num def get_indentation(): return _log_state.indentation class IndentingFormatter(logging.Formatter): def format(self, record): """ Calls the standard formatter, but will indent all of the log messages by our current indentation level. """ formatted = logging.Formatter.format(self, record) formatted = "".join([ (" " * get_indentation()) + line for line in formatted.splitlines(True) ]) return formatted def _color_wrap(*colors): def wrapped(inp): return "".join(list(colors) + [inp, colorama.Style.RESET_ALL]) return wrapped class ColorizedStreamHandler(logging.StreamHandler): # Don't build up a list of colors if we don't have colorama if colorama: COLORS = [ # This needs to be in order from highest logging level to lowest. 
(logging.ERROR, _color_wrap(colorama.Fore.RED)), (logging.WARNING, _color_wrap(colorama.Fore.YELLOW)), ] else: COLORS = [] def __init__(self, stream=None): logging.StreamHandler.__init__(self, stream) if WINDOWS and colorama: self.stream = colorama.AnsiToWin32(self.stream) def should_color(self): # Don't colorize things if we do not have colorama if not colorama: return False real_stream = ( self.stream if not isinstance(self.stream, colorama.AnsiToWin32) else self.stream.wrapped ) # If the stream is a tty we should color it if hasattr(real_stream, "isatty") and real_stream.isatty(): return True # If we have an ASNI term we should color it if os.environ.get("TERM") == "ANSI": return True # If anything else we should not color it return False def format(self, record): msg = logging.StreamHandler.format(self, record) if self.should_color(): for level, color in self.COLORS: if record.levelno >= level: msg = color(msg) break return msg class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler): def _open(self): ensure_dir(os.path.dirname(self.baseFilename)) return logging.handlers.RotatingFileHandler._open(self) class MaxLevelFilter(logging.Filter): def __init__(self, level): self.level = level def filter(self, record): return record.levelno < self.level
unlessbamboo/grocery-shop
refs/heads/master
language/python/src/logging/cgitbTest.py
1
#coding:utf-8 ''' cgitb模块用于日志信息调试,输出异常上下文所有相关变量的信息 ''' import cgitb import sys import traceback def cgitbTest(): cgitb.enable(format='text') list1 = [1, 2, 3] list2 = list1 return 1/0 if __name__ == '__main__': '''main''' cgitbTest()
melissaihrig/namebench
refs/heads/master
nb_third_party/jinja2/runtime.py
207
# -*- coding: utf-8 -*- """ jinja2.runtime ~~~~~~~~~~~~~~ Runtime helpers. :copyright: (c) 2010 by the Jinja Team. :license: BSD. """ import sys from itertools import chain, imap from jinja2.nodes import EvalContext from jinja2.utils import Markup, partial, soft_unicode, escape, missing, \ concat, MethodType, FunctionType, internalcode, next, \ object_type_repr from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \ TemplateNotFound # these variables are exported to the template runtime __all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup', 'TemplateRuntimeError', 'missing', 'concat', 'escape', 'markup_join', 'unicode_join', 'to_string', 'TemplateNotFound'] #: the types we support for context functions _context_function_types = (FunctionType, MethodType) #: the name of the function that is used to convert something into #: a string. 2to3 will adopt that automatically and the generated #: code can take advantage of it. to_string = unicode def markup_join(seq): """Concatenation that escapes if necessary and converts to unicode.""" buf = [] iterator = imap(soft_unicode, seq) for arg in iterator: buf.append(arg) if hasattr(arg, '__html__'): return Markup(u'').join(chain(buf, iterator)) return concat(buf) def unicode_join(seq): """Simple args to unicode conversion and concatenation.""" return concat(imap(unicode, seq)) def new_context(environment, template_name, blocks, vars=None, shared=None, globals=None, locals=None): """Internal helper to for context creation.""" if vars is None: vars = {} if shared: parent = vars else: parent = dict(globals or (), **vars) if locals: # if the parent is shared a copy should be created because # we don't want to modify the dict passed if shared: parent = dict(parent) for key, value in locals.iteritems(): if key[:2] == 'l_' and value is not missing: parent[key[2:]] = value return Context(environment, parent, template_name, blocks) class TemplateReference(object): """The `self` in templates.""" def 
__init__(self, context): self.__context = context def __getitem__(self, name): blocks = self.__context.blocks[name] wrap = self.__context.eval_ctx.autoescape and \ Markup or (lambda x: x) return BlockReference(name, self.__context, blocks, 0) def __repr__(self): return '<%s %r>' % ( self.__class__.__name__, self.__context.name ) class Context(object): """The template context holds the variables of a template. It stores the values passed to the template and also the names the template exports. Creating instances is neither supported nor useful as it's created automatically at various stages of the template evaluation and should not be created by hand. The context is immutable. Modifications on :attr:`parent` **must not** happen and modifications on :attr:`vars` are allowed from generated template code only. Template filters and global functions marked as :func:`contextfunction`\s get the active context passed as first argument and are allowed to access the context read-only. The template context supports read only dict operations (`get`, `keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`, `__getitem__`, `__contains__`). Additionally there is a :meth:`resolve` method that doesn't fail with a `KeyError` but returns an :class:`Undefined` object for missing variables. """ __slots__ = ('parent', 'vars', 'environment', 'eval_ctx', 'exported_vars', 'name', 'blocks', '__weakref__') def __init__(self, environment, parent, name, blocks): self.parent = parent self.vars = {} self.environment = environment self.eval_ctx = EvalContext(self.environment, name) self.exported_vars = set() self.name = name # create the initial mapping of blocks. Whenever template inheritance # takes place the runtime will update this mapping with the new blocks # from the template. 
self.blocks = dict((k, [v]) for k, v in blocks.iteritems()) def super(self, name, current): """Render a parent block.""" try: blocks = self.blocks[name] index = blocks.index(current) + 1 blocks[index] except LookupError: return self.environment.undefined('there is no parent block ' 'called %r.' % name, name='super') return BlockReference(name, self, blocks, index) def get(self, key, default=None): """Returns an item from the template context, if it doesn't exist `default` is returned. """ try: return self[key] except KeyError: return default def resolve(self, key): """Looks up a variable like `__getitem__` or `get` but returns an :class:`Undefined` object with the name of the name looked up. """ if key in self.vars: return self.vars[key] if key in self.parent: return self.parent[key] return self.environment.undefined(name=key) def get_exported(self): """Get a new dict with the exported variables.""" return dict((k, self.vars[k]) for k in self.exported_vars) def get_all(self): """Return a copy of the complete context as dict including the exported variables. """ return dict(self.parent, **self.vars) @internalcode def call(__self, __obj, *args, **kwargs): """Call the callable with the arguments and keyword arguments provided but inject the active context or environment as first argument if the callable is a :func:`contextfunction` or :func:`environmentfunction`. 
""" if __debug__: __traceback_hide__ = True if isinstance(__obj, _context_function_types): if getattr(__obj, 'contextfunction', 0): args = (__self,) + args elif getattr(__obj, 'evalcontextfunction', 0): args = (__self.eval_ctx,) + args elif getattr(__obj, 'environmentfunction', 0): args = (__self.environment,) + args return __obj(*args, **kwargs) def derived(self, locals=None): """Internal helper function to create a derived context.""" context = new_context(self.environment, self.name, {}, self.parent, True, None, locals) context.eval_ctx = self.eval_ctx context.blocks.update((k, list(v)) for k, v in self.blocks.iteritems()) return context def _all(meth): proxy = lambda self: getattr(self.get_all(), meth)() proxy.__doc__ = getattr(dict, meth).__doc__ proxy.__name__ = meth return proxy keys = _all('keys') values = _all('values') items = _all('items') # not available on python 3 if hasattr(dict, 'iterkeys'): iterkeys = _all('iterkeys') itervalues = _all('itervalues') iteritems = _all('iteritems') del _all def __contains__(self, name): return name in self.vars or name in self.parent def __getitem__(self, key): """Lookup a variable or raise `KeyError` if the variable is undefined. """ item = self.resolve(key) if isinstance(item, Undefined): raise KeyError(key) return item def __repr__(self): return '<%s %s of %r>' % ( self.__class__.__name__, repr(self.get_all()), self.name ) # register the context as mapping if possible try: from collections import Mapping Mapping.register(Context) except ImportError: pass class BlockReference(object): """One block on a template reference.""" def __init__(self, name, context, stack, depth): self.name = name self._context = context self._stack = stack self._depth = depth @property def super(self): """Super the block.""" if self._depth + 1 >= len(self._stack): return self._context.environment. \ undefined('there is no parent block called %r.' 
% self.name, name='super') return BlockReference(self.name, self._context, self._stack, self._depth + 1) @internalcode def __call__(self): rv = concat(self._stack[self._depth](self._context)) if self._context.eval_ctx.autoescape: rv = Markup(rv) return rv class LoopContext(object): """A loop context for dynamic iteration.""" def __init__(self, iterable, recurse=None): self._iterator = iter(iterable) self._recurse = recurse self.index0 = -1 # try to get the length of the iterable early. This must be done # here because there are some broken iterators around where there # __len__ is the number of iterations left (i'm looking at your # listreverseiterator!). try: self._length = len(iterable) except (TypeError, AttributeError): self._length = None def cycle(self, *args): """Cycles among the arguments with the current loop index.""" if not args: raise TypeError('no items for cycling given') return args[self.index0 % len(args)] first = property(lambda x: x.index0 == 0) last = property(lambda x: x.index0 + 1 == x.length) index = property(lambda x: x.index0 + 1) revindex = property(lambda x: x.length - x.index0) revindex0 = property(lambda x: x.length - x.index) def __len__(self): return self.length def __iter__(self): return LoopContextIterator(self) @internalcode def loop(self, iterable): if self._recurse is None: raise TypeError('Tried to call non recursive loop. Maybe you ' "forgot the 'recursive' modifier.") return self._recurse(iterable, self._recurse) # a nifty trick to enhance the error message if someone tried to call # the the loop without or with too many arguments. __call__ = loop del loop @property def length(self): if self._length is None: # if was not possible to get the length of the iterator when # the loop context was created (ie: iterating over a generator) # we have to convert the iterable into a sequence and use the # length of that. 
iterable = tuple(self._iterator) self._iterator = iter(iterable) self._length = len(iterable) + self.index0 + 1 return self._length def __repr__(self): return '<%s %r/%r>' % ( self.__class__.__name__, self.index, self.length ) class LoopContextIterator(object): """The iterator for a loop context.""" __slots__ = ('context',) def __init__(self, context): self.context = context def __iter__(self): return self def next(self): ctx = self.context ctx.index0 += 1 return next(ctx._iterator), ctx class Macro(object): """Wraps a macro.""" def __init__(self, environment, func, name, arguments, defaults, catch_kwargs, catch_varargs, caller): self._environment = environment self._func = func self._argument_count = len(arguments) self.name = name self.arguments = arguments self.defaults = defaults self.catch_kwargs = catch_kwargs self.catch_varargs = catch_varargs self.caller = caller @internalcode def __call__(self, *args, **kwargs): arguments = [] for idx, name in enumerate(self.arguments): try: value = args[idx] except: try: value = kwargs.pop(name) except: try: value = self.defaults[idx - self._argument_count] except: value = self._environment.undefined( 'parameter %r was not provided' % name, name=name) arguments.append(value) # it's important that the order of these arguments does not change # if not also changed in the compiler's `function_scoping` method. # the order is caller, keyword arguments, positional arguments! 
if self.caller: caller = kwargs.pop('caller', None) if caller is None: caller = self._environment.undefined('No caller defined', name='caller') arguments.append(caller) if self.catch_kwargs: arguments.append(kwargs) elif kwargs: raise TypeError('macro %r takes no keyword argument %r' % (self.name, next(iter(kwargs)))) if self.catch_varargs: arguments.append(args[self._argument_count:]) elif len(args) > self._argument_count: raise TypeError('macro %r takes not more than %d argument(s)' % (self.name, len(self.arguments))) return self._func(*arguments) def __repr__(self): return '<%s %s>' % ( self.__class__.__name__, self.name is None and 'anonymous' or repr(self.name) ) class Undefined(object): """The default undefined type. This undefined type can be printed and iterated over, but every other access will raise an :exc:`UndefinedError`: >>> foo = Undefined(name='foo') >>> str(foo) '' >>> not foo True >>> foo + 42 Traceback (most recent call last): ... UndefinedError: 'foo' is undefined """ __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name', '_undefined_exception') def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError): self._undefined_hint = hint self._undefined_obj = obj self._undefined_name = name self._undefined_exception = exc @internalcode def _fail_with_undefined_error(self, *args, **kwargs): """Regular callback function for undefined objects that raises an `UndefinedError` on call. 
""" if self._undefined_hint is None: if self._undefined_obj is missing: hint = '%r is undefined' % self._undefined_name elif not isinstance(self._undefined_name, basestring): hint = '%s has no element %r' % ( object_type_repr(self._undefined_obj), self._undefined_name ) else: hint = '%r has no attribute %r' % ( object_type_repr(self._undefined_obj), self._undefined_name ) else: hint = self._undefined_hint raise self._undefined_exception(hint) __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \ __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \ __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \ __getattr__ = __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = \ __int__ = __float__ = __complex__ = __pow__ = __rpow__ = \ _fail_with_undefined_error def __str__(self): return unicode(self).encode('utf-8') # unicode goes after __str__ because we configured 2to3 to rename # __unicode__ to __str__. because the 2to3 tree is not designed to # remove nodes from it, we leave the above __str__ around and let # it override at runtime. def __unicode__(self): return u'' def __len__(self): return 0 def __iter__(self): if 0: yield None def __nonzero__(self): return False def __repr__(self): return 'Undefined' class DebugUndefined(Undefined): """An undefined that returns the debug info when printed. >>> foo = DebugUndefined(name='foo') >>> str(foo) '{{ foo }}' >>> not foo True >>> foo + 42 Traceback (most recent call last): ... UndefinedError: 'foo' is undefined """ __slots__ = () def __unicode__(self): if self._undefined_hint is None: if self._undefined_obj is missing: return u'{{ %s }}' % self._undefined_name return '{{ no such element: %s[%r] }}' % ( object_type_repr(self._undefined_obj), self._undefined_name ) return u'{{ undefined value printed: %s }}' % self._undefined_hint class StrictUndefined(Undefined): """An undefined that barks on print and iteration as well as boolean tests and all kinds of comparisons. 
In other words: you can do nothing with it except checking if it's defined using the `defined` test. >>> foo = StrictUndefined(name='foo') >>> str(foo) Traceback (most recent call last): ... UndefinedError: 'foo' is undefined >>> not foo Traceback (most recent call last): ... UndefinedError: 'foo' is undefined >>> foo + 42 Traceback (most recent call last): ... UndefinedError: 'foo' is undefined """ __slots__ = () __iter__ = __unicode__ = __str__ = __len__ = __nonzero__ = __eq__ = \ __ne__ = Undefined._fail_with_undefined_error # remove remaining slots attributes, after the metaclass did the magic they # are unneeded and irritating as they contain wrong data for the subclasses. del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
kralf/morsel-ros
refs/heads/master
src/morsel/morsel_simulator/python/morsel_ros/output/ros_output.py
1
from morsel.nodes import Output import re import inspect #------------------------------------------------------------------------------- class ROSOutput(Output): def __init__(self, node, name = "ROSOutput", topic = None, queueSize = 1000, frame = None, **kargs): Output.__init__(self, name, **kargs) if topic[0] != "/": if self.parent: self.topic = "/"+re.sub("\s+", "_", re.sub("([a-z])([A-Z0-9])", "\\1_\\2", self.parent.getName())).lower() else: self.topic = "" self.topic += "/"+topic else: self.topic = topic if not frame and self.parent != render: frame = "/"+re.sub("\s+", "_", re.sub("([a-z])([A-Z0-9])", "\\1_\\2", self.parent.getName())).lower() self.node = node self.queueSize = queueSize self.publisher = None self.frame = frame #------------------------------------------------------------------------------- def getModule(self): for frame in inspect.getouterframes(inspect.currentframe()): module = inspect.getmodule(frame[0]) if module.__name__ != __name__: return module.__name__ return __name__ module = property(getModule) #------------------------------------------------------------------------------- def getTopic(self): return self.topics[self.module] def setTopic(self, topic): if not hasattr(self, "topics"): self.topics = {} self.topics[self.module] = topic topic = property(getTopic, setTopic) #------------------------------------------------------------------------------- def getNode(self): return self.nodes[self.topic] def setNode(self, node): if not hasattr(self, "nodes"): self.nodes = {} self.nodes[self.topic] = node node = property(getNode, setNode) #------------------------------------------------------------------------------- def getQueueSize(self): return self.queueSizes[self.topic] def setQueueSize(self, queueSize): if not hasattr(self, "queueSizes"): self.queueSizes = {} self.queueSizes[self.topic] = queueSize queueSize = property(getQueueSize, setQueueSize) #------------------------------------------------------------------------------- def 
getPublisher(self): return self.publishers[self.topic] def setPublisher(self, publisher): if not hasattr(self, "publishers"): self.publishers = {} self.publishers[self.topic] = publisher publisher = property(getPublisher, setPublisher) #------------------------------------------------------------------------------- def getFrame(self): if self.parent.hasPythonTag("frame"): return self.parent.getPythonTag("frame") else: return "/world" def setFrame(self, frame): if frame == "/world": frame = None if self.parent.hasPythonTag("frame"): if not frame or self.parent.getPythonTag("frame") != frame: framework.error("Conflicting frame identifiers") elif frame: self.parent.setPythonTag("frame", frame) frame = property(getFrame, setFrame) #------------------------------------------------------------------------------- def getFrameParent(self, node = None): if not node: node = self.parent while node and node.hasParent(): if node.getParent().hasPythonTag("frame"): return node.getParent(), node.getParent().getPythonTag("frame") else: if node.getParent().hasParent(): node = node.getParent() else: node = None return render, "/world" frameParent = property(getFrameParent) #------------------------------------------------------------------------------- def getParentFrame(self, node = None): parentNode, parentFrame = self.getFrameParent(node) return parentFrame parentFrame = property(getParentFrame) #------------------------------------------------------------------------------- def publish(self, *args): self.publishers[self.topic].publish(*args)
chris-chris/tensorflow
refs/heads/master
tensorflow/contrib/integrate/__init__.py
100
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Integration and ODE solvers. See the @{$python/contrib.integrate} guide. @@odeint """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # pylint: disable=wildcard-import from tensorflow.contrib.integrate.python.ops.odes import * from tensorflow.python.util.all_util import remove_undocumented remove_undocumented(__name__)
vtlg/angulegis
refs/heads/master
angulegis/node_modules/node-gyp/gyp/pylib/gyp/common.py
1292
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. from __future__ import with_statement import collections import errno import filecmp import os.path import re import tempfile import sys # A minimal memoizing decorator. It'll blow up if the args aren't immutable, # among other "problems". class memoize(object): def __init__(self, func): self.func = func self.cache = {} def __call__(self, *args): try: return self.cache[args] except KeyError: result = self.func(*args) self.cache[args] = result return result class GypError(Exception): """Error class representing an error, which is to be presented to the user. The main entry point will catch and display this. """ pass def ExceptionAppend(e, msg): """Append a message to the given exception's message.""" if not e.args: e.args = (msg,) elif len(e.args) == 1: e.args = (str(e.args[0]) + ' ' + msg,) else: e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:] def FindQualifiedTargets(target, qualified_list): """ Given a list of qualified targets, return the qualified targets for the specified |target|. """ return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target] def ParseQualifiedTarget(target): # Splits a qualified target into a build file, target name and toolset. # NOTE: rsplit is used to disambiguate the Windows drive letter separator. target_split = target.rsplit(':', 1) if len(target_split) == 2: [build_file, target] = target_split else: build_file = None target_split = target.rsplit('#', 1) if len(target_split) == 2: [target, toolset] = target_split else: toolset = None return [build_file, target, toolset] def ResolveTarget(build_file, target, toolset): # This function resolves a target into a canonical form: # - a fully defined build file, either absolute or relative to the current # directory # - a target name # - a toolset # # build_file is the file relative to which 'target' is defined. 
# target is the qualified target. # toolset is the default toolset for that target. [parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target) if parsed_build_file: if build_file: # If a relative path, parsed_build_file is relative to the directory # containing build_file. If build_file is not in the current directory, # parsed_build_file is not a usable path as-is. Resolve it by # interpreting it as relative to build_file. If parsed_build_file is # absolute, it is usable as a path regardless of the current directory, # and os.path.join will return it as-is. build_file = os.path.normpath(os.path.join(os.path.dirname(build_file), parsed_build_file)) # Further (to handle cases like ../cwd), make it relative to cwd) if not os.path.isabs(build_file): build_file = RelativePath(build_file, '.') else: build_file = parsed_build_file if parsed_toolset: toolset = parsed_toolset return [build_file, target, toolset] def BuildFile(fully_qualified_target): # Extracts the build file from the fully qualified target. return ParseQualifiedTarget(fully_qualified_target)[0] def GetEnvironFallback(var_list, default): """Look up a key in the environment, with fallback to secondary keys and finally falling back to a default value.""" for var in var_list: if var in os.environ: return os.environ[var] return default def QualifiedTarget(build_file, target, toolset): # "Qualified" means the file that a target was defined in and the target # name, separated by a colon, suffixed by a # and the toolset name: # /path/to/file.gyp:target_name#toolset fully_qualified = build_file + ':' + target if toolset: fully_qualified = fully_qualified + '#' + toolset return fully_qualified @memoize def RelativePath(path, relative_to, follow_path_symlink=True): # Assuming both |path| and |relative_to| are relative to the current # directory, returns a relative path that identifies path relative to # relative_to. 
# If |follow_symlink_path| is true (default) and |path| is a symlink, then # this method returns a path to the real file represented by |path|. If it is # false, this method returns a path to the symlink. If |path| is not a # symlink, this option has no effect. # Convert to normalized (and therefore absolute paths). if follow_path_symlink: path = os.path.realpath(path) else: path = os.path.abspath(path) relative_to = os.path.realpath(relative_to) # On Windows, we can't create a relative path to a different drive, so just # use the absolute path. if sys.platform == 'win32': if (os.path.splitdrive(path)[0].lower() != os.path.splitdrive(relative_to)[0].lower()): return path # Split the paths into components. path_split = path.split(os.path.sep) relative_to_split = relative_to.split(os.path.sep) # Determine how much of the prefix the two paths share. prefix_len = len(os.path.commonprefix([path_split, relative_to_split])) # Put enough ".." components to back up out of relative_to to the common # prefix, and then append the part of path_split after the common prefix. relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \ path_split[prefix_len:] if len(relative_split) == 0: # The paths were the same. return '' # Turn it back into a string and we're done. return os.path.join(*relative_split) @memoize def InvertRelativePath(path, toplevel_dir=None): """Given a path like foo/bar that is relative to toplevel_dir, return the inverse relative path back to the toplevel_dir. E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path))) should always produce the empty string, unless the path contains symlinks. """ if not path: return path toplevel_dir = '.' if toplevel_dir is None else toplevel_dir return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path)) def FixIfRelativePath(path, relative_to): # Like RelativePath but returns |path| unchanged if it is absolute. 
if os.path.isabs(path): return path return RelativePath(path, relative_to) def UnrelativePath(path, relative_to): # Assuming that |relative_to| is relative to the current directory, and |path| # is a path relative to the dirname of |relative_to|, returns a path that # identifies |path| relative to the current directory. rel_dir = os.path.dirname(relative_to) return os.path.normpath(os.path.join(rel_dir, path)) # re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at # http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02 # and the documentation for various shells. # _quote is a pattern that should match any argument that needs to be quoted # with double-quotes by EncodePOSIXShellArgument. It matches the following # characters appearing anywhere in an argument: # \t, \n, space parameter separators # # comments # $ expansions (quoted to always expand within one argument) # % called out by IEEE 1003.1 XCU.2.2 # & job control # ' quoting # (, ) subshell execution # *, ?, [ pathname expansion # ; command delimiter # <, >, | redirection # = assignment # {, } brace expansion (bash) # ~ tilde expansion # It also matches the empty string, because "" (or '') is the only way to # represent an empty string literal argument to a POSIX shell. # # This does not match the characters in _escape, because those need to be # backslash-escaped regardless of whether they appear in a double-quoted # string. _quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$') # _escape is a pattern that should match any character that needs to be # escaped with a backslash, whether or not the argument matched the _quote # pattern. _escape is used with re.sub to backslash anything in _escape's # first match group, hence the (parentheses) in the regular expression. 
# # _escape matches the following characters appearing anywhere in an argument: # " to prevent POSIX shells from interpreting this character for quoting # \ to prevent POSIX shells from interpreting this character for escaping # ` to prevent POSIX shells from interpreting this character for command # substitution # Missing from this list is $, because the desired behavior of # EncodePOSIXShellArgument is to permit parameter (variable) expansion. # # Also missing from this list is !, which bash will interpret as the history # expansion character when history is enabled. bash does not enable history # by default in non-interactive shells, so this is not thought to be a problem. # ! was omitted from this list because bash interprets "\!" as a literal string # including the backslash character (avoiding history expansion but retaining # the backslash), which would not be correct for argument encoding. Handling # this case properly would also be problematic because bash allows the history # character to be changed with the histchars shell variable. Fortunately, # as history is not enabled in non-interactive shells and # EncodePOSIXShellArgument is only expected to encode for non-interactive # shells, there is no room for error here by ignoring !. _escape = re.compile(r'(["\\`])') def EncodePOSIXShellArgument(argument): """Encodes |argument| suitably for consumption by POSIX shells. argument may be quoted and escaped as necessary to ensure that POSIX shells treat the returned value as a literal representing the argument passed to this function. Parameter (variable) expansions beginning with $ are allowed to remain intact without escaping the $, to allow the argument to contain references to variables to be expanded by the shell. 
""" if not isinstance(argument, str): argument = str(argument) if _quote.search(argument): quote = '"' else: quote = '' encoded = quote + re.sub(_escape, r'\\\1', argument) + quote return encoded def EncodePOSIXShellList(list): """Encodes |list| suitably for consumption by POSIX shells. Returns EncodePOSIXShellArgument for each item in list, and joins them together using the space character as an argument separator. """ encoded_arguments = [] for argument in list: encoded_arguments.append(EncodePOSIXShellArgument(argument)) return ' '.join(encoded_arguments) def DeepDependencyTargets(target_dicts, roots): """Returns the recursive list of target dependencies.""" dependencies = set() pending = set(roots) while pending: # Pluck out one. r = pending.pop() # Skip if visited already. if r in dependencies: continue # Add it. dependencies.add(r) # Add its children. spec = target_dicts[r] pending.update(set(spec.get('dependencies', []))) pending.update(set(spec.get('dependencies_original', []))) return list(dependencies - set(roots)) def BuildFileTargets(target_list, build_file): """From a target_list, returns the subset from the specified build_file. """ return [p for p in target_list if BuildFile(p) == build_file] def AllTargets(target_list, target_dicts, build_file): """Returns all targets (direct and dependencies) for the specified build_file. """ bftargets = BuildFileTargets(target_list, build_file) deptargets = DeepDependencyTargets(target_dicts, bftargets) return bftargets + deptargets def WriteOnDiff(filename): """Write to a file only if the new contents differ. Arguments: filename: name of the file to potentially write to. Returns: A file like object which will write to temporary file and only overwrite the target if it differs (on close). """ class Writer(object): """Wrapper around file which only covers the target if it differs.""" def __init__(self): # Pick temporary file. 
tmp_fd, self.tmp_path = tempfile.mkstemp( suffix='.tmp', prefix=os.path.split(filename)[1] + '.gyp.', dir=os.path.split(filename)[0]) try: self.tmp_file = os.fdopen(tmp_fd, 'wb') except Exception: # Don't leave turds behind. os.unlink(self.tmp_path) raise def __getattr__(self, attrname): # Delegate everything else to self.tmp_file return getattr(self.tmp_file, attrname) def close(self): try: # Close tmp file. self.tmp_file.close() # Determine if different. same = False try: same = filecmp.cmp(self.tmp_path, filename, False) except OSError, e: if e.errno != errno.ENOENT: raise if same: # The new file is identical to the old one, just get rid of the new # one. os.unlink(self.tmp_path) else: # The new file is different from the old one, or there is no old one. # Rename the new file to the permanent name. # # tempfile.mkstemp uses an overly restrictive mode, resulting in a # file that can only be read by the owner, regardless of the umask. # There's no reason to not respect the umask here, which means that # an extra hoop is required to fetch it and reset the new file's mode. # # No way to get the umask without setting a new one? Set a safe one # and then set it back to the old value. umask = os.umask(077) os.umask(umask) os.chmod(self.tmp_path, 0666 & ~umask) if sys.platform == 'win32' and os.path.exists(filename): # NOTE: on windows (but not cygwin) rename will not replace an # existing file, so it must be preceded with a remove. Sadly there # is no way to make the switch atomic. os.remove(filename) os.rename(self.tmp_path, filename) except Exception: # Don't leave turds behind. 
os.unlink(self.tmp_path) raise return Writer() def EnsureDirExists(path): """Make sure the directory for |path| exists.""" try: os.makedirs(os.path.dirname(path)) except OSError: pass def GetFlavor(params): """Returns |params.flavor| if it's set, the system's default flavor else.""" flavors = { 'cygwin': 'win', 'win32': 'win', 'darwin': 'mac', } if 'flavor' in params: return params['flavor'] if sys.platform in flavors: return flavors[sys.platform] if sys.platform.startswith('sunos'): return 'solaris' if sys.platform.startswith('freebsd'): return 'freebsd' if sys.platform.startswith('openbsd'): return 'openbsd' if sys.platform.startswith('netbsd'): return 'netbsd' if sys.platform.startswith('aix'): return 'aix' return 'linux' def CopyTool(flavor, out_path): """Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it to |out_path|.""" # aix and solaris just need flock emulation. mac and win use more complicated # support scripts. prefix = { 'aix': 'flock', 'solaris': 'flock', 'mac': 'mac', 'win': 'win' }.get(flavor, None) if not prefix: return # Slurp input file. source_path = os.path.join( os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix) with open(source_path) as source_file: source = source_file.readlines() # Add header and write it out. tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix) with open(tool_path, 'w') as tool_file: tool_file.write( ''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:])) # Make file executable. os.chmod(tool_path, 0755) # From Alex Martelli, # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560 # ASPN: Python Cookbook: Remove duplicates from a sequence # First comment, dated 2001/10/13. # (Also in the printed Python Cookbook.) 
def uniquer(seq, idfun=None): if idfun is None: idfun = lambda x: x seen = {} result = [] for item in seq: marker = idfun(item) if marker in seen: continue seen[marker] = 1 result.append(item) return result # Based on http://code.activestate.com/recipes/576694/. class OrderedSet(collections.MutableSet): def __init__(self, iterable=None): self.end = end = [] end += [None, end, end] # sentinel node for doubly linked list self.map = {} # key --> [key, prev, next] if iterable is not None: self |= iterable def __len__(self): return len(self.map) def __contains__(self, key): return key in self.map def add(self, key): if key not in self.map: end = self.end curr = end[1] curr[2] = end[1] = self.map[key] = [key, curr, end] def discard(self, key): if key in self.map: key, prev_item, next_item = self.map.pop(key) prev_item[2] = next_item next_item[1] = prev_item def __iter__(self): end = self.end curr = end[2] while curr is not end: yield curr[0] curr = curr[2] def __reversed__(self): end = self.end curr = end[1] while curr is not end: yield curr[0] curr = curr[1] # The second argument is an addition that causes a pylint warning. def pop(self, last=True): # pylint: disable=W0221 if not self: raise KeyError('set is empty') key = self.end[1][0] if last else self.end[2][0] self.discard(key) return key def __repr__(self): if not self: return '%s()' % (self.__class__.__name__,) return '%s(%r)' % (self.__class__.__name__, list(self)) def __eq__(self, other): if isinstance(other, OrderedSet): return len(self) == len(other) and list(self) == list(other) return set(self) == set(other) # Extensions to the recipe. 
def update(self, iterable): for i in iterable: if i not in self: self.add(i) class CycleError(Exception): """An exception raised when an unexpected cycle is detected.""" def __init__(self, nodes): self.nodes = nodes def __str__(self): return 'CycleError: cycle involving: ' + str(self.nodes) def TopologicallySorted(graph, get_edges): r"""Topologically sort based on a user provided edge definition. Args: graph: A list of node names. get_edges: A function mapping from node name to a hashable collection of node names which this node has outgoing edges to. Returns: A list containing all of the node in graph in topological order. It is assumed that calling get_edges once for each node and caching is cheaper than repeatedly calling get_edges. Raises: CycleError in the event of a cycle. Example: graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'} def GetEdges(node): return re.findall(r'\$\(([^))]\)', graph[node]) print TopologicallySorted(graph.keys(), GetEdges) ==> ['a', 'c', b'] """ get_edges = memoize(get_edges) visited = set() visiting = set() ordered_nodes = [] def Visit(node): if node in visiting: raise CycleError(visiting) if node in visited: return visited.add(node) visiting.add(node) for neighbor in get_edges(node): Visit(neighbor) visiting.remove(node) ordered_nodes.insert(0, node) for node in sorted(graph): Visit(node) return ordered_nodes def CrossCompileRequested(): # TODO: figure out how to not build extra host objects in the # non-cross-compile case when this is enabled, and enable unconditionally. return (os.environ.get('GYP_CROSSCOMPILE') or os.environ.get('AR_host') or os.environ.get('CC_host') or os.environ.get('CXX_host') or os.environ.get('AR_target') or os.environ.get('CC_target') or os.environ.get('CXX_target'))
DirtyPiece/dancestudio
refs/heads/master
Build/Tools/Python27/Lib/lib2to3/fixes/fix_renames.py
326
"""Fix incompatible renames Fixes: * sys.maxint -> sys.maxsize """ # Author: Christian Heimes # based on Collin Winter's fix_import # Local imports from .. import fixer_base from ..fixer_util import Name, attr_chain MAPPING = {"sys": {"maxint" : "maxsize"}, } LOOKUP = {} def alternates(members): return "(" + "|".join(map(repr, members)) + ")" def build_pattern(): #bare = set() for module, replace in MAPPING.items(): for old_attr, new_attr in replace.items(): LOOKUP[(module, old_attr)] = new_attr #bare.add(module) #bare.add(old_attr) #yield """ # import_name< 'import' (module=%r # | dotted_as_names< any* module=%r any* >) > # """ % (module, module) yield """ import_from< 'from' module_name=%r 'import' ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) > """ % (module, old_attr, old_attr) yield """ power< module_name=%r trailer< '.' attr_name=%r > any* > """ % (module, old_attr) #yield """bare_name=%s""" % alternates(bare) class FixRenames(fixer_base.BaseFix): BM_compatible = True PATTERN = "|".join(build_pattern()) order = "pre" # Pre-order tree traversal # Don't match the node if it's within another match def match(self, node): match = super(FixRenames, self).match results = match(node) if results: if any(match(obj) for obj in attr_chain(node, "parent")): return False return results return False #def start_tree(self, tree, filename): # super(FixRenames, self).start_tree(tree, filename) # self.replace = {} def transform(self, node, results): mod_name = results.get("module_name") attr_name = results.get("attr_name") #bare_name = results.get("bare_name") #import_mod = results.get("module") if mod_name and attr_name: new_attr = unicode(LOOKUP[(mod_name.value, attr_name.value)]) attr_name.replace(Name(new_attr, prefix=attr_name.prefix))
xodus7/tensorflow
refs/heads/master
tensorflow/contrib/tensor_forest/python/ops/data_ops.py
90
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Ops for preprocessing data.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.tensor_forest.python.ops import tensor_forest_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.platform import tf_logging as logging # Data column types for indicating categorical or other non-float values. DATA_FLOAT = 0 DATA_CATEGORICAL = 1 DTYPE_TO_FTYPE = { dtypes.string: DATA_CATEGORICAL, dtypes.int32: DATA_CATEGORICAL, dtypes.int64: DATA_CATEGORICAL, dtypes.float32: DATA_FLOAT, dtypes.float64: DATA_FLOAT } def CastToFloat(tensor): if tensor.dtype == dtypes.string: return tensor_forest_ops.reinterpret_string_to_float(tensor) elif tensor.dtype.is_integer: return math_ops.to_float(tensor) else: return tensor # TODO(gilberth): If protos are ever allowed in dynamically loaded custom # op libraries, convert this to a proto like a sane person. 
class TensorForestDataSpec(object): def __init__(self): self.sparse = DataColumnCollection() self.dense = DataColumnCollection() self.dense_features_size = 0 def SerializeToString(self): return 'dense_features_size: %d dense: [%s] sparse: [%s]' % ( self.dense_features_size, self.dense.SerializeToString(), self.sparse.SerializeToString()) class DataColumnCollection(object): """Collection of DataColumns, meant to mimic a proto repeated field.""" def __init__(self): self.cols = [] def add(self): # pylint: disable=invalid-name self.cols.append(DataColumn()) return self.cols[-1] def size(self): # pylint: disable=invalid-name return len(self.cols) def SerializeToString(self): ret = '' for c in self.cols: ret += '{%s}' % c.SerializeToString() return ret class DataColumn(object): def __init__(self): self.name = '' self.original_type = '' self.size = 0 def SerializeToString(self): return 'name: {0} original_type: {1} size: {2}'.format(self.name, self.original_type, self.size) def GetColumnName(column_key, col_num): if isinstance(column_key, str): return column_key else: return getattr(column_key, 'column_name', str(col_num)) def ParseDataTensorOrDict(data): """Return a tensor to use for input data. The incoming features can be a dict where keys are the string names of the columns, which we turn into a single 2-D tensor. Args: data: `Tensor` or `dict` of `Tensor` objects. Returns: A 2-D tensor for input to tensor_forest, a keys tensor for the tf.Examples if they exist, and a list of the type of each column (e.g. continuous float, categorical). """ data_spec = TensorForestDataSpec() if isinstance(data, dict): dense_features_size = 0 dense_features = [] sparse_features = [] for k in sorted(data.keys()): is_sparse = isinstance(data[k], sparse_tensor.SparseTensor) if is_sparse: # TODO(gilberth): support sparse continuous. 
if data[k].dtype == dtypes.float32: logging.info('TensorForest does not support sparse continuous.') continue elif data_spec.sparse.size() == 0: col_spec = data_spec.sparse.add() col_spec.original_type = DATA_CATEGORICAL col_spec.name = 'all_sparse' col_spec.size = -1 sparse_features.append( sparse_tensor.SparseTensor(data[ k].indices, CastToFloat(data[k].values), data[k].dense_shape)) else: col_spec = data_spec.dense.add() col_spec.original_type = DTYPE_TO_FTYPE[data[k].dtype] col_spec.name = GetColumnName(k, len(dense_features)) # the second dimension of get_shape should always be known. shape = data[k].get_shape() if len(shape) == 1: col_spec.size = 1 else: col_spec.size = shape[1].value dense_features_size += col_spec.size dense_features.append(CastToFloat(data[k])) processed_dense_features = None processed_sparse_features = None if dense_features: processed_dense_features = array_ops.concat(dense_features, 1) data_spec.dense_features_size = dense_features_size if sparse_features: processed_sparse_features = sparse_ops.sparse_concat(1, sparse_features) logging.info(data_spec.SerializeToString()) return processed_dense_features, processed_sparse_features, data_spec elif isinstance(data, sparse_tensor.SparseTensor): col_spec = data_spec.sparse.add() col_spec.name = 'sparse_features' col_spec.original_type = DTYPE_TO_FTYPE[data.dtype] col_spec.size = -1 data_spec.dense_features_size = 0 return None, data, data_spec else: data = ops.convert_to_tensor(data) col_spec = data_spec.dense.add() col_spec.name = 'dense_features' col_spec.original_type = DTYPE_TO_FTYPE[data.dtype] col_spec.size = data.get_shape()[1] data_spec.dense_features_size = col_spec.size return data, None, data_spec def ParseLabelTensorOrDict(labels): """Return a tensor to use for input labels to tensor_forest. The incoming targets can be a dict where keys are the string names of the columns, which we turn into a single 1-D tensor for classification or 2-D tensor for regression. 
Converts sparse tensors to dense ones. Args: labels: `Tensor` or `dict` of `Tensor` objects. Returns: A 2-D tensor for labels/outputs. """ if isinstance(labels, dict): return math_ops.to_float( array_ops.concat( [ sparse_ops.sparse_tensor_to_dense( labels[k], default_value=-1) if isinstance( labels, sparse_tensor.SparseTensor) else labels[k] for k in sorted(labels.keys()) ], 1)) else: if isinstance(labels, sparse_tensor.SparseTensor): return math_ops.to_float(sparse_ops.sparse_tensor_to_dense( labels, default_value=-1)) else: return math_ops.to_float(labels)
seanli9jan/tensorflow
refs/heads/master
tensorflow/python/ops/parallel_for/pfor.py
2
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Compiled parallel-for loop.""" # pylint: disable=missing-docstring from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import tensor_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import bitwise_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import functional_ops from tensorflow.python.ops import gen_parsing_ops from tensorflow.python.ops import gen_sparse_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import tensor_array_ops from tensorflow.python.platform import flags from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import nest flags.DEFINE_bool( "op_conversion_fallback_to_while_loop", 
False, "If true, falls back to using a while loop for ops for " "which a converter is not defined.") def _stack(t, length): """stacks `t` `length` times.""" ones = array_ops.ones_like(array_ops.shape(t)) multiples = array_ops.concat([length, ones], 0) t = array_ops.tile(array_ops.expand_dims(t, 0), multiples) return wrap(t, True) # The following stateful ops can be safely called once, and with the same # signature as the unconverted version, if their inputs are loop invariant. # TODO(agarwal): implement a strategy for converting Variable reads/writes. The # plan is to map each read/write in the loop_fn to a corresponding merged # read/write in the converted graph. Writes need to be mergeable (e.g. # AssignAdd) to be used in `pfor`. Given a certain read/write order in the # loop_fn, doing a one-to-one conversion will simulate executing such # instructions in lock-step across all iterations. passthrough_stateful_ops = set([ "VariableV2", "VarHandleOp", "ReadVariableOp", "StackV2", "TensorArrayWriteV3", "TensorArrayReadV3", "TensorArraySizeV3", ]) def _is_stateful_pfor_op(op): if isinstance(op, WhileOp): return op.is_stateful if op.type == "Const": # Const didn't have an op_def. return False if op.type in passthrough_stateful_ops: return False assert hasattr(op, "op_def") and op.op_def is not None, op return op.op_def.is_stateful # pylint: disable=protected-access class WhileOp(object): """Object for storing state for converting the outputs of a while_loop.""" def __init__(self, exit_node, pfor_ops): """Initializer. Args: exit_node: A tensor output from the while_loop. pfor_ops: list of ops inside the current pfor loop. 
""" self._pfor_ops = set(pfor_ops) self._pfor_op_ids = set([x._id for x in pfor_ops]) assert isinstance(exit_node, ops.Tensor) self._while_context = exit_node.op._get_control_flow_context() assert isinstance(self._while_context, control_flow_ops.WhileContext) self._context_name = self._while_context.name self._condition = self._while_context.pivot.op.inputs[0] # Parts of an external while_loop could be created inside a pfor loop. # However for the purpose here, we declare such loops to be external. Also # note that we check if the condition was created inside or outside to # determine if the while_loop was first created inside or outside. # TODO(agarwal): check that the Enter and Exit of this loop are unstacked. self._is_inside_loop = self.op_is_inside_loop(self._condition.op) if self._is_inside_loop: for e in self._while_context.loop_exits: assert self.op_is_inside_loop(e.op) # Note the code below tries to reverse engineer an existing while_loop graph # by assuming the following pattern of nodes. # # NextIteration <---- Body <--- Enter # | ^ # V ___| Y # Enter -> Merge -> Switch___ # ^ | N # | V # LoopCond Exit # Node that elements in the list below correspond one-to-one with each # other. i.e. these lists are the same size, and the i_th entry corresponds # to different Operations/Tensors of a single cycle as illustrated above. # List of Switch ops (ops.Operation) that feed into an Exit Node. self._exit_switches = [] # List of inputs (ops.Tensor) to NextIteration. self._body_outputs = [] # List of list of control inputs of the NextIteration nodes. self._next_iter_control_inputs = [] # List of Merge ops (ops.Operation). self._enter_merges = [] # List of output (ops.Tensor) of Exit nodes. self._outputs = [] # List of Enter Tensors. # There are two types of Enter nodes: # - The Enter nodes that are used in the `loop_vars` argument to # `while_loop` (see # https://www.tensorflow.org/api_docs/python/tf/while_loop). 
We collect # these Enter nodes immediately below by tracing backwards from the Exit # nodes via Exit <- Switch <- Merge <- Enter. You can see this chain in the # diagram above. This allows us to have a 1:1 correspondence between the # self._outputs and the first elements in self._enters. # - The Enter nodes that are used only by the body. They don't appear in the # `loop_vars` and are not returned from the `while_loop`. In Python code, # they are usually captured by the body lambda. We collect them below by # iterating over all the ops in the graph. They are appended to the end of # self._enters or self._direct_enters, and don't correspond to any outputs # in self._outputs. Note that we keep the resource/variant Enter nodes in # self._direct_enters and the constructed while_loop's body uses them # directly as opposed to passing them as loop variables. This is done # because the while_body cannot partition the resource/variant Tensors, so # it has to leave them unchanged. self._enters = [] self._direct_enters = [] for e in self._while_context.loop_exits: self._outputs.append(e.op.outputs[0]) switch = e.op.inputs[0].op assert switch.type == "Switch", switch self._exit_switches.append(switch) merge = switch.inputs[0].op assert merge.type == "Merge", merge self._enter_merges.append(merge) enter = merge.inputs[0].op assert enter.type == "Enter", enter self._enters.append(enter.outputs[0]) next_iter = merge.inputs[1].op assert next_iter.type == "NextIteration", next_iter self._body_outputs.append(next_iter.inputs[0]) self._next_iter_control_inputs.append(next_iter.control_inputs) # Collect all the Enter nodes that are not part of `loop_vars`, the second # category described above. # Also track whether the loop body has any stateful ops. self._is_stateful = False for op in ops.get_default_graph().get_operations(): # TODO(agarwal): make sure this works with nested case. 
control_flow_context = op._get_control_flow_context() if control_flow_context is None: continue if control_flow_context.name == self._context_name: self._is_stateful |= _is_stateful_pfor_op(op) if op.type == "Enter": output = op.outputs[0] if output not in self._enters: if output.dtype in (dtypes.resource, dtypes.variant): if output not in self._direct_enters: self._direct_enters.append(output) else: self._enters.append(output) def __str__(self): """String representation.""" return "while_loop(%s)" % self.name @property def inputs(self): """Input to all the Enter nodes.""" return [x.op.inputs[0] for x in self._enters + self._direct_enters] @property def control_inputs(self): """Control input to all the Enter nodes.""" control_inputs = [] for x in self._enters + self._direct_enters: control_inputs.extend(x.op.control_inputs) return control_inputs @property def outputs(self): """Outputs of all the Exit nodes.""" return self._outputs @property def name(self): """Context name for the while loop.""" return self._context_name @property def is_inside_loop(self): """Returns true if the while_loop was created inside the pfor.""" return self._is_inside_loop def op_is_inside_loop(self, op): """True if op was created inside the pfor loop body.""" assert isinstance(op, ops.Operation) # Note that we use self._pfor_op_ids for the check and not self._pfor_ops # since it appears there tensorflow API could return different python # objects representing the same Operation node. return op._id in self._pfor_op_ids @property def is_stateful(self): return self._is_stateful @property def pfor_converter(self): """Return a converter for the while loop.""" return self def _init_pfor(self, parent_pfor, indices, cond_stacked, inputs, inputs_stacked): """Create a PFor object for converting parts of the while_loop. Args: parent_pfor: PFor object being used for converting the while_loop. indices: int32 Tensor of ids for the iterations that are still active (i.e. did not exit the while_loop). 
cond_stacked: True if the while_loop condition is stacked. inputs: list of input Tensors corresponding 1-to-1 with self._enters. Note that these Tensors are a subset of the loop variables for the generated while_loop. inputs_stacked: List of booleans corresponding 1-to-1 with `inputs`, indicating if the value is stacked or not. Returns: A PFor instance. The instance is initialized by adding conversion mappings of nodes that will be external to the conversion that the returned instance will be used for. e.g. Enter nodes as well as Merge and Switch outputs are mapped to converted values. """ num_outputs = len(self._outputs) assert len(inputs) == len(self._enters) assert len(inputs_stacked) == len(self._enters) loop_var = parent_pfor.loop_var loop_len = array_ops.size(indices) pfor = PFor( loop_var, loop_len, pfor_ops=self._pfor_ops, all_indices=indices, all_indices_partitioned=cond_stacked) # Map all inputs of Enter nodes in self._direct_enters to their converted # values. for enter in self._direct_enters: enter_input = enter.op.inputs[0] converted_enter, stacked, is_sparse_stacked = parent_pfor._convert_helper( enter_input) # Since these are resources / variants, they should be unstacked. assert not stacked and not is_sparse_stacked, (enter, converted_enter) pfor._add_conversion(enter, wrap(converted_enter, False)) # Map all Enter nodes to the inputs. for enter, inp, stacked in zip(self._enters, inputs, inputs_stacked): pfor._add_conversion(enter, wrap(inp, stacked)) # Map outputs of Switch and Merge. for i in range(num_outputs): wrapped_inp = wrap(inputs[i], inputs_stacked[i]) merge = self._enter_merges[i] pfor._add_conversion(merge.outputs[0], wrapped_inp) # Note that second output of Merge is typically not used, except possibly # as a control dependency. To avoid trying to output the correct value, we # employ a hack here. We output a dummy invalid value with an incorrect # dtype. 
This will allow control dependency to work but if using it as an # input, it should typically lead to errors during graph construction due # to dtype mismatch. # TODO(agarwal): Check in the original graph to see if there are any # consumers of this Tensor that use it as an input. pfor._add_conversion(merge.outputs[1], wrap(constant_op.constant(-1.0), False)) switch = self._exit_switches[i] # Don't need to worry about switch.output[0] which will feed to Exit node. pfor._add_conversion(switch.outputs[1], wrapped_inp) return pfor def _convert_enter(self, parent_pfor, enter): """Converts an Enter node.""" inp, stacked, _ = parent_pfor._convert_helper(enter.op.inputs[0]) control_inputs = [ parent_pfor._convert_helper(x).t for x in enter.op.control_inputs ] if control_inputs: with ops.control_dependencies(control_inputs): inp = array_ops.identity(inp) return inp, stacked def _maybe_stacked(self, cache, inp): """Heuristic to figue out if the coverting inp leads to a stacked value. Args: cache: map from Tensor to boolean indicating stacked/unstacked. inp: input Tensor. Returns: True if `inp` could get stacked. If the function returns False, the converted value should be guaranteed to be unstacked. If returning True, it may or may not be stacked. """ if inp in cache: return cache[inp] if not self.op_is_inside_loop(inp.op): return False op = inp.op output = False if op.type in [ "Shape", "Rank" "ShapeN", "ZerosLike", "TensorArrayV3", "TensorArraySizeV3", ]: output = False elif _is_stateful_pfor_op(op): # This may be fairly aggressive. output = True elif op.type == "Exit": # This may be fairly aggressive. 
output = True else: for t in op.inputs: if self._maybe_stacked(cache, t): output = True break cache[inp] = output return output def _create_init_values(self, pfor_input): """Create arguments passed to converted while_loop.""" with ops.name_scope("while_init"): loop_len_vector = pfor_input.pfor.loop_len_vector loop_len = loop_len_vector[0] num_outputs = len(self._outputs) inputs = [] maybe_stacked_cache = {} # Convert all the Enters. Need to do this before checking for stacking # below. for i, enter in enumerate(self._enters): inp, stacked = self._convert_enter(pfor_input.pfor, enter) inputs.append(inp) maybe_stacked_cache[enter] = stacked # Since this enter node is part of the `loop_vars`, it corresponds to an # output and its preceding switch. We mark this switch's output the same # stackness, to act at the base case for the logic below. Below, we will # be going through the body figuring out which inputs might need to be # stacked and which inputs can safely remain unstacked. if i < num_outputs: maybe_stacked_cache[self._exit_switches[i].outputs[1]] = stacked # Shape invariants for init_values corresponding to self._enters. input_shape_invariants = [] # TensorArrays for outputs of converted while loop output_tas = [] # Shape invariants for output TensorArrays. ta_shape_invariants = [] # List of booleans indicating stackness of inputs, i.e. tensors # corresponding to self._enters. inputs_stacked = [] for i, inp in enumerate(inputs): enter = self._enters[i] inp_stacked = self._maybe_stacked(maybe_stacked_cache, enter) # Note that even when an input is unstacked, the body could make it # stacked. we use a heuristic below to figure out if body may be making # it stacked. if i < num_outputs: body_output = self._body_outputs[i] if enter.op in self._pfor_ops: body_output_stacked = self._maybe_stacked(maybe_stacked_cache, body_output) else: # If constructed outside of pfor loop, then the output would not be # stacked. 
body_output_stacked = False if body_output_stacked and not inp_stacked: inp = _stack(inp, loop_len_vector).t inputs[i] = inp inp_stacked = True # TODO(agarwal): other attributes for the TensorArray ? output_tas.append(tensor_array_ops.TensorArray(inp.dtype, loop_len)) ta_shape_invariants.append(tensor_shape.TensorShape(None)) inputs_stacked.append(inp_stacked) input_shape_invariants.append(tensor_shape.TensorShape(None)) # See documentation for __call__ for the structure of init_values. init_values = [True, pfor_input.pfor.all_indices] + inputs + output_tas # TODO(agarwal): try stricter shape invariants shape_invariants = ( [tensor_shape.TensorShape(None), tensor_shape.TensorShape(None) ] + input_shape_invariants + ta_shape_invariants) return init_values, inputs_stacked, shape_invariants def _process_cond_unstacked(self, conditions, indices, inputs, output_tas): """Handles case when condition is unstacked. Note that all iterations end together. So we don't need to partition the inputs. When all iterations are done, we write the inputs to the TensorArrays. Note that we only write to index 0 of output_tas. Since all iterations end together, they can all be output together. """ not_all_done = array_ops.reshape(conditions, []) new_output_tas = [] # pylint: disable=cell-var-from-loop for i, out_ta in enumerate(output_tas): inp = inputs[i] new_output_tas.append( control_flow_ops.cond(not_all_done, lambda: out_ta, lambda: out_ta.write(0, inp))) # pylint: enable=cell-var-from-loop return not_all_done, indices, inputs, new_output_tas def _process_cond_stacked(self, conditions, indices, inputs, inputs_stacked, output_tas): num_outputs = len(self._outputs) # Compute if all iterations are done. not_all_done = math_ops.reduce_any(conditions) conditions_int = math_ops.cast(conditions, dtypes.int32) # Partition the indices. 
done_indices, new_indices = data_flow_ops.dynamic_partition( indices, conditions_int, 2) new_inputs = [] new_output_tas = [] for i, (inp, stacked) in enumerate(zip(inputs, inputs_stacked)): # Partition the inputs. if stacked: done_inp, new_inp = data_flow_ops.dynamic_partition( inp, conditions_int, 2) else: # TODO(agarwal): avoid this stacking. See TODO earlier in # _process_cond_unstacked. done_inp = _stack(inp, [array_ops.size(done_indices)]).t new_inp = inp new_inputs.append(new_inp) # For iterations that are done, write them to TensorArrays. if i < num_outputs: out_ta = output_tas[i] # Note that done_indices can be empty. done_inp should also be empty in # that case. new_output_tas.append(out_ta.scatter(done_indices, done_inp)) return not_all_done, new_indices, new_inputs, new_output_tas def _process_body(self, pfor_input, inputs_stacked, new_indices, cond_stacked, new_inputs, not_all_done): """Convert the body function.""" def true_fn(control_inputs, body_pfor, body_output, stacked): """Converts the body function for all but last iteration. This essentially converts body_output. Additionally, it needs to handle any control dependencies on the NextIteration node. So it creates another Identity node with the converted dependencies. """ converted_control_inp = [] for x in control_inputs: for t in x.outputs: converted_control_inp.append(body_pfor._convert_helper(t).t) if stacked: # Note convert always does the stacking. 
        output = body_pfor.convert(body_output)
      else:
        # _convert_helper preserves stackedness; assert it matches what the
        # caller expects for this body output.
        output, convert_stacked, _ = body_pfor._convert_helper(body_output)
        assert convert_stacked == stacked, body_output
      with ops.control_dependencies(converted_control_inp):
        return array_ops.identity(output)

    body_pfor = self._init_pfor(pfor_input.pfor, new_indices,
                                cond_stacked, new_inputs,
                                inputs_stacked)
    new_outputs = []
    for i, (body_output, stacked) in enumerate(
        zip(self._body_outputs, inputs_stacked)):
      control_inp = self._next_iter_control_inputs[i]
      out_dtype = body_output.dtype
      # Note that we want to run the body only if not all pfor iterations are
      # done. If all are done, we return empty tensors since these values will
      # not be used. Notice that the value returned by the loop is based on
      # TensorArrays and not directly on these returned values.
      # pylint: disable=cell-var-from-loop
      new_output = control_flow_ops.cond(
          not_all_done,
          lambda: true_fn(control_inp, body_pfor, body_output, stacked),
          lambda: constant_op.constant([], dtype=out_dtype))
      # pylint: enable=cell-var-from-loop
      new_outputs.append(new_output)
    return new_outputs

  def __call__(self, pfor_input):
    """Converter for the while_loop.

    The conversion of a while_loop is another while_loop.

    The arguments to this converted while_loop are as follows:
    not_all_done: Boolean scalar Tensor indicating if all the pfor iterations
      are done.
    indices: int32 1-D Tensor storing the id of the iterations that are not
      done.
    args: Remaining arguments. These can be divided into 3 categories:
      - First set of arguments are the tensors that correspond to the initial
        elements of self._enters. The elements that appear in original while
        loop's `loop_vars`.
      - The second set of arguments are the tensors that correspond to the
        remaining elements of self._enters. These are the tensors that directly
        enter the original while loop body.
      - Finally, the last set of arguments are TensorArrays. These TensorArrays
        correspond to the outputs of the original while_loop, i.e.
        to
        the elements in self._outputs. Each TensorArray has `PFor.loop_len`
        elements, i.e. the number of pfor iterations. At the end, the i'th
        element of each TensorArray will contain the output computed by the
        i'th iteration of pfor. Note that elements can be written into these
        tensor arrays in any order, depending on when the corresponding pfor
        iteration is done.
      If the original while_loop had `k` tensors in its `loop_vars` and its body
      directly captured `m` tensors, the `args` will contain `2 * k + m` values.

    In each iteration, the while_loop body recomputes the condition for all
    active pfor iterations to see which of them are now done. It then partitions
    all the inputs and passes them along to the converted body. Values for all
    the iterations that are done are written to TensorArrays indexed by the pfor
    iteration number. When all iterations are done, the TensorArrays are stacked
    to get the final value.

    Args:
      pfor_input: A PForInput object corresponding to the output of any Exit
        node from this while loop.

    Returns:
      List of converted outputs.
    """
    # Create init_values that will be passed to the while_loop.
    init_values, inputs_stacked, shape_invariants = self._create_init_values(
        pfor_input)
    # Note that we use a list as a hack since we need the nested function body
    # to set the value of cond_is_stacked. python2.x doesn't support nonlocal
    # variables.
    cond_is_stacked = [None]

    def cond(not_all_done, *_):
      return not_all_done

    def body(not_all_done, indices, *args):
      # See documentation for __call__ for the structure of *args.
      num_enters = len(self._enters)
      inputs = args[:num_enters]
      output_tas = args[num_enters:]
      # TODO(agarwal): see which outputs have consumers and only populate the
      # TensorArrays corresponding to those. Or do those paths get trimmed out
      # from inside the while_loop body?
      assert len(inputs) >= len(output_tas)
      assert len(inputs) == len(inputs_stacked)

      # Convert condition
      with ops.name_scope("while_cond"):
        # Note that we set cond_stacked to True here. At this point we don't
        # know if it could be loop invariant, hence the conservative value is
        # to assume stacked.
        cond_pfor = self._init_pfor(pfor_input.pfor, indices,
                                    cond_stacked=True,
                                    inputs=inputs,
                                    inputs_stacked=inputs_stacked)
        conditions, cond_stacked, _ = cond_pfor._convert_helper(self._condition)
        cond_is_stacked[0] = cond_stacked

      # Recompute the new condition, write outputs of done iterations, and
      # partition the inputs if needed.
      if not cond_stacked:
        (not_all_done, new_indices,
         new_inputs, new_output_tas) = self._process_cond_unstacked(
             conditions, indices, inputs, output_tas)
      else:
        (not_all_done, new_indices,
         new_inputs, new_output_tas) = self._process_cond_stacked(
             conditions, indices, inputs, inputs_stacked, output_tas)

      # Convert body
      with ops.name_scope("while_body"):
        # Compute the outputs from the body.
        new_outputs = self._process_body(pfor_input, inputs_stacked,
                                         new_indices, cond_stacked,
                                         new_inputs, not_all_done)

      # Note that the first num_outputs new values of inputs are computed using
      # the body. Rest of them were direct Enters into the condition/body and
      # the partitioning done earlier is sufficient to give the new value.
      num_outputs = len(self._outputs)
      new_args = ([not_all_done, new_indices] + new_outputs + list(
          new_inputs[num_outputs:]) + new_output_tas)
      return tuple(new_args)

    while_outputs = control_flow_ops.while_loop(
        cond, body, init_values, shape_invariants=shape_invariants)
    output_tas = while_outputs[-len(self._outputs):]
    outputs = []
    assert cond_is_stacked[0] is not None
    for inp_stacked, ta in zip(inputs_stacked, output_tas):
      if cond_is_stacked[0]:
        outputs.append(wrap(ta.stack(), True))
      else:
        # Note that if while_loop condition is unstacked, all iterations exit at
        # the same time and we wrote those outputs in index 0 of the tensor
        # array.
        outputs.append(wrap(ta.read(0), inp_stacked))
    return outputs


class _PforInput(object):
  """Input object passed to registered pfor converters."""

  def __init__(self, pfor, op, inputs):
    """Creates a _PforInput object.

    Args:
      pfor: PFor converter object.
      op: the Operation object that is being converted.
      inputs: list of WrappedTensor objects representing converted values of the
        inputs of `op`.
    """
    self.pfor = pfor
    self._op = op
    self._inputs = inputs

  def stack_inputs(self, stack_indices=None):
    """Stacks unstacked inputs at `stack_indices`.

    Args:
      stack_indices: indices of inputs at which stacking is done. If None,
        stacking is done at all indices.
    """
    if stack_indices is None:
      stack_indices = range(len(self._inputs))
    length = self.pfor.loop_len_vector
    for i in stack_indices:
      inp = self._inputs[i]
      if not inp.is_stacked:
        # Tile the loop-invariant value so it looks like a stacked input.
        self._inputs[i] = _stack(inp.t, length)

  def expanddim_inputs_for_broadcast(self):
    """Reshapes stacked inputs to prepare them for broadcast.

    Since stacked inputs have an extra leading dimension, automatic broadcasting
    rules could incorrectly try to expand dimensions before that leading
    dimension. To avoid that, we reshape these stacked inputs to the maximum
    rank they will need to be broadcasted to.
""" if not self._inputs: return # Find max rank def _get_rank(x): rank = array_ops.rank(x.t) if not x.is_stacked: rank += 1 return rank ranks = [_get_rank(x) for x in self._inputs] max_rank = ranks[0] for rank in ranks[1:]: max_rank = math_ops.maximum(rank, max_rank) for i, inp in enumerate(self._inputs): if inp.is_stacked: shape = array_ops.shape(inp.t) rank_diff = array_ops.reshape(max_rank - ranks[i], [1]) ones = array_ops.tile([1], rank_diff) new_shape = array_ops.concat([shape[:1], ones, shape[1:]], axis=0) self._inputs[i] = wrap(array_ops.reshape(inp.t, new_shape), True) @property def inputs(self): return self._inputs @property def num_inputs(self): return len(self._inputs) def input(self, index): assert len(self._inputs) > index, (index, self._inputs) return self._inputs[index] def stacked_input(self, index): t, is_stacked, _ = self.input(index) if not is_stacked: op_type = self.op_type op_def = getattr(self._op, "op_def", None) if op_def is None: input_name = "at index %d" % index else: input_name = "\"%s\"" % op_def.input_arg[index].name raise ValueError("Input %s of op \"%s\" expected to be not loop invariant" ".\nError while converting op %s" "with converted inputs\n%s" % (input_name, op_type, self._op, self.inputs)) return t def unstacked_input(self, index): t, is_stacked, _ = self.input(index) if is_stacked: op_type = self.op_type op_def = getattr(self._op, "op_def", None) if op_def is None: input_name = "at index %d" % index else: input_name = "\"%s\"" % op_def.input_arg[index].name raise ValueError("Input %s of op \"%s\" expected to be loop invariant" ".\nError while converting op %s" "with converted inputs\n%s" % (input_name, op_type, self._op, self.inputs)) return t @property def op(self): return self._op @property def op_type(self): return self._op.type def get_attr(self, attr): return self._op.get_attr(attr) @property def outputs(self): return self._op.outputs def output(self, index): assert index < len(self._op.outputs) return 
    self._op.outputs[index]


# Maps op type name -> conversion function, populated via RegisterPFor.
_pfor_converter_registry = {}


class RegisterPFor(object):
  """Utility to register converters for pfor.

  Usage:
  @RegisterPFor(foo_op_type)
  def _foo_converter(pfor_input):
    ...

  The above will register conversion function `_foo_converter` for handling
  conversion of `foo_op_type`. During conversion, the registered function will
  be called with a single argument of type `PForInput` which will contain state
  needed for the conversion. This registered function should output a list of
  WrappedTensor objects with the same length as the number of outputs of op
  being converted. If the op had zero outputs, then it should return an
  ops.Operation object.
  """

  def __init__(self, op_type):
    """Creates an object to register a converter for op with type `op_type`."""
    self.op_type = op_type

  def __call__(self, converter):
    name = self.op_type
    assert name not in _pfor_converter_registry, "Re-registering %s " % name
    _pfor_converter_registry[name] = converter
    # Return the converter unchanged so it can still be called directly.
    return converter


class RegisterPForWithArgs(RegisterPFor):
  """Utility to register converters for pfor.

  Usage:
  @RegisterPForWithArgs(foo_op_type, foo=value, ....)
  def _foo_converter(pfor_input, foo=None, ....):
    ...

  See RegisterPFor for details on the conversion function.
  `RegisterPForWithArgs` allows binding extra arguments to the conversion
  function at registration time.
""" def __init__(self, op_type, *args, **kw_args): super(RegisterPForWithArgs, self).__init__(op_type) self._args = args self._kw_args = kw_args def __call__(self, converter): def _f(pfor_input): return converter(pfor_input, self.op_type, *self._args, **self._kw_args) super(RegisterPForWithArgs, self).__call__(_f) return converter def _create_op(op_type, inputs, op_dtypes, attrs=None): """Utility to create an op.""" return ops.get_default_graph().create_op( op_type, inputs, op_dtypes, attrs=attrs, compute_device=True) WrappedTensor = collections.namedtuple("WrappedTensor", ["t", "is_stacked", "is_sparse_stacked"]) """Wrapper around the result of a Tensor conversion. The additional fields are useful for keeping track of the conversion state as data flows through the ops in the loop body. For every op whose output is a Tensor, its converter should return either a WrappedTensor or a list of WrappedTensors. Args: t: The converted tensor is_stacked: True if the tensor is stacked, i.e. represents the results of all the iterations of the loop, where each row i of the tensor corresponds to that op's output on iteration i of the loop. False if the tensor is not stacked, i.e. represents the result of the op on of a single iteration of the loop, where the result does not vary between iterations. is_sparse_stacked: True if the tensor corresponds to a component tensor (indices, values, or dense_shape) of a sparse tensor, and has been logically stacked via a sparse conversion. 
""" def wrap(tensor, is_stacked=True, is_sparse_stacked=False): """Helper to create a WrappedTensor object.""" assert isinstance(is_stacked, bool) assert isinstance(is_sparse_stacked, bool) assert isinstance(tensor, ops.Tensor) assert not is_sparse_stacked or is_stacked, ("If the wrapped tensor is " "stacked via a sparse " "conversion, it must also be " "stacked.") return WrappedTensor(tensor, is_stacked, is_sparse_stacked) def _fallback_converter(pfor_input): logging.warn("Using a while_loop for converting %s", pfor_input.op_type) output_dtypes = [x.dtype for x in pfor_input.outputs] iters = pfor_input.pfor.loop_len_vector[0] def while_body(i, *ta_list): """Body of while loop.""" inputs = [ x[i, ...] if stacked else x for x, stacked, _ in pfor_input.inputs ] op_outputs = _create_op( pfor_input.op_type, inputs, output_dtypes, attrs=pfor_input.op.node_def.attr).outputs outputs = [] for out, ta in zip(op_outputs, ta_list): assert isinstance(out, ops.Tensor) outputs.append(ta.write(i, array_ops.expand_dims(out, 0))) return tuple([i + 1] + outputs) ta_list = control_flow_ops.while_loop( lambda i, *ta: i < iters, while_body, [0] + [ tensor_array_ops.TensorArray(dtype, iters) for dtype in output_dtypes ])[1:] return tuple([wrap(ta.concat(), True) for ta in ta_list]) class PFor(object): """Implementation of rewrite of parallel-for loops. This class takes a DAG or a set of DAGs representing the body of a parallel-for loop, and adds new operations to the graph that implements functionality equivalent to running that loop body for a specified number of iterations. This new set of nodes may or may not use a tensorflow loop construct. The process of conversion does not delete or change any existing operations. It only adds operations that efficiently implement the equivalent functionality. We refer to the added ops as "converted ops". The conversion process uses a simple greedy heuristic. 
  It walks the loop body
  and tries to express the functionality of running each node in a loop with a
  new set of nodes. When converting an op several cases are possible:
  - The op is not inside the loop body. Hence it can be used as is.
  - The op does not depend on the iteration number and is stateless. In this
    case, it can be used as is.
  - The op is not stateful, and depends on iteration number only through control
    dependencies. In this case, we can create a single op with same inputs and
    attributes, but with "converted" control dependencies.
  - The op is not stateful, and all its inputs are loop invariant. In this case,
    similar to above, we can create a single op with same inputs and attributes,
    but with "converted" control dependencies.
  - The op is stateful or at least one of the inputs is not loop invariant. In
    this case, we run the registered converter for that op to create a set of
    converted ops. All nodes in the set will have converted control dependencies
    corresponding to control dependencies of the original op. If the op returned
    multiple outputs, "converted outputs" could be produced by different ops in
    this set.
  """

  def __init__(self,
               loop_var,
               loop_len,
               pfor_ops,
               all_indices=None,
               all_indices_partitioned=False):
    """Creates an object to rewrite a parallel-for loop.

    Args:
      loop_var: ops.Tensor output of a Placeholder operation. The value should
        be an int32 scalar representing the loop iteration number.
      loop_len: A scalar or scalar Tensor representing the number of iterations
        the loop is run for.
      pfor_ops: List of all ops inside the loop body.
      all_indices: If not None, an int32 vector with size `loop_len`
        representing the iteration ids that are still active. These values
        should be unique and sorted. However they may not be contiguous. This is
        typically the case when inside a control flow construct which has
        partitioned the indices of the iterations that are being converted.
      all_indices_partitioned: If True, this object is being constructed from a
        control flow construct where not all the pfor iterations are guaranteed
        to be active.
    """
    assert isinstance(loop_var, ops.Tensor)
    assert loop_var.op.type == "Placeholder"
    self._loop_var = loop_var
    # Fold loop_len to a python constant when statically known.
    loop_len_value = tensor_util.constant_value(loop_len)
    if loop_len_value is not None:
      loop_len = loop_len_value
    self._loop_len_vector = array_ops.reshape(loop_len, [1])
    self._all_indices_partitioned = all_indices_partitioned
    if all_indices_partitioned:
      assert all_indices is not None
    self.all_indices = (
        math_ops.range(loop_len) if all_indices is None else all_indices)

    # Maps from original graph values to their converted WrappedTensor (or
    # Operation). The loop variable itself converts to the active indices.
    self._conversion_map = {}
    self._conversion_map[loop_var] = wrap(self.all_indices, True)
    self._pfor_ops = set(pfor_ops)
    self._pfor_op_ids = set([x._id for x in pfor_ops])

  def op_is_inside_loop(self, op):
    """True if op was created inside the pfor loop body."""
    assert isinstance(op, ops.Operation)
    # Note that we use self._pfor_op_ids for the check and not self._pfor_ops
    # since it appears the tensorflow API could return different python
    # objects representing the same Operation node.
    return op._id in self._pfor_op_ids

  def _convert_sparse(self, y):
    """Returns the converted value corresponding to SparseTensor y.

    For SparseTensors, instead of stacking the component tensors separately,
    resulting in component tensors with shapes (N, m, rank), (N, m), and (N,
    rank) respectively for indices, values, and dense_shape (where N is the loop
    length and m is the number of sparse tensor values per loop iter), we want
    to logically stack the SparseTensors, to create a SparseTensor whose
    components are size (N * m, rank + 1), (N * m, ), and (rank + 1,)
    respectively.

    Here, we try to get the conversion of each component tensor.
    If the tensors are stacked via a sparse conversion, return the resulting
    SparseTensor composed of the converted components. Otherwise, the component
    tensors are either unstacked or stacked naively.
    In the latter case, we unstack
    the component tensors to reform loop_len SparseTensor elements, then
    correctly batch them.

    The unstacked tensors must have the same rank. Each dimension of each
    SparseTensor will expand to be the largest among all SparseTensor elements
    for that dimension. For example, if there are N SparseTensors of rank 3
    being stacked, with N dense shapes, where the i_th shape is (x_i, y_i, z_i),
    the new dense shape will be (N, max_i(x_i), max_i(y_i), max_i(z_i)).

    Args:
      y: A tf.SparseTensor.

    Returns:
      A tf.SparseTensor that is the converted value corresponding to y.
    """
    outputs = [
        self._convert_helper(t) for t in (y.indices, y.values, y.dense_shape)
    ]
    assert all(isinstance(o, WrappedTensor) for o in outputs)

    if all(w.is_sparse_stacked for w in outputs):
      # All components were logically stacked by a sparse-aware converter.
      return sparse_tensor.SparseTensor(*[w.t for w in outputs])

    assert not any(w.is_sparse_stacked for w in outputs), (
        "Error converting SparseTensor. All components should be logically "
        "stacked, or none.")

    # If component tensors were not sparsely stacked, they are either unstacked
    # or stacked without knowledge that they are components of sparse tensors.
    # In this case, we have to restack them.
    return self._restack_sparse_tensor_logically(
        *[self._unwrap_or_tile(w) for w in outputs])

  def _restack_sparse_tensor_logically(self, indices, values, shape):
    """Rebuilds a logically-stacked SparseTensor from naively stacked parts."""
    # Result rank gains one leading (loop) dimension over the element rank.
    sparse_tensor_rank = indices.get_shape().dims[-1].value
    if sparse_tensor_rank is not None:
      sparse_tensor_rank += 1

    def map_fn(args):
      res = gen_sparse_ops.serialize_sparse(
          args[0], args[1], args[2], out_type=dtypes.variant)
      return res

    # Applies a map function to the component tensors to serialize each
    # sparse tensor element and batch them all, then deserializes the batch.
    # TODO(rachelim): Try to do this without map_fn -- add the right offsets
    # to shape and indices tensors instead.
    result = functional_ops.map_fn(
        map_fn, [indices, values, shape], dtype=dtypes.variant)
    return sparse_ops.deserialize_sparse(
        result, dtype=values.dtype, rank=sparse_tensor_rank)

  def _unwrap_or_tile(self, wrapped_tensor):
    """Given a wrapped tensor, unwrap if stacked. Otherwise, tiles it."""
    output, is_stacked = wrapped_tensor.t, wrapped_tensor.is_stacked
    if is_stacked:
      return output
    else:
      return _stack(output, self._loop_len_vector).t

  def convert(self, y):
    """Returns the converted value corresponding to y.

    Args:
      y: A ops.Tensor or a ops.Operation object. If latter, y should not have
        any outputs.

    Returns:
      If y does not need to be converted, it returns y as is. Else it returns
      the "converted value" corresponding to y.
    """
    if y is None:
      return None
    if isinstance(y, sparse_tensor.SparseTensor):
      return self._convert_sparse(y)
    output = self._convert_helper(y)
    if isinstance(output, WrappedTensor):
      assert isinstance(y, ops.Tensor)
      return self._unwrap_or_tile(output)
    else:
      assert isinstance(y, ops.Operation)
      assert not y.outputs
      assert isinstance(output, ops.Operation)
      return output

  def _was_converted(self, t):
    """True if t is not a conversion of itself."""
    converted_t = self._conversion_map[t]
    return converted_t.t is not t

  def _add_conversion(self, old_output, new_output):
    # Record the mapping from an original graph value to its converted form.
    self._conversion_map[old_output] = new_output

  def _convert_helper(self, op_or_tensor):
    # Iterative DFS over the dependency graph, memoized in _conversion_map.
    stack = [op_or_tensor]
    while stack:
      y = stack[0]
      if y in self._conversion_map:
        assert isinstance(self._conversion_map[y],
                          (WrappedTensor, ops.Operation))
        stack.pop(0)
        continue
      if isinstance(y, ops.Operation):
        # NOTE(review): this assertion message continues on the next source
        # line of this (whitespace-mangled) chunk.
        assert not y.outputs, (
            "We only support converting Operation objects with no outputs. 
" "Got %s", y) y_op = y else: assert isinstance(y, ops.Tensor), y y_op = y.op is_while_loop = y_op.type == "Exit" if is_while_loop: while_op = WhileOp(y, pfor_ops=self._pfor_ops) is_inside_loop = while_op.is_inside_loop # If all nodes in the while_loop graph were created inside the pfor, we # treat the whole loop subgraph as a single op (y_op) and try to convert # it. For while_loops that are created completely or partially outside, # we treat them as external and should be able to simply return the Exit # node output as is without needing any conversion. Note that for # while_loops that are partially constructed inside, we assume they will # be loop invariant. If that is not the case, it will create runtime # errors since the converted graph would depend on the self._loop_var # placeholder. if is_inside_loop: y_op = while_op else: is_inside_loop = self.op_is_inside_loop(y_op) # If this op was not created inside the loop body, we will return as is. # 1. Convert inputs and control inputs. 
def _add_to_stack(x): if x not in self._conversion_map: stack.insert(0, x) return True else: return False if is_inside_loop: added_to_stack = False for inp in y_op.inputs: added_to_stack |= _add_to_stack(inp) for cinp in y_op.control_inputs: if cinp.outputs: for t in cinp.outputs: added_to_stack |= _add_to_stack(t) else: added_to_stack |= _add_to_stack(cinp) if added_to_stack: continue converted_inputs = [self._conversion_map[inp] for inp in y_op.inputs] some_input_converted = any( [self._was_converted(x) for x in y_op.inputs]) some_input_stacked = any([x.is_stacked for x in converted_inputs]) converted_control_ops = set() some_control_input_converted = False for cinp in y_op.control_inputs: if cinp.outputs: for t in cinp.outputs: converted_t = self._conversion_map[t] if self._was_converted(t): some_control_input_converted = True converted_control_ops.add(converted_t.t.op) else: converted_cinp = self._conversion_map[cinp] assert isinstance(converted_cinp, ops.Operation) if converted_cinp != cinp: some_control_input_converted = True converted_control_ops.add(converted_cinp) converted_control_ops = list(converted_control_ops) is_stateful = _is_stateful_pfor_op(y_op) else: converted_inputs = [] converted_control_ops = [] logging.vlog(3, "converting op:%s\ninputs:%s\ncontrol_inputs:%s", y_op, converted_inputs, converted_control_ops) # 2. Convert y_op # If converting a while_loop, we let the while_loop convertor deal with # putting the control dependencies appropriately. control_dependencies = [] if is_while_loop else converted_control_ops with ops.control_dependencies(control_dependencies), ops.name_scope( y_op.name + "/pfor/"): # None of the inputs and control inputs were converted. 
if (not is_inside_loop or (not is_stateful and not some_input_converted and not some_control_input_converted)): if y == y_op: assert not isinstance(y_op, WhileOp) new_outputs = y_op else: new_outputs = [wrap(x, False) for x in y_op.outputs] elif not (is_stateful or is_while_loop or some_input_stacked): # All inputs are unstacked or uncoverted but some control inputs are # converted. # TODO(rachelim): Handle the case where some inputs are sparsely # stacked (i.e. any([x.is_sparse_stacked for x in converted_inputs])) new_op = _create_op(y_op.type, [x.t for x in converted_inputs], [x.dtype for x in y_op.outputs], y_op.node_def.attr) if y == y_op: new_outputs = new_op else: new_outputs = [wrap(x, False) for x in new_op.outputs] else: # Either some inputs are not loop invariant or op is stateful. if hasattr(y_op, "pfor_converter"): converter = y_op.pfor_converter else: converter = _pfor_converter_registry.get(y_op.type, None) if converter is None: if flags.FLAGS.op_conversion_fallback_to_while_loop: converter = _fallback_converter else: raise ValueError( "No converter defined for %s\n%s\ninputs: %s. " "\nEither add a converter or set " "--op_conversion_fallback_to_while_loop=True, " "which may run slower" % (y_op.type, y_op, converted_inputs)) # TODO(rachelim): Handle the case where some inputs are sparsely # stacked. We should only call the converter if it supports handling # those inputs. 
new_outputs = converter(_PforInput(self, y_op, converted_inputs)) if isinstance(new_outputs, WrappedTensor): new_outputs = [new_outputs] assert isinstance(new_outputs, (list, tuple, ops.Operation)), new_outputs logging.vlog(2, "converted %s %s", y_op, new_outputs) # Insert into self._conversion_map if y == y_op: assert isinstance(new_outputs, ops.Operation) self._add_conversion(y_op, new_outputs) else: for old_output, new_output in zip(y_op.outputs, new_outputs): assert isinstance(new_output, WrappedTensor), (new_output, y, y_op) self._add_conversion(old_output, new_output) stack.pop(0) return self._conversion_map[op_or_tensor] @property def loop_len_vector(self): """Returns a single element vector whose value is number of iterations.""" return self._loop_len_vector @property def loop_var(self): """Returns placeholder loop variable.""" return self._loop_var @property def pfor_ops(self): return self._pfor_ops @property def all_indices_partitioned(self): """all_indices_partitioned property. Returns: True if we are inside a control flow construct and not all pfor iterations may be active. 
# nn_ops


def _flatten_first_two_dims(x):
  """Merges first two dimensions."""
  old_shape = array_ops.shape(x)
  new_shape = array_ops.concat([[-1], old_shape[2:]], axis=0)
  return array_ops.reshape(x, new_shape)


def _unflatten_first_dim(x, first_dim):
  """Splits first dimension into [first_dim, -1]."""
  old_shape = array_ops.shape(x)
  new_shape = array_ops.concat([first_dim, [-1], old_shape[1:]], axis=0)
  return array_ops.reshape(x, new_shape)


def _inputs_with_flattening(pfor_input, input_indices):
  """Stacks and flattens first dim of inputs at indices `input_indices`."""
  if input_indices is None:
    input_indices = []
  pfor_input.stack_inputs(stack_indices=input_indices)
  inputs = []
  for i in range(pfor_input.num_inputs):
    if i in input_indices:
      inp = pfor_input.stacked_input(i)
      # Merge the pfor dimension into the batch dimension.
      inp = _flatten_first_two_dims(inp)
    else:
      inp = pfor_input.unstacked_input(i)
    inputs.append(inp)
  return inputs


@RegisterPForWithArgs("Conv2D", dims=[0])
@RegisterPForWithArgs("AvgPool", dims=[0])
@RegisterPForWithArgs("MaxPool", dims=[0])
@RegisterPForWithArgs("MaxPoolGrad", dims=[0, 1, 2])
@RegisterPForWithArgs("SoftmaxCrossEntropyWithLogits", dims=[0, 1])
def _convert_flatten_batch(pfor_input, op_type, dims):
  """Converts ops that can run with the loop dim folded into the batch dim.

  The inputs at `dims` are flattened from [S, N, ...] to [S * N, ...], the op
  is run once, and the outputs are unflattened back to [S, N, ...].
  """
  del op_type
  inputs = _inputs_with_flattening(pfor_input, dims)
  outputs = _create_op(
      pfor_input.op_type, inputs, [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  n = pfor_input.pfor.loop_len_vector
  outputs = [_unflatten_first_dim(x, n) for x in outputs]
  return [wrap(x, True) for x in outputs]


# Caches the transformed value keyed by (graph, tensor, data_format) so that
# repeated conversions of the same input reuse the same graph nodes.
_channel_flatten_input_cache = {}


def _channel_flatten_input(x, data_format):
  """Merge the stack dimension with the channel dimension.

  If S is pfor's stacking dimension, then,
    - for SNCHW, we transpose to NSCHW. If N dimension has size 1, the transpose
      should be cheap.
    - for SNHWC, we transpose to NHWCS.
  We then merge the S and C dimension.

  Args:
    x: ops.Tensor to transform.
    data_format: "NCHW" or "NHWC".

  Returns:
    A 3-element tuple with the transformed value, along with the shape for
    reshape and order for transpose required to transform back.
  """

  graph = ops.get_default_graph()
  cache_key = (graph, x, data_format)
  if cache_key not in _channel_flatten_input_cache:
    x_shape = array_ops.shape(x)
    if data_format == b"NCHW":
      order = [1, 0, 2, 3, 4]
      shape = array_ops.concat([x_shape[1:2], [-1], x_shape[3:]], axis=0)
      reverse_order = order
    else:
      order = [1, 2, 3, 0, 4]
      shape = array_ops.concat([x_shape[1:4], [-1]], axis=0)
      reverse_order = [3, 0, 1, 2, 4]
    # Move S dimension next to C dimension.
    x = array_ops.transpose(x, order)
    reverse_shape = array_ops.shape(x)
    # Reshape to merge the S and C dimension.
    x = array_ops.reshape(x, shape)
    outputs = x, reverse_order, reverse_shape
    _channel_flatten_input_cache[cache_key] = outputs
  else:
    outputs = _channel_flatten_input_cache[cache_key]
  return outputs
# Note that with training=True, running FusedBatchNorm on individual examples
# is very different from running FusedBatchNorm on a batch of those examples.
# This is because, for the latter case, the operation can be considered as first
# computing the mean and variance over all the examples and then using these
# to scale all those examples. This creates a data dependency between these
# different "iterations" since the inputs to the scaling step depends on the
# statistics coming from all these inputs.
# As with other kernels, the conversion here effectively runs the kernel
# independently for each iteration, and returns outputs by stacking outputs from
# each of those iterations.
@RegisterPFor("FusedBatchNorm")
def _convert_fused_batch_norm(pfor_input):
  """Converts FusedBatchNorm for pfor; see the note above on training mode."""
  is_training = pfor_input.get_attr("is_training")
  # When BatchNorm is used with training=False, mean and variance are provided
  # externally and used as is by the op. Thus, we can merge the S and N
  # dimensions as we do for regular operations.
  # When BatchNorm is used with training=True, mean and variance are computed
  # for each channel across the batch dimension (first one). If we merge S and N
  # dimensions, mean and variances will be computed over a larger set. So, we
  # merge the S and C dimensions instead.
  if not is_training:
    # We return zeros for batch_mean and batch_variance output. Note that CPU
    # and GPU seem to have different behavior for those two outputs. CPU outputs
    # zero because these values are not used during inference. GPU outputs
    # something, probably real means and variances.
    inputs = _inputs_with_flattening(pfor_input, [0])
    outputs = _create_op(
        pfor_input.op_type, inputs, [x.dtype for x in pfor_input.outputs],
        attrs=pfor_input.op.node_def.attr).outputs
    y = outputs[0]
    n = pfor_input.pfor.loop_len_vector
    y = _unflatten_first_dim(y, n)
    mean = pfor_input.unstacked_input(3)
    zeros = array_ops.zeros_like(mean)
    return [wrap(y, True), wrap(zeros, False), wrap(zeros, False)]

  pfor_input.stack_inputs()
  data_format = pfor_input.get_attr("data_format")
  # We merge the first dimension with the "C" dimension, run FusedBatchNorm, and
  # then transpose back.
  x = pfor_input.stacked_input(0)
  x, reverse_order, reverse_shape = _channel_flatten_input(x, data_format)
  # Note that we stack all the other inputs as well so that they are the same
  # size as the new size of the channel dimension.
  inputs = [x] + [
      array_ops.reshape(pfor_input.stacked_input(i), [-1])
      for i in range(1, pfor_input.num_inputs)
  ]
  outputs = _create_op(
      pfor_input.op_type, inputs, [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  y = outputs[0]
  # Undo the channel-flattening transform on the normalized output.
  y = array_ops.reshape(y, reverse_shape)
  y = array_ops.transpose(y, reverse_order)
  n = pfor_input.pfor.loop_len_vector
  outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]]
  outputs = [y] + outputs
  return [wrap(x, True) for x in outputs]
@RegisterPFor("FusedBatchNormGrad")
def _convert_fused_batch_norm_grad(pfor_input):
  """Converts FusedBatchNormGrad by merging the loop dim with the C dim."""
  pfor_input.stack_inputs()
  data_format = pfor_input.get_attr("data_format")
  y_backprop = pfor_input.stacked_input(0)
  y_backprop, _, _ = _channel_flatten_input(y_backprop, data_format)
  x = pfor_input.stacked_input(1)
  x, x_reverse_order, x_reverse_shape = _channel_flatten_input(x, data_format)
  # Remaining inputs (scale, saved statistics) are flattened to match the
  # enlarged channel dimension.
  inputs = [y_backprop, x] + [
      array_ops.reshape(pfor_input.stacked_input(i), [-1])
      for i in range(2, pfor_input.num_inputs)
  ]
  outputs = _create_op(
      pfor_input.op_type, inputs, [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  x_backprop = outputs[0]
  # Undo the channel-flattening transform on the input gradient.
  x_backprop = array_ops.reshape(x_backprop, x_reverse_shape)
  x_backprop = array_ops.transpose(x_backprop, x_reverse_order)
  n = pfor_input.pfor.loop_len_vector
  outputs = [_unflatten_first_dim(x, n) for x in outputs[1:]]
  outputs = [x_backprop] + outputs
  return [wrap(output, True) for output in outputs]


@RegisterPForWithArgs("Conv2DBackpropInput", flatten_dims=[2], shape_dim=0)
@RegisterPForWithArgs("AvgPoolGrad", flatten_dims=[1], shape_dim=0)
def _convert_flatten_batch_shape_input(pfor_input, op_type, flatten_dims,
                                       shape_dim):
  """Like _convert_flatten_batch, but also scales the static `input_sizes`.

  The input at `shape_dim` holds the output shape; its leading (batch) entry
  is multiplied by the loop length to account for the flattened inputs.
  """
  del op_type
  inputs = _inputs_with_flattening(pfor_input, flatten_dims)
  n = pfor_input.pfor.loop_len_vector
  # Adjust the `input_sizes` input.
  ones = array_ops.ones(
      [array_ops.shape(inputs[shape_dim])[0] - 1], dtype=n.dtype)
  inputs[shape_dim] *= array_ops.concat([n, ones], axis=0)
  outputs = _create_op(
      pfor_input.op_type, inputs, [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  outputs = [_unflatten_first_dim(x, n) for x in outputs]
  return [wrap(x, True) for x in outputs]
@RegisterPFor("Conv2DBackpropFilter")
def _convert_conv2d_backprop_filter(pfor_input):
  """Converts Conv2DBackpropFilter.

  Fast path (unstacked inputs): merges the loop dimension into the channel
  dimension of the gradients and runs a single, larger filter backprop.
  Slow path (stacked inputs): falls back to a while_loop over iterations.
  """
  pfor_input.stack_inputs(stack_indices=[2])
  inputs, inputs_stacked, _ = pfor_input.input(0)
  filter_sizes = pfor_input.unstacked_input(1)
  grads = pfor_input.stacked_input(2)
  strides = pfor_input.get_attr("strides")
  padding = pfor_input.get_attr("padding")
  use_cudnn_on_gpu = pfor_input.get_attr("use_cudnn_on_gpu")
  data_format = pfor_input.get_attr("data_format")
  dilations = pfor_input.get_attr("dilations")
  if inputs_stacked:
    # TODO(agarwal): Implement this efficiently.
    logging.warn("Conv2DBackpropFilter uses a while_loop. Fix that!")

    def while_body(i, ta):
      # Runs the kernel for a single iteration and appends the result.
      inp_i = inputs[i, ...]
      grad_i = grads[i, ...]
      output = nn_ops.conv2d_backprop_filter(
          inp_i,
          filter_sizes,
          grad_i,
          strides=strides,
          padding=padding,
          use_cudnn_on_gpu=use_cudnn_on_gpu,
          data_format=data_format,
          dilations=dilations)
      return i + 1, ta.write(i, array_ops.expand_dims(output, 0))

    n = array_ops.reshape(pfor_input.pfor.loop_len_vector, [])
    _, ta = control_flow_ops.while_loop(
        lambda i, ta: i < n, while_body,
        (0, tensor_array_ops.TensorArray(inputs.dtype, n)))
    output = ta.concat()
    return wrap(output, True)
  else:
    # We merge the stack dimension with the channel dimension of the gradients
    # and pretend we had a larger filter (see change to filter_sizes below).
    # Once the filter backprop is computed, we reshape and transpose back
    # appropriately.
    grads, _, _ = _channel_flatten_input(grads, data_format)
    n = pfor_input.pfor.loop_len_vector
    old_filter_sizes = filter_sizes
    filter_sizes *= array_ops.concat([[1, 1, 1], n], axis=0)
    output = nn_ops.conv2d_backprop_filter(
        inputs,
        filter_sizes,
        grads,
        strides=strides,
        padding=padding,
        use_cudnn_on_gpu=use_cudnn_on_gpu,
        data_format=data_format,
        dilations=dilations)
    new_filter_shape = array_ops.concat([old_filter_sizes[:3], n, [-1]], axis=0)
    output = array_ops.reshape(output, new_filter_shape)
    output = array_ops.transpose(output, [3, 0, 1, 2, 4])
    return wrap(output, True)
# array_ops


@RegisterPForWithArgs("Identity", array_ops.identity)
@RegisterPForWithArgs("StopGradient", array_ops.stop_gradient)
def _convert_identity(pfor_input, op_type, op_func):
  """Applies op_func directly on the (stacked) inputs."""
  del op_type
  return wrap(op_func(*[x.t for x in pfor_input.inputs]), True)


@RegisterPFor("IdentityN")
def _convert_identity_n(pfor_input):
  # Preserves per-input stackedness; identity does not change it.
  outputs = array_ops.identity_n([x.t for x in pfor_input.inputs])
  return [wrap(out, inp.is_stacked) for out, inp in
          zip(outputs, pfor_input.inputs)]


@RegisterPFor("Reshape")
def _convert_reshape(pfor_input):
  t = pfor_input.stacked_input(0)
  shape = pfor_input.unstacked_input(1)
  # Keep the leading loop dimension and reshape the rest.
  new_dim = array_ops.shape(t)[:1]
  new_shape = array_ops.concat([new_dim, shape], axis=0)
  return wrap(array_ops.reshape(t, new_shape), True)


@RegisterPFor("ExpandDims")
def _convert_expanddims(pfor_input):
  t = pfor_input.stacked_input(0)
  dim = pfor_input.unstacked_input(1)
  # Shift non-negative axes by one to account for the leading loop dimension.
  dim += math_ops.cast(dim >= 0, dtypes.int32)
  return wrap(array_ops.expand_dims(t, axis=dim), True)


@RegisterPFor("Slice")
def _convert_slice(pfor_input):
  t = pfor_input.stacked_input(0)
  begin = pfor_input.unstacked_input(1)
  size = pfor_input.unstacked_input(2)
  # Take the full loop dimension; slice the remaining dimensions as requested.
  begin = array_ops.concat([[0], begin], axis=0)
  size = array_ops.concat([[-1], size], axis=0)
  return wrap(array_ops.slice(t, begin, size), True)


@RegisterPFor("Tile")
def _convert_tile(pfor_input):
  t = pfor_input.stacked_input(0)
  multiples = pfor_input.unstacked_input(1)
  # Never tile along the loop dimension.
  multiples = array_ops.concat([[1], multiples], 0)
  return wrap(array_ops.tile(t, multiples), True)
@RegisterPFor("Pack")
def _convert_pack(pfor_input):
  pfor_input.stack_inputs()
  axis = pfor_input.get_attr("axis")
  # Shift non-negative axes by one to account for the leading loop dimension.
  if axis >= 0:
    axis += 1
  return wrap(
      array_ops.stack([x.t for x in pfor_input.inputs], axis=axis), True)


@RegisterPFor("Unpack")
def _convert_unpack(pfor_input):
  value = pfor_input.stacked_input(0)
  axis = pfor_input.get_attr("axis")
  if axis >= 0:
    axis += 1
  num = pfor_input.get_attr("num")
  return [wrap(x, True) for x in array_ops.unstack(value, axis=axis, num=num)]


@RegisterPFor("Pad")
def _convert_pad(pfor_input):
  t = pfor_input.stacked_input(0)
  paddings = pfor_input.unstacked_input(1)
  # No padding along the loop dimension.
  paddings = array_ops.concat([[[0, 0]], paddings], 0)
  return wrap(array_ops.pad(t, paddings, mode="CONSTANT"), True)


@RegisterPFor("Split")
def _convert_split(pfor_input):
  split_dim = pfor_input.unstacked_input(0)
  t = pfor_input.stacked_input(1)
  num_split = pfor_input.get_attr("num_split")
  # Shift a non-negative split dimension past the loop dimension.
  split_dim += math_ops.cast(split_dim >= 0, dtypes.int32)
  return [wrap(x, True) for x in array_ops.split(t, num_split, axis=split_dim)]


@RegisterPFor("SplitV")
def _convert_split_v(pfor_input):
  t = pfor_input.stacked_input(0)
  splits = pfor_input.unstacked_input(1)
  split_dim = pfor_input.unstacked_input(2)
  split_dim += math_ops.cast(split_dim >= 0, dtypes.int32)
  return [wrap(x, True) for x in array_ops.split(t, splits, axis=split_dim)]


@RegisterPFor("Transpose")
def _convert_transpose(pfor_input):
  t = pfor_input.stacked_input(0)
  perm = pfor_input.unstacked_input(1)
  # Keep the loop dimension in place; shift the rest of the permutation.
  new_perm = array_ops.concat([[0], perm + 1], axis=0)
  return wrap(array_ops.transpose(t, new_perm), True)


@RegisterPFor("ZerosLike")
def _convert_zeroslike(pfor_input):
  t = pfor_input.stacked_input(0)
  # The result is identical across iterations, so return it unstacked.
  shape = array_ops.shape(t)[1:]
  return wrap(array_ops.zeros(shape, dtype=t.dtype), False)


@RegisterPFor("Gather")
@RegisterPFor("GatherV2")
def _convert_gather(pfor_input):
  """Converts Gather/GatherV2, handling stacked params and/or indices."""
  param, param_stacked, _ = pfor_input.input(0)
  indices, indices_stacked, _ = pfor_input.input(1)
  op_type = pfor_input.op_type
  if op_type == "Gather":
    validate_indices = pfor_input.get_attr("validate_indices")
    axis = 0
  else:
    validate_indices = None
    axis = pfor_input.unstacked_input(2)
    axis_value = tensor_util.constant_value(axis)
    if axis_value is not None:
      axis = axis_value
  if indices_stacked and not param_stacked:
    if indices == pfor_input.pfor.all_indices and axis == 0:
      param_shape0 = param.shape.dims[0].value
      indices_shape0 = indices.shape.dims[0].value
      if param_shape0 is not None and indices_shape0 == param_shape0:
        # Note that with loops and conditionals, indices may not be contiguous.
        # However they will be sorted and unique. So if the shape matches, then
        # it must be picking up all the rows of param.
        return wrap(param, True)
      # TODO(agarwal): use array_ops.slice here.
    output = array_ops.gather(
        param, indices, validate_indices=validate_indices, axis=axis)
    if axis != 0:
      axis = control_flow_ops.cond(
          axis < 0, lambda: axis + array_ops.rank(param), lambda: axis)
      # Bring the gathered (loop) dimension to the front.
      order = array_ops.concat(
          [[axis],
           math_ops.range(axis),
           math_ops.range(axis + 1, array_ops.rank(output))],
          axis=0)
      output = control_flow_ops.cond(
          math_ops.equal(axis, 0), lambda: output,
          lambda: array_ops.transpose(output, order))
    return wrap(output, True)
  if param_stacked:
    loop_len_vector = pfor_input.pfor.loop_len_vector
    pfor_input.stack_inputs(stack_indices=[1])
    indices = pfor_input.stacked_input(1)
    param_flat = _flatten_first_two_dims(param)

    # Recompute indices to handle stacked param.
    indices_offset = math_ops.range(
        loop_len_vector[0]) * array_ops.shape(param)[1]
    # Reshape indices_offset to allow broadcast addition
    ones = array_ops.ones([array_ops.rank(indices) - 1], dtype=dtypes.int32)
    new_shape = array_ops.concat([loop_len_vector, ones], axis=0)
    indices_offset = array_ops.reshape(indices_offset, new_shape)
    indices += indices_offset

    # TODO(agarwal): handle axis != 0. May need to transpose param or
    # array_ops.gather_nd.
    if isinstance(axis, ops.Tensor):
      axis_value = tensor_util.constant_value(axis)
    else:
      try:
        axis_value = int(axis)
      except TypeError:
        axis_value = None
    msg = ("Gather, where indices and param are both loop dependent, currently "
           "requires axis=0")
    if axis_value is not None and axis_value != 0:
      raise ValueError("Error while converting %s. %s. Got axis=%d" %
                       (pfor_input.op, msg, axis))
    with ops.control_dependencies(
        [check_ops.assert_equal(axis, 0, message=msg)]):
      output = array_ops.gather(param_flat, indices)
    return wrap(output, True)
@RegisterPFor("ConcatV2")
def _convert_concatv2(pfor_input):
  n = pfor_input.num_inputs
  # The last input is the axis; all others are the tensors to concatenate.
  pfor_input.stack_inputs(stack_indices=range(n - 1))
  axis = pfor_input.unstacked_input(n - 1)
  # Shift a non-negative axis past the loop dimension.
  axis += math_ops.cast(axis >= 0, axis.dtype)
  return wrap(
      array_ops.concat([x.t for x in pfor_input.inputs[:n - 1]], axis=axis),
      True)


@RegisterPFor("StridedSlice")
def _convert_strided_slice(pfor_input):
  inp = pfor_input.stacked_input(0)
  begin = pfor_input.unstacked_input(1)
  end = pfor_input.unstacked_input(2)
  strides = pfor_input.unstacked_input(3)
  begin_mask = pfor_input.get_attr("begin_mask")
  end_mask = pfor_input.get_attr("end_mask")
  ellipsis_mask = pfor_input.get_attr("ellipsis_mask")
  new_axis_mask = pfor_input.get_attr("new_axis_mask")
  shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask")

  # Prepend a full slice over the loop dimension: begin 0, end ignored (the
  # mask bit is set), stride 1; all mask bits shift left by one.
  begin = array_ops.concat([[0], begin], axis=0)
  end = array_ops.concat([[0], end], axis=0)
  strides = array_ops.concat([[1], strides], axis=0)
  begin_mask = begin_mask << 1 | 1
  end_mask = end_mask << 1 | 1
  ellipsis_mask <<= 1
  new_axis_mask <<= 1
  shrink_axis_mask <<= 1
  return wrap(
      array_ops.strided_slice(
          inp,
          begin,
          end,
          strides,
          begin_mask=begin_mask,
          end_mask=end_mask,
          ellipsis_mask=ellipsis_mask,
          new_axis_mask=new_axis_mask,
          shrink_axis_mask=shrink_axis_mask), True)


@RegisterPFor("StridedSliceGrad")
def _convert_strided_slice_grad(pfor_input):
  shape = pfor_input.unstacked_input(0)
  begin = pfor_input.unstacked_input(1)
  end = pfor_input.unstacked_input(2)
  strides = pfor_input.unstacked_input(3)
  dy = pfor_input.stacked_input(4)
  begin_mask = pfor_input.get_attr("begin_mask")
  end_mask = pfor_input.get_attr("end_mask")
  ellipsis_mask = pfor_input.get_attr("ellipsis_mask")
  new_axis_mask = pfor_input.get_attr("new_axis_mask")
  shrink_axis_mask = pfor_input.get_attr("shrink_axis_mask")

  # Mirror _convert_strided_slice: extend the slice spec and target shape with
  # the loop dimension and shift all masks by one bit.
  shape = array_ops.concat([pfor_input.pfor.loop_len_vector, shape], axis=0)
  begin = array_ops.concat([[0], begin], axis=0)
  end = array_ops.concat([[0], end], axis=0)
  strides = array_ops.concat([[1], strides], axis=0)
  begin_mask = begin_mask << 1 | 1
  end_mask = end_mask << 1 | 1
  ellipsis_mask <<= 1
  new_axis_mask <<= 1
  shrink_axis_mask <<= 1
  return wrap(
      array_ops.strided_slice_grad(
          shape,
          begin,
          end,
          strides,
          dy,
          begin_mask=begin_mask,
          end_mask=end_mask,
          ellipsis_mask=ellipsis_mask,
          new_axis_mask=new_axis_mask,
          shrink_axis_mask=shrink_axis_mask), True)
# math_ops


@RegisterPFor("MatMul")
def _convert_matmul(pfor_input):
  """Converts MatMul.

  Three cases: both operands stacked (batched matmul), only `a` stacked
  (flatten `a` into a single big matrix), or only `b` stacked (rearrange `b`
  so its per-iteration matrices can be merged into one matmul).
  """
  # TODO(agarwal): Check if tiling is faster than two transposes.
  a, a_stacked, _ = pfor_input.input(0)
  b, b_stacked, _ = pfor_input.input(1)
  tr_a = pfor_input.get_attr("transpose_a")
  tr_b = pfor_input.get_attr("transpose_b")
  if a_stacked and b_stacked:
    output = wrap(math_ops.matmul(a, b, adjoint_a=tr_a, adjoint_b=tr_b), True)
    return output
  elif a_stacked:
    if tr_a:
      a = array_ops.transpose(a, [0, 2, 1])
    if a.shape.is_fully_defined():
      x, y, z = a.shape
    else:
      x, y, z = [
          array_ops.reshape(i, [])
          for i in array_ops.split(array_ops.shape(a), 3)
      ]
    # Collapse [x, y, z] -> [x * y, z] so a single matmul covers all
    # iterations; the result is reshaped back below.
    a = array_ops.reshape(a, [x * y, z])
    prod = math_ops.matmul(a, b, transpose_b=tr_b)
    return wrap(array_ops.reshape(prod, [x, y, -1]), True)
  else:
    assert b_stacked
    if tr_b:
      perm = [2, 0, 1]
      b = array_ops.transpose(b, perm)
    else:
      # As an optimization, if one of the first two dimensions is 1, then we can
      # reshape instead of transpose.
      # TODO(agarwal): This check can be done inside Transpose kernel.
      b_shape = array_ops.shape(b)
      min_dim = math_ops.minimum(b_shape[0], b_shape[1])
      perm = control_flow_ops.cond(
          math_ops.equal(min_dim, 1), lambda: [0, 1, 2], lambda: [1, 0, 2])
      new_shape = array_ops.stack([b_shape[1], b_shape[0], b_shape[2]])
      b = array_ops.transpose(b, perm)
      b = array_ops.reshape(b, new_shape)

    if b.shape.is_fully_defined():
      x, y, z = b.shape
    else:
      x, y, z = [
          array_ops.reshape(i, [])
          for i in array_ops.split(array_ops.shape(b), 3)
      ]
    b = array_ops.reshape(b, [x, y * z])
    prod = math_ops.matmul(a, b, transpose_a=tr_a)
    prod = array_ops.reshape(prod, [-1, y, z])
    prod = array_ops.transpose(prod, [1, 0, 2])
    return wrap(prod, True)
@RegisterPFor("BatchMatMul")
def _convert_batch_mat_mul(pfor_input):
  """Converts BatchMatMul by merging the loop and batch dimensions."""
  # TODO(agarwal): There may be a more efficient way to do this instead of
  # stacking the inputs.
  pfor_input.stack_inputs()
  x = pfor_input.stacked_input(0)
  y = pfor_input.stacked_input(1)
  adj_x = pfor_input.get_attr("adj_x")
  adj_y = pfor_input.get_attr("adj_y")

  x = _flatten_first_two_dims(x)
  y = _flatten_first_two_dims(y)
  output = math_ops.matmul(x, y, adjoint_a=adj_x, adjoint_b=adj_y)
  output = _unflatten_first_dim(output, pfor_input.pfor.loop_len_vector)
  return wrap(output, True)


@RegisterPForWithArgs("Sum", math_ops.reduce_sum)
@RegisterPForWithArgs("Prod", math_ops.reduce_prod)
@RegisterPForWithArgs("Max", math_ops.reduce_max)
@RegisterPForWithArgs("Min", math_ops.reduce_min)
def _convert_reduction(pfor_input, _, op_func):
  t = pfor_input.stacked_input(0)
  indices = pfor_input.unstacked_input(1)
  # Shift positive indices by one to account for the extra dimension.
  indices += math_ops.cast(indices >= 0, dtypes.int32)
  keep_dims = pfor_input.get_attr("keep_dims")
  return wrap(op_func(t, indices, keepdims=keep_dims), True)
@RegisterPForWithArgs("Cumsum", math_ops.cumsum)
@RegisterPForWithArgs("Cumprod", math_ops.cumprod)
def _convert_cumfoo(pfor_input, _, op_func):
  t = pfor_input.stacked_input(0)
  axis = pfor_input.unstacked_input(1)
  # Shift positive indices by one to account for the extra dimension.
  axis += math_ops.cast(axis >= 0, dtypes.int32)
  exclusive = pfor_input.get_attr("exclusive")
  reverse = pfor_input.get_attr("reverse")
  return wrap(op_func(t, axis, exclusive=exclusive, reverse=reverse), True)


@RegisterPFor("BiasAdd")
def _convert_biasadd(pfor_input):
  t = pfor_input.stacked_input(0)
  bias = pfor_input.unstacked_input(1)
  data_format = pfor_input.get_attr("data_format")
  if data_format != b"NCHW":
    # NHWC: bias applies along the last dim, which is unaffected by stacking.
    return wrap(nn_ops.bias_add(t, bias, data_format=data_format), True)
  shape = array_ops.shape(t)
  # NCHW: fold the loop dim into the batch dim, apply bias, then restore.
  flattened_shape = array_ops.concat([[-1], shape[2:]], axis=0)
  t = array_ops.reshape(t, flattened_shape)
  t = nn_ops.bias_add(t, bias, data_format=b"NCHW")
  t = array_ops.reshape(t, shape)
  return wrap(t, True)


@RegisterPFor("UnsortedSegmentSum")
def _convert_unsortedsegmentsum(pfor_input):
  """Converts UnsortedSegmentSum by offsetting segment ids per iteration."""
  data, data_stacked, _ = pfor_input.input(0)
  # TODO(agarwal): handle unstacked?
  segment_ids = pfor_input.stacked_input(1)
  # TODO(agarwal): handle stacked?
  num_segments = pfor_input.unstacked_input(2)
  if not data_stacked:
    data = _stack(data, pfor_input.pfor.loop_len_vector).t
  segment_shape = array_ops.shape(segment_ids)
  n = segment_shape[0]
  ones = array_ops.ones_like(segment_shape)[1:]
  # Make segment ids of different iterations disjoint by adding
  # i * num_segments to the ids of iteration i.
  segment_offset = num_segments * math_ops.range(n)
  segment_offset = array_ops.reshape(segment_offset,
                                     array_ops.concat([[n], ones], axis=0))
  segment_ids += segment_offset
  num_segments = math_ops.cast(num_segments, dtypes.int64) * math_ops.cast(
      n, dtypes.int64)
  output = math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
  new_output_shape = array_ops.concat(
      [[n, -1], array_ops.shape(output)[1:]], axis=0)
  output = array_ops.reshape(output, new_output_shape)
  return wrap(output, True)
@RegisterPFor("Cast")
def _convert_cast(pfor_input):
  inp = pfor_input.stacked_input(0)
  dtype = pfor_input.get_attr("DstT")
  return wrap(math_ops.cast(inp, dtype), True)


# Elementwise ops: these commute with stacking, so the converted op is simply
# the same op applied to the (broadcast-compatible) stacked inputs.
@RegisterPForWithArgs("Abs", math_ops.abs)
@RegisterPForWithArgs("Acosh", math_ops.acosh)
@RegisterPForWithArgs("Acos", math_ops.acos)
@RegisterPForWithArgs("Add", math_ops.add)
@RegisterPForWithArgs("AddV2", math_ops.add_v2)
@RegisterPForWithArgs("Angle", math_ops.angle)
@RegisterPForWithArgs("Asinh", math_ops.asinh)
@RegisterPForWithArgs("Asin", math_ops.asin)
@RegisterPForWithArgs("Atan2", math_ops.atan2)
@RegisterPForWithArgs("Atanh", math_ops.atanh)
@RegisterPForWithArgs("Atan", math_ops.atan)
@RegisterPForWithArgs("BesselI0e", math_ops.bessel_i0e)
@RegisterPForWithArgs("BesselI1e", math_ops.bessel_i1e)
@RegisterPForWithArgs("BitwiseAnd", bitwise_ops.bitwise_and)
@RegisterPForWithArgs("BitwiseOr", bitwise_ops.bitwise_or)
@RegisterPForWithArgs("BitwiseXor", bitwise_ops.bitwise_xor)
@RegisterPForWithArgs("Ceil", math_ops.ceil)
@RegisterPForWithArgs("ComplexAbs", math_ops.complex_abs)
@RegisterPForWithArgs("Complex", math_ops.complex)
@RegisterPForWithArgs("Conj", math_ops.conj)
@RegisterPForWithArgs("Cosh", math_ops.cosh)
@RegisterPForWithArgs("Cos", math_ops.cos)
@RegisterPForWithArgs("Digamma", math_ops.digamma)
@RegisterPForWithArgs("Div", math_ops.div)
@RegisterPForWithArgs("DivNoNan", math_ops.div_no_nan)
@RegisterPForWithArgs("Elu", nn_ops.elu)
@RegisterPForWithArgs("Equal", math_ops.equal)
@RegisterPForWithArgs("Erfc", math_ops.erfc)
@RegisterPForWithArgs("Erf", math_ops.erf)
@RegisterPForWithArgs("Expm1", math_ops.expm1)
@RegisterPForWithArgs("Exp", math_ops.exp)
@RegisterPForWithArgs("FloorDiv", math_ops.floor_div)
@RegisterPForWithArgs("Floor", math_ops.floor)
@RegisterPForWithArgs("FloorMod", math_ops.floor_mod)
@RegisterPForWithArgs("GreaterEqual", math_ops.greater_equal)
@RegisterPForWithArgs("Greater", math_ops.greater)
@RegisterPForWithArgs("Igammac", math_ops.igammac)
@RegisterPForWithArgs("IgammaGradA", math_ops.igamma_grad_a)
@RegisterPForWithArgs("Igamma", math_ops.igamma)
@RegisterPForWithArgs("Imag", math_ops.imag)
@RegisterPForWithArgs("Invert", bitwise_ops.invert)
@RegisterPForWithArgs("Inv", math_ops.inv)
@RegisterPForWithArgs("IsFinite", math_ops.is_finite)
@RegisterPForWithArgs("IsInf", math_ops.is_inf)
@RegisterPForWithArgs("LeftShift", bitwise_ops.left_shift)
@RegisterPForWithArgs("LessEqual", math_ops.less_equal)
@RegisterPForWithArgs("Less", math_ops.less)
@RegisterPForWithArgs("Lgamma", math_ops.lgamma)
@RegisterPForWithArgs("Log1p", math_ops.log1p)
@RegisterPForWithArgs("LogicalAnd", math_ops.logical_and)
@RegisterPForWithArgs("LogicalNot", math_ops.logical_not)
@RegisterPForWithArgs("LogicalOr", math_ops.logical_or)
@RegisterPForWithArgs("LogicalXor", math_ops.logical_xor)
@RegisterPForWithArgs("Log", math_ops.log)
@RegisterPForWithArgs("Maximum", math_ops.maximum)
@RegisterPForWithArgs("Minimum", math_ops.minimum)
@RegisterPForWithArgs("Mod", math_ops.mod)
@RegisterPForWithArgs("Mul", math_ops.multiply)
@RegisterPForWithArgs("Neg", math_ops.negative)
@RegisterPForWithArgs("NotEqual", math_ops.not_equal)
@RegisterPForWithArgs("Polygamma", math_ops.polygamma)
@RegisterPForWithArgs("Pow", math_ops.pow)
@RegisterPForWithArgs("RealDiv", math_ops.divide)
@RegisterPForWithArgs("Real", math_ops.real)
@RegisterPForWithArgs("Reciprocal", math_ops.reciprocal)
@RegisterPForWithArgs("Relu6", nn_ops.relu6)
@RegisterPForWithArgs("Relu", nn_ops.relu)
@RegisterPForWithArgs("RightShift", bitwise_ops.right_shift)
@RegisterPForWithArgs("Rint", math_ops.rint)
@RegisterPForWithArgs("Round", math_ops.round)
@RegisterPForWithArgs("Rsqrt", math_ops.rsqrt)
@RegisterPForWithArgs("Selu", nn_ops.selu)
@RegisterPForWithArgs("Sigmoid", math_ops.sigmoid)
@RegisterPForWithArgs("Sign", math_ops.sign)
@RegisterPForWithArgs("Sinh", math_ops.sinh)
@RegisterPForWithArgs("Sin", math_ops.sin)
@RegisterPForWithArgs("Softplus", nn_ops.softplus)
@RegisterPForWithArgs("Softsign", nn_ops.softsign)
@RegisterPForWithArgs("Sqrt", math_ops.sqrt)
@RegisterPForWithArgs("SquaredDifference", math_ops.squared_difference)
@RegisterPForWithArgs("Square", math_ops.square)
@RegisterPForWithArgs("Sub", math_ops.subtract)
@RegisterPForWithArgs("Tanh", math_ops.tanh)
@RegisterPForWithArgs("Tan", math_ops.tan)
@RegisterPForWithArgs("TruncateDiv", math_ops.truncate_div)
@RegisterPForWithArgs("TruncateMod", math_ops.truncate_mod)
@RegisterPForWithArgs("Zeta", math_ops.zeta)
def _convert_cwise(pfor_input, op_type, op_func):
  # Note that ops handled here do not have attributes except "T" and "Tout", and
  # hence don't need extra arguments passed to the cwise_op call below.
  for attr in pfor_input.op.node_def.attr.keys():
    assert attr in [u"T", u"Tout"], (op_type, attr)
  pfor_input.expanddim_inputs_for_broadcast()
  return wrap(op_func(*[x.t for x in pfor_input.inputs]), True)
@RegisterPFor("ApproximateEqual")
def _convert_approximate_equal(pfor_input):
  pfor_input.expanddim_inputs_for_broadcast()
  x = pfor_input.input(0)[0]
  y = pfor_input.input(1)[0]
  tolerance = pfor_input.get_attr("tolerance")
  return wrap(math_ops.approximate_equal(x, y, tolerance=tolerance), True)


@RegisterPFor("Shape")
def _convert_shape(pfor_input):
  out_type = pfor_input.get_attr("out_type")
  # Strip the loop dimension; the per-iteration shape is loop invariant.
  return wrap(
      array_ops.shape(pfor_input.stacked_input(0), out_type=out_type)[1:],
      False)


@RegisterPFor("ShapeN")
def _convert_shape_n(pfor_input):
  out_type = pfor_input.get_attr("out_type")
  shapes = [
      array_ops.shape(x, out_type=out_type)[1:]
      if stacked else array_ops.shape(x)
      for x, stacked, _ in pfor_input.inputs
  ]
  return [wrap(x, False) for x in shapes]


@RegisterPFor("Size")
def _convert_size(pfor_input):
  out_type = pfor_input.get_attr("out_type")
  # Divide out the loop length to recover the per-iteration size.
  n = math_ops.cast(pfor_input.pfor.loop_len_vector[0], out_type)
  return wrap(
      array_ops.size(pfor_input.stacked_input(0), out_type=out_type) // n,
      False)


@RegisterPFor("Rank")
def _convert_rank(pfor_input):
  # Subtract one for the loop dimension.
  return wrap(array_ops.rank(pfor_input.stacked_input(0)) - 1, False)


@RegisterPFor("AddN")
def _convert_addn(pfor_input):
  # AddN does not support broadcasting.
  pfor_input.stack_inputs()
  return wrap(math_ops.add_n([x.t for x in pfor_input.inputs]), True)
@RegisterPFor("BiasAddGrad")
def _convert_biasaddgrad(pfor_input):
  grad = pfor_input.stacked_input(0)
  fmt = pfor_input.get_attr("data_format")
  if fmt == b"NCHW":
    # Sum over loop-adjusted N, H, W dims (loop dim 0 and channel dim 2 kept).
    output = math_ops.reduce_sum(grad, axis=[1, 3, 4], keepdims=False)
  else:
    grad_shape = array_ops.shape(grad)
    last_dim_shape = grad_shape[-1]
    first_dim_shape = grad_shape[0]
    output = array_ops.reshape(grad, [first_dim_shape, -1, last_dim_shape])
    output = math_ops.reduce_sum(output, axis=[1], keepdims=False)
  return wrap(output, True)


# Some required ops are not exposed under the tf namespace. Hence relying on
# _create_op to create them.
@RegisterPForWithArgs("EluGrad")
@RegisterPForWithArgs("Relu6Grad")
@RegisterPForWithArgs("ReluGrad")
@RegisterPForWithArgs("SeluGrad")
@RegisterPForWithArgs("SigmoidGrad")
@RegisterPForWithArgs("SoftplusGrad")
@RegisterPForWithArgs("SoftsignGrad")
@RegisterPForWithArgs("TanhGrad")
@RegisterPForWithArgs("SqrtGrad")
@RegisterPForWithArgs("RsqrtGrad")
@RegisterPForWithArgs("ReciprocalGrad")
def _convert_grads(pfor_input, op_type, *args, **kw_args):
  del args
  del kw_args
  # TODO(agarwal): Looks like these ops don't support broadcasting. Hence we
  # have to use tiling here.
  pfor_input.stack_inputs()
  outputs = _create_op(
      op_type, [x.t for x in pfor_input.inputs],
      [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  return [wrap(x, True) for x in outputs]
@RegisterPFor("Select")
def _convert_select(pfor_input):
  """Converts Select; flattens loop+batch dims when cond has rank > 1."""
  pfor_input.stack_inputs()
  cond = pfor_input.stacked_input(0)
  t = pfor_input.stacked_input(1)
  e = pfor_input.stacked_input(2)
  cond_rank = array_ops.rank(cond)
  cond, t, e = control_flow_ops.cond(
      cond_rank > 1, lambda: _inputs_with_flattening(pfor_input, [0, 1, 2]),
      lambda: [cond, t, e])
  outputs = _create_op(
      pfor_input.op_type, [cond, t, e], [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  n = pfor_input.pfor.loop_len_vector
  out = control_flow_ops.cond(cond_rank > 1,
                              lambda: _unflatten_first_dim(outputs[0], n),
                              lambda: outputs[0])
  # NOTE(review): only outputs[0] is unflattened and `x` is unused in the
  # comprehension below; this relies on Select having a single output.
  return [wrap(out, True) for x in outputs]


# random_ops


@RegisterPForWithArgs("RandomUniform")
@RegisterPForWithArgs("RandomUniformInt")
@RegisterPForWithArgs("RandomStandardNormal")
@RegisterPForWithArgs("TruncatedNormal")
@RegisterPForWithArgs("RandomGamma")
@RegisterPForWithArgs("RandomPoissonV2")
def _convert_random(pfor_input, op_type, *args, **kw_args):
  """Converts random ops by sampling a larger shape in a single call."""
  del args
  del kw_args
  inputs = [pfor_input.unstacked_input(i) for i in range(pfor_input.num_inputs)]
  # inputs[0] is "shape"
  inputs[0] = array_ops.concat(
      [pfor_input.pfor.loop_len_vector, inputs[0]], axis=0)
  logging.warning(
      "Note that %s inside pfor op may not give same output as "
      "inside a sequential loop.", op_type)
  outputs = _create_op(
      op_type, inputs, [x.dtype for x in pfor_input.outputs],
      attrs=pfor_input.op.node_def.attr).outputs
  return [wrap(x, True) for x in outputs]


# logging_ops


@RegisterPFor("Assert")
def _convert_assert(pfor_input):
  cond, cond_stacked, _ = pfor_input.input(0)
  if cond_stacked:
    # The assertion fires if the condition fails for any iteration.
    cond = math_ops.reduce_all(cond)

  data_list = [x.t for x in pfor_input.inputs][1:]
  return _create_op("Assert", [cond] + data_list, [],
                    attrs=pfor_input.op.node_def.attr)
@RegisterPFor("Print") def _convert_print(pfor_input): # Note that we don't stack all the inputs. Hence unstacked values are printed # once here vs multiple times in a while_loop. pfor_input.stack_inputs([0]) outputs = _create_op( "Print", [x.t for x in pfor_input.inputs], [x.dtype for x in pfor_input.outputs], attrs=pfor_input.op.node_def.attr).outputs return [wrap(x, True) for x in outputs] # data_flow_ops # TensorArray conversion is tricky since we don't support arrays of # TensorArrays. For converting them, we consider two distinct cases: # # 1. The array is constructed outside the pfor call, and read/written inside the # loop. # This is an easier case since we don't need to make an array of TensorArrays. # A correctness requirement is that these parallel iterations shouldn't attempt # to write to the same location. Hence at conversion time we disallow indices to # be loop-invariant as that would guarantee a collision. Even if the indices are # not loop-invariant, they could conflict and that shall trigger runtime errors. # # 2. The array is constructed and used entirely inside each pfor iteration. # For simplicity, here we require that the indices used for write/scatter are # "unstacked". Otherwise it becomes hard to merge the TensorArrays created in # different pfor iterations. We consider two sub_cases: # # 2a Elements written to the array are "stacked" # To simulate multiple TensorArrays, we may increase the dimension of each # element of the array. i.e. the i_th row of the j_th entry of the converted # TensorArray corresponds to the j_th entry of the TensorArray in the i_th # pfor iteration. # # 2b Elements written to the array are "unstacked" # In this case we don't increase the dimensions to avoid redundant tiling. Each # iteration is trying to write the same value. So we convert that to a single # write. # # Here are some tricks used to implement the above: # - TensorArrayV3 constructor encodes the element shape as an attr. 
Instead of # trying to trace whether future writes are stacked or unstacked in order to set # this attr, we set it to correspond to unknown shape. # - We use the "flow" output of the different ops to track whether the array # elements are stacked or unstacked. If a stacked write/scatter is done, we make # the flow stacked as well. # - We use some heuristic traversal of the graph to track whether the # TensorArray handle was created inside or outside the pfor loop. @RegisterPFor("TensorArrayV3") def _convert_tensor_array_v3(pfor_input): size = pfor_input.unstacked_input(0) dtype = pfor_input.get_attr("dtype") dynamic_size = pfor_input.get_attr("dynamic_size") clear_after_read = pfor_input.get_attr("clear_after_read") identical_element_shapes = pfor_input.get_attr("identical_element_shapes") tensor_array_name = pfor_input.get_attr("tensor_array_name") handle, flow = data_flow_ops.tensor_array_v3( size, dtype=dtype, # We don't set element shape since we don't know if writes are stacked or # not yet. element_shape=None, dynamic_size=dynamic_size, clear_after_read=clear_after_read, identical_element_shapes=identical_element_shapes, tensor_array_name=tensor_array_name) # Note we keep flow unstacked for now since we don't know if writes will be # stacked or not. return wrap(handle, False), wrap(flow, False) @RegisterPFor("TensorArraySizeV3") def _convert_tensor_array_size_v3(pfor_input): handle = pfor_input.unstacked_input(0) flow, flow_stacked, _ = pfor_input.input(1) if flow_stacked: flow = _unstack_flow(flow) size = data_flow_ops.tensor_array_size_v3(handle, flow) return wrap(size, False) def _handle_inside_pfor(pfor_input, handle): """Returns True if handle was created inside the pfor loop.""" # We use some heuristic to find the original TensorArray creation op. # The logic should handle the common cases (except cond based subgraphs). 
# In theory the user could perform different operations on the handle (like # Reshape, stack multiple handles, etc) which could break this logic. # TODO(agarwal): handle Switch/Merge. while handle.op.type in ("Enter", "Identity"): handle = handle.op.inputs[0] if handle.op.type not in [ "TensorArrayV3", "TensorArrayGradV3", "TensorArrayGradWithShape"]: raise ValueError("Unable to find source for handle %s" % handle) else: return pfor_input.pfor.op_is_inside_loop(handle.op) def _unstack_flow(value): # TODO(agarwal): consider looking if this is a Tile op then get its input. # This may avoid running the Tile operations. return array_ops.gather(value, 0) @RegisterPFor("TensorArrayReadV3") def _convert_tensor_array_read_v3(pfor_input): handle = pfor_input.unstacked_input(0) index, index_stacked, _ = pfor_input.input(1) dtype = pfor_input.get_attr("dtype") flow, flow_stacked, _ = pfor_input.input(2) if flow_stacked: flow = _unstack_flow(flow) is_inside_pfor = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0]) if is_inside_pfor: # Note that if we are inside a control flow construct inside the pfor, and # only some of the iterations are doing the read (i.e. # `all_indices_partitioned` is True), then the read operation should only # return values for the currently active pfor iterations (`all_indices` # below). Hence, whenever the returned value is stacked (i.e. `flow` is # stacked), we may need to do an extra gather after reading the values. Also # note that if `is_inside` is false, then values in the tensor array are # unstacked. So the check is only needed in this branch. all_indices = pfor_input.pfor.all_indices all_indices_partitioned = pfor_input.pfor.all_indices_partitioned # Note: flow_stacked indicates if values in the TensorArray are stacked or # not. if index_stacked: if flow_stacked: raise ValueError( "It looks like TensorArrayReadV3 was called on a TensorArray whose" " values are not loop-invariant, and the read indices were also" " not loop invariant. 
This is currently unsupported.") value = data_flow_ops.tensor_array_gather_v3( handle, index, flow, dtype=dtype) return wrap(value, True) value = data_flow_ops.tensor_array_read_v3( handle, index, flow, dtype=dtype) if flow_stacked and all_indices_partitioned: value = array_ops.gather(value, all_indices) return wrap(value, flow_stacked) # Values in the TensorArray should be unstacked (since different iterations # couldn't write to the same location). So whether output is stacked or not # depends on index_stacked. if index_stacked: value = data_flow_ops.tensor_array_gather_v3( handle, index, flow, dtype=dtype) else: value = data_flow_ops.tensor_array_read_v3( handle, index, flow, dtype=dtype) return wrap(value, index_stacked) @RegisterPFor("TensorArrayWriteV3") def _convert_tensor_array_write_v3(pfor_input): handle = pfor_input.unstacked_input(0) index, index_stacked, _ = pfor_input.input(1) value, value_stacked, _ = pfor_input.input(2) flow, flow_stacked, _ = pfor_input.input(3) if value_stacked and pfor_input.pfor.all_indices_partitioned: # Looks like we are in a control flow in a pfor where not all iterations are # active now. We don't allow that since that could lead to different indices # having different shapes which will be hard to merge later. raise ValueError("Writing non loop invariant values to TensorArray from " "inside a while_loop/cond not supported.") if flow_stacked: flow = _unstack_flow(flow) is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0]) if is_inside: if index_stacked: raise ValueError("Need indices for %s to be loop invariant" % handle) if not flow_stacked and not value_stacked: flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow) return wrap(flow_out, False) else: if not value_stacked: value = _stack(value, pfor_input.pfor.loop_len_vector).t # TODO(agarwal): Note that if flow is unstacked and value is stacked, then # this may or may not be a safe situation. 
flow is unstacked both for a # freshly created TensorArray, as well as after unstacked values are # written to it. If it is the latter, then we cannot write a stacked value # now since that may cause runtime errors due to different shapes in the # array. At the moment we are not able to handle this gracefully and # distinguish between the two cases. That would require some heuristic # traversal of the graph to figure out whether all the writes are # unstacked or not. flow_out = data_flow_ops.tensor_array_write_v3(handle, index, value, flow) return _stack(flow_out, pfor_input.pfor.loop_len_vector) else: if not index_stacked: raise ValueError("Need indices for %s to be not loop invariant" % handle) # Note that even when index_stacked is true, actual values in index may # still not be unique. However that will cause runtime error when executing # the scatter operation below. if not value_stacked: value = _stack(value, pfor_input.pfor.loop_len_vector).t flow_out = data_flow_ops.tensor_array_scatter_v3(handle, index, value, flow) return _stack(flow_out, pfor_input.pfor.loop_len_vector) def _transpose_first_two_dims(value): # TODO(agarwal): optimize if one of the dims == 1. value_shape = array_ops.shape(value) v0 = value_shape[0] v1 = value_shape[1] value = array_ops.reshape(value, [v0, v1, -1]) value = array_ops.transpose(value, [1, 0, 2]) new_shape = array_ops.concat([[v1, v0], value_shape[2:]], axis=0) return array_ops.reshape(value, new_shape) @RegisterPFor("TensorArrayGatherV3") def _convert_tensor_array_gather_v3(pfor_input): handle = pfor_input.unstacked_input(0) indices, indices_stacked, _ = pfor_input.input(1) indices = array_ops.reshape(indices, [-1]) flow, flow_stacked, _ = pfor_input.input(2) if flow_stacked: flow = _unstack_flow(flow) dtype = pfor_input.get_attr("dtype") # TODO(agarwal): support element_shape attr? 
n = pfor_input.pfor.loop_len_vector value = data_flow_ops.tensor_array_gather_v3( handle, indices, flow, dtype=dtype) is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0]) if is_inside: # flow_stacked indicates if values in the TensorArray are stacked or not. if indices_stacked: if flow_stacked: raise ValueError( "It looks like TensorArrayGatherV3 was called on a TensorArray " "whose values are not loop-invariant, and the indices were also " "not loop invariant. This is currently unsupported.") else: value = _unflatten_first_dim(value, n) return wrap(value, True) else: if flow_stacked: # Since elements in this array are stacked and `value` was produced by # gather, its first two dims are "gathered elements" and "stack # dimension". Our semantics require these two to be flipped. value = _transpose_first_two_dims(value) return wrap(value, flow_stacked) else: # Values in the TensorArray should be unstacked (since different iterations # couldn't write to the same location). So whether output is stacked or not # depends on indices_stacked. if indices_stacked: value = _unflatten_first_dim(value, n) return wrap(value, indices_stacked) @RegisterPFor("TensorArrayScatterV3") def _convert_tensor_array_scatter_v3(pfor_input): handle = pfor_input.unstacked_input(0) indices, indices_stacked, _ = pfor_input.input(1) indices = array_ops.reshape(indices, [-1]) value, value_stacked, _ = pfor_input.input(2) flow, flow_stacked, _ = pfor_input.input(3) if flow_stacked: flow = _unstack_flow(flow) is_inside = _handle_inside_pfor(pfor_input, pfor_input.op.inputs[0]) if is_inside: if indices_stacked: raise ValueError("Need indices for %s to be loop invariant" % handle) # Note that flow_stacked indicates if existing values in the array are # stacked or not. 
if not flow_stacked and not value_stacked: flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value, flow) return wrap(flow_out, False) if not value_stacked: # TODO(agarwal): tile in the second dimension directly instead of # transposing below. value = _stack(value, pfor_input.pfor.loop_len_vector).t value = _transpose_first_two_dims(value) # TODO(agarwal): Note that if a previous write was unstacked, flow will be # unstacked, and a stacked value may be written here which may cause # runtime error due to different elements having different shape. We do # not try to prevent that. flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value, flow) return _stack(flow_out, pfor_input.pfor.loop_len_vector) if not indices_stacked: raise ValueError("Need indices for %s to be not loop invariant" % handle) if not value_stacked: value = _stack(value, pfor_input.pfor.loop_len_vector).t value = _flatten_first_two_dims(value) flow_out = data_flow_ops.tensor_array_scatter_v3(handle, indices, value, flow) return _stack(flow_out, pfor_input.pfor.loop_len_vector) @RegisterPFor("TensorArrayGradV3") def _convert_tensor_array_grad_v3(pfor_input): handle = pfor_input.unstacked_input(0) flow, flow_stacked, _ = pfor_input.input(1) if flow_stacked: flow = _unstack_flow(flow) source = pfor_input.get_attr("source") # TODO(agarwal): For now, we assume that gradients are stacked if the # TensorArrayGradV3 call is being done inside the pfor. Getting that wrong # will give runtime error due to incorrect shape being written to the # accumulator. It is difficult to know in advance if gradients written will be # stacked or not. Note that flow being stacked is not indicative of the # gradient being stacked or not. Revisit this later. 
shape_to_prepend = pfor_input.pfor.loop_len_vector grad_handle, flow_out = data_flow_ops.tensor_array_grad_with_shape( handle=handle, flow_in=flow, shape_to_prepend=shape_to_prepend, source=source) flow_out = _stack(flow_out, pfor_input.pfor.loop_len_vector).t return [wrap(grad_handle, False), wrap(flow_out, True)] # StackV2 conversion is tricky since we don't have arrays of StackV2. So similar # to TensorArrays, we convert them by changing the dimension of the elements # inside the stack. # # We consider two cases: # # 1. StackV2 is constructed and used entirely inside the pfor loop. # We keep a single Stack and perform the push/pop operations of all the # iterations in lock-step. We also assume that all the iterations perform these # operations. In case of dynamic control flow, if only some of the iterations # try to perform a push/pop, then the conversion may not work correctly and may # cause undefined behavior. # TODO(agarwal): test StackV2 with dynamic control flow. # # 2. StackV2 is constructed outside the pfor loop. # Performing stack push/pop in a parallel fashion is ill-defined. However given # that reading stacks created externally is a common operation when computing # jacobians, we provide some special semantics here as follows. # - disallow push operations to the stack # - pop operations are performed in lock step by all iterations, similar to the # case when the stack is created inside. A single value is popped during the # lock-step operation and broadcast to all the iterations. Values in the stack # are assumed to be loop-invariant. # # Some other implementation details: # We use an ugly logic to find whether values in Stack data structure are # loop invariant or not. When converting push/pop operations, we keep track of # whether the last conversion used a stacked value or not (see _stack_cache # below). As a result if an unstacked value is written first, subsequent stacked # writes are disallowed when they could have been allowed in theory. 
# Map from cache key based on StackV2 handle to a bool indicating whether values # are stacked or not. # TODO(agarwal): move _stack_cache inside pfor? _stack_cache = {} def _stack_cache_key(pfor_input): """Create cache key corresponding to a stack handle.""" op_type = pfor_input.op_type assert op_type in ["StackPushV2", "StackPopV2"], op_type orig_handle = pfor_input.op.inputs[0] while orig_handle.op.type in ["Identity", "Enter"]: orig_handle = orig_handle.op.inputs[0] assert orig_handle.op.type == "StackV2", orig_handle.op return ops.get_default_graph(), pfor_input.pfor, orig_handle def _stack_handle_inside_pfor(handle, pfor_input): while handle.op.type in ["Identity", "Enter"]: handle = handle.op.inputs[0] assert handle.op.type == "StackV2", ( "Unable to find StackV2 op. Got %s" % handle.op) return pfor_input.pfor.op_is_inside_loop(handle.op) @RegisterPFor("StackPushV2") def _convert_stack_push_v2(pfor_input): handle = pfor_input.unstacked_input(0) elem, elem_stacked, _ = pfor_input.input(1) swap_memory = pfor_input.get_attr("swap_memory") if not _stack_handle_inside_pfor(pfor_input.op.inputs[0], pfor_input): raise ValueError("StackPushV2 not allowed on stacks created outside pfor") stack_cache_key = _stack_cache_key(pfor_input) stacked = _stack_cache.get(stack_cache_key, None) if stacked is None: stacked = elem_stacked _stack_cache[stack_cache_key] = stacked else: # If we previously made it unstacked then we can't revert to being stacked. if not stacked and elem_stacked: raise ValueError( "It looks like the stack was previously determined to be loop" " invariant, but we are now trying to push a loop dependent value" " to it. This is currently unsupported.") if stacked and not elem_stacked: elem = _stack(elem, pfor_input.pfor.loop_len_vector).t out = data_flow_ops.stack_push_v2(handle, elem, swap_memory=swap_memory) return wrap(out, stacked) # Note that inputs to this convertor will be unstacked. However it should get # called since it is a stateful op. 
@RegisterPFor("StackPopV2") def _convert_stack_pop_v2(pfor_input): handle = pfor_input.unstacked_input(0) stack_cache_key = _stack_cache_key(pfor_input) stacked = _stack_cache.get(stack_cache_key, None) # If a StackPushV2 has not been converted yet, we default to unstacked since # the push could be outside of pfor, or the covertor may not be called if the # inputs are unconverted. if stacked is None: stacked = False _stack_cache[stack_cache_key] = False elem_type = pfor_input.get_attr("elem_type") out = data_flow_ops.stack_pop_v2(handle, elem_type) return wrap(out, stacked) # parsing_ops @RegisterPFor("DecodeCSV") def _convert_decode_csv(pfor_input): lines = pfor_input.stacked_input(0) record_defaults = [ pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs) ] field_delim = pfor_input.get_attr("field_delim") use_quote_delim = pfor_input.get_attr("use_quote_delim") select_cols = pfor_input.get_attr("select_cols") if not select_cols: select_cols = None return [ wrap(t, True) for t in parsing_ops.decode_csv( lines, record_defaults, field_delim=field_delim, use_quote_delim=use_quote_delim, select_cols=select_cols) ] @RegisterPFor("ParseSingleExample") def _convert_parse_single_example(pfor_input): serialized = pfor_input.stacked_input(0) dense_defaults = [ pfor_input.unstacked_input(i) for i in range(1, pfor_input.num_inputs) ] sparse_keys = pfor_input.get_attr("sparse_keys") dense_keys = pfor_input.get_attr("dense_keys") sparse_types = pfor_input.get_attr("sparse_types") dense_shapes = pfor_input.get_attr("dense_shapes") output = gen_parsing_ops.parse_example( serialized=serialized, names=[], dense_defaults=dense_defaults, sparse_keys=sparse_keys, dense_keys=dense_keys, sparse_types=sparse_types, dense_shapes=dense_shapes) return [wrap(t, True, True) for t in nest.flatten(output)]
luotao1/Paddle
refs/heads/develop
python/paddle/fluid/tests/unittests/test_imperative_group.py
2
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import contextlib import unittest import numpy as np import six import unittest import paddle import paddle.fluid as fluid import paddle.fluid.dygraph as dygraph from paddle.fluid.dygraph.nn import Linear import paddle.fluid.core as core from paddle.fluid.optimizer import SGDOptimizer class MLP(fluid.Layer): def __init__(self, param_attr=None, bias_attr=None): super(MLP, self).__init__() self._linear1 = Linear(784, 10) self._linear2 = Linear(10, 10) def forward(self, inputs): y = self._linear1(inputs) y = self._linear2(y) return y class TestDataParallelGroup(unittest.TestCase): def create_varbase(self, dtype, shape, type=core.VarDesc.VarType.LOD_TENSOR): return core.VarBase(dtype, shape, "", type, True) def test_construct_group0(self): # one dtype & one limit capability var_list = [] var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [2, 50])) var_list.append( self.create_varbase(core.VarDesc.VarType.FP32, [2, 100])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [2, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [2, 25])) res = core.assign_group_by_size(var_list, [False, False, False, False], [400]) self.assertEqual([[0], [1], [2], [3]], res) def test_construct_group1(self): # multi dtype & one limit capability var_list = [] 
var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [1, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP64, [1, 25])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [1, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP64, [1, 25])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [1, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP64, [1, 25])) res = core.assign_group_by_size( var_list, [False, False, False, False, False, False], [400]) self.assertEqual([[0, 2], [1, 3], [4], [5]], res) def test_construct_group2(self): # one dtype & multi limit capability var_list = [] var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [2, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [2, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [2, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [2, 50])) res = core.assign_group_by_size(var_list, [False, False, False, False], [400, 800]) self.assertEqual([[0], [1, 2], [3]], res) def test_construct_group3(self): # multi dtype & multi limit capability var_list = [] var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [1, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP64, [1, 25])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [1, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP64, [1, 25])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [1, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP64, [1, 25])) res = core.assign_group_by_size( var_list, [False, False, False, False, False, False], [200, 400]) self.assertEqual([[0], [1], [2, 4], [3, 5]], res) def test_construct_group4(self): # multi dtype & zero limit capability var_list = [] var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [1, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP64, [1, 25])) 
var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [1, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP64, [1, 25])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [1, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP64, [1, 25])) res = core.assign_group_by_size( var_list, [False, False, False, False, False, False], [0]) self.assertEqual([[0], [1], [2], [3], [4], [5]], res) def test_construct_group5(self): # multi dtype & infinite capability var_list = [] var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [1, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP64, [1, 25])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [1, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP64, [1, 25])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [1, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP64, [1, 25])) res = core.assign_group_by_size( var_list, [False, False, False, False, False, False], [10000]) self.assertEqual([[0, 2, 4], [1, 3, 5]], res) def test_construct_group6(self): # multi dtype & limit capability & multi tensor type var_list = [] var_list.append( self.create_varbase(core.VarDesc.VarType.FP32, [1, 50], core.VarDesc.VarType.SELECTED_ROWS)) var_list.append(self.create_varbase(core.VarDesc.VarType.FP64, [1, 25])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [1, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP64, [1, 25])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [1, 50])) var_list.append( self.create_varbase(core.VarDesc.VarType.FP64, [1, 25], core.VarDesc.VarType.SELECTED_ROWS)) res = core.assign_group_by_size( var_list, [True, False, False, False, False, True], [400]) self.assertEqual([[0], [1, 3], [2, 4], [5]], res) def test_construct_group7(self): # multi dtype & multi limit capability & multi tensor type var_list = [] var_list.append( 
self.create_varbase(core.VarDesc.VarType.FP32, [1, 50], core.VarDesc.VarType.SELECTED_ROWS)) var_list.append(self.create_varbase(core.VarDesc.VarType.FP64, [1, 25])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [1, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP64, [1, 25])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [1, 50])) var_list.append( self.create_varbase(core.VarDesc.VarType.FP64, [1, 25], core.VarDesc.VarType.SELECTED_ROWS)) res = core.assign_group_by_size( var_list, [True, False, False, False, False, True], [200, 400]) self.assertEqual([[0], [1], [2], [3], [4], [5]], res) def test_construct_group8(self): # one dtype & one limit capability & have tensor_indices var_list = [] var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [2, 25])) var_list.append( self.create_varbase(core.VarDesc.VarType.FP32, [2, 100])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [2, 50])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [2, 25])) res = core.assign_group_by_size(var_list, [False, False, False, False], [400], [3, 0, 1, 2]) self.assertEqual([[3, 0], [1], [2]], res) def test_construct_group9(self): # one dtype & one limit capability & have tensor_indices var_list = [] var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [2, 25])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [2, 25])) var_list.append(self.create_varbase(core.VarDesc.VarType.FP32, [2, 25])) var_list.append( self.create_varbase(core.VarDesc.VarType.FP32, [2, 1000])) res = core.assign_group_by_size(var_list, [False, False, False, True], [300], [1, 0, 2, 3]) self.assertEqual([[1, 0], [3], [2]], res) if __name__ == '__main__': unittest.main()
WebSpider/headphones
refs/heads/master
lib/apscheduler/schedulers/tornado.py
33
from __future__ import absolute_import from datetime import timedelta from functools import wraps from apscheduler.schedulers.base import BaseScheduler from apscheduler.util import maybe_ref try: from tornado.ioloop import IOLoop except ImportError: # pragma: nocover raise ImportError('TornadoScheduler requires tornado installed') def run_in_ioloop(func): @wraps(func) def wrapper(self, *args, **kwargs): self._ioloop.add_callback(func, self, *args, **kwargs) return wrapper class TornadoScheduler(BaseScheduler): """ A scheduler that runs on a Tornado IOLoop. =========== =============================================================== ``io_loop`` Tornado IOLoop instance to use (defaults to the global IO loop) =========== =============================================================== """ _ioloop = None _timeout = None def start(self): super(TornadoScheduler, self).start() self.wakeup() @run_in_ioloop def shutdown(self, wait=True): super(TornadoScheduler, self).shutdown(wait) self._stop_timer() def _configure(self, config): self._ioloop = maybe_ref(config.pop('io_loop', None)) or IOLoop.current() super(TornadoScheduler, self)._configure(config) def _start_timer(self, wait_seconds): self._stop_timer() if wait_seconds is not None: self._timeout = self._ioloop.add_timeout(timedelta(seconds=wait_seconds), self.wakeup) def _stop_timer(self): if self._timeout: self._ioloop.remove_timeout(self._timeout) del self._timeout @run_in_ioloop def wakeup(self): self._stop_timer() wait_seconds = self._process_jobs() self._start_timer(wait_seconds)
cedk/odoo
refs/heads/8.0
addons/payment_paypal/tests/__init__.py
14224
# -*- coding: utf-8 -*-
mrquim/mrquimrepo
refs/heads/master
repo/script.module.exodus/lib/resources/lib/modules/pyaes/__init__.py
138
# The MIT License (MIT) # # Copyright (c) 2014 Richard Moore # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # This is a pure-Python implementation of the AES algorithm and AES common # modes of operation. # See: https://en.wikipedia.org/wiki/Advanced_Encryption_Standard # See: https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation # Supported key sizes: # 128-bit # 192-bit # 256-bit # Supported modes of operation: # ECB - Electronic Codebook # CBC - Cipher-Block Chaining # CFB - Cipher Feedback # OFB - Output Feedback # CTR - Counter # See the README.md for API details and general information. 
# Also useful, PyCrypto, a crypto library implemented in C with Python bindings: # https://www.dlitz.net/software/pycrypto/ VERSION = [1, 3, 0] from .aes import AES, AESModeOfOperationCTR, AESModeOfOperationCBC, AESModeOfOperationCFB, AESModeOfOperationECB, AESModeOfOperationOFB, AESModesOfOperation, Counter from .blockfeeder import decrypt_stream, Decrypter, encrypt_stream, Encrypter
fossoult/odoo
refs/heads/8.0
addons/account/wizard/account_fiscalyear_close_state.py
297
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv from openerp.tools.translate import _ class account_fiscalyear_close_state(osv.osv_memory): """ Closes Account Fiscalyear """ _name = "account.fiscalyear.close.state" _description = "Fiscalyear Close state" _columns = { 'fy_id': fields.many2one('account.fiscalyear', \ 'Fiscal Year to Close', required=True, help="Select a fiscal year to close"), } def data_save(self, cr, uid, ids, context=None): """ This function close account fiscalyear @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param ids: List of Account fiscalyear close state’s IDs """ journal_period_obj = self.pool.get('account.journal.period') period_obj = self.pool.get('account.period') fiscalyear_obj = self.pool.get('account.fiscalyear') account_move_obj = self.pool.get('account.move') for data in self.read(cr, uid, ids, context=context): fy_id = data['fy_id'][0] account_move_ids = account_move_obj.search(cr, uid, [('period_id.fiscalyear_id', '=', fy_id), ('state', '=', "draft")], 
context=context) if account_move_ids: raise osv.except_osv(_('Invalid Action!'), _('In order to close a fiscalyear, you must first post related journal entries.')) cr.execute('UPDATE account_journal_period ' \ 'SET state = %s ' \ 'WHERE period_id IN (SELECT id FROM account_period \ WHERE fiscalyear_id = %s)', ('done', fy_id)) cr.execute('UPDATE account_period SET state = %s ' \ 'WHERE fiscalyear_id = %s', ('done', fy_id)) cr.execute('UPDATE account_fiscalyear ' \ 'SET state = %s WHERE id = %s', ('done', fy_id)) self.invalidate_cache(cr, uid, context=context) return {'type': 'ir.actions.act_window_close'} # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
yuhcaesar/emacsrc
refs/heads/master
.emacs.d/.python-environments/default/Lib/encodings/iso8859_16.py
93
""" Python Character Mapping Codec iso8859_16 generated from 'MAPPINGS/ISO8859/8859-16.TXT' with gencodec.py. """#" import codecs ### Codec APIs class Codec(codecs.Codec): def encode(self,input,errors='strict'): return codecs.charmap_encode(input,errors,encoding_table) def decode(self,input,errors='strict'): return codecs.charmap_decode(input,errors,decoding_table) class IncrementalEncoder(codecs.IncrementalEncoder): def encode(self, input, final=False): return codecs.charmap_encode(input,self.errors,encoding_table)[0] class IncrementalDecoder(codecs.IncrementalDecoder): def decode(self, input, final=False): return codecs.charmap_decode(input,self.errors,decoding_table)[0] class StreamWriter(Codec,codecs.StreamWriter): pass class StreamReader(Codec,codecs.StreamReader): pass ### encodings module API def getregentry(): return codecs.CodecInfo( name='iso8859-16', encode=Codec().encode, decode=Codec().decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) ### Decoding Table decoding_table = ( u'\x00' # 0x00 -> NULL u'\x01' # 0x01 -> START OF HEADING u'\x02' # 0x02 -> START OF TEXT u'\x03' # 0x03 -> END OF TEXT u'\x04' # 0x04 -> END OF TRANSMISSION u'\x05' # 0x05 -> ENQUIRY u'\x06' # 0x06 -> ACKNOWLEDGE u'\x07' # 0x07 -> BELL u'\x08' # 0x08 -> BACKSPACE u'\t' # 0x09 -> HORIZONTAL TABULATION u'\n' # 0x0A -> LINE FEED u'\x0b' # 0x0B -> VERTICAL TABULATION u'\x0c' # 0x0C -> FORM FEED u'\r' # 0x0D -> CARRIAGE RETURN u'\x0e' # 0x0E -> SHIFT OUT u'\x0f' # 0x0F -> SHIFT IN u'\x10' # 0x10 -> DATA LINK ESCAPE u'\x11' # 0x11 -> DEVICE CONTROL ONE u'\x12' # 0x12 -> DEVICE CONTROL TWO u'\x13' # 0x13 -> DEVICE CONTROL THREE u'\x14' # 0x14 -> DEVICE CONTROL FOUR u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE u'\x16' # 0x16 -> SYNCHRONOUS IDLE u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK u'\x18' # 0x18 -> CANCEL u'\x19' # 0x19 -> END OF MEDIUM u'\x1a' # 0x1A -> SUBSTITUTE u'\x1b' # 0x1B -> ESCAPE u'\x1c' # 
0x1C -> FILE SEPARATOR u'\x1d' # 0x1D -> GROUP SEPARATOR u'\x1e' # 0x1E -> RECORD SEPARATOR u'\x1f' # 0x1F -> UNIT SEPARATOR u' ' # 0x20 -> SPACE u'!' # 0x21 -> EXCLAMATION MARK u'"' # 0x22 -> QUOTATION MARK u'#' # 0x23 -> NUMBER SIGN u'$' # 0x24 -> DOLLAR SIGN u'%' # 0x25 -> PERCENT SIGN u'&' # 0x26 -> AMPERSAND u"'" # 0x27 -> APOSTROPHE u'(' # 0x28 -> LEFT PARENTHESIS u')' # 0x29 -> RIGHT PARENTHESIS u'*' # 0x2A -> ASTERISK u'+' # 0x2B -> PLUS SIGN u',' # 0x2C -> COMMA u'-' # 0x2D -> HYPHEN-MINUS u'.' # 0x2E -> FULL STOP u'/' # 0x2F -> SOLIDUS u'0' # 0x30 -> DIGIT ZERO u'1' # 0x31 -> DIGIT ONE u'2' # 0x32 -> DIGIT TWO u'3' # 0x33 -> DIGIT THREE u'4' # 0x34 -> DIGIT FOUR u'5' # 0x35 -> DIGIT FIVE u'6' # 0x36 -> DIGIT SIX u'7' # 0x37 -> DIGIT SEVEN u'8' # 0x38 -> DIGIT EIGHT u'9' # 0x39 -> DIGIT NINE u':' # 0x3A -> COLON u';' # 0x3B -> SEMICOLON u'<' # 0x3C -> LESS-THAN SIGN u'=' # 0x3D -> EQUALS SIGN u'>' # 0x3E -> GREATER-THAN SIGN u'?' # 0x3F -> QUESTION MARK u'@' # 0x40 -> COMMERCIAL AT u'A' # 0x41 -> LATIN CAPITAL LETTER A u'B' # 0x42 -> LATIN CAPITAL LETTER B u'C' # 0x43 -> LATIN CAPITAL LETTER C u'D' # 0x44 -> LATIN CAPITAL LETTER D u'E' # 0x45 -> LATIN CAPITAL LETTER E u'F' # 0x46 -> LATIN CAPITAL LETTER F u'G' # 0x47 -> LATIN CAPITAL LETTER G u'H' # 0x48 -> LATIN CAPITAL LETTER H u'I' # 0x49 -> LATIN CAPITAL LETTER I u'J' # 0x4A -> LATIN CAPITAL LETTER J u'K' # 0x4B -> LATIN CAPITAL LETTER K u'L' # 0x4C -> LATIN CAPITAL LETTER L u'M' # 0x4D -> LATIN CAPITAL LETTER M u'N' # 0x4E -> LATIN CAPITAL LETTER N u'O' # 0x4F -> LATIN CAPITAL LETTER O u'P' # 0x50 -> LATIN CAPITAL LETTER P u'Q' # 0x51 -> LATIN CAPITAL LETTER Q u'R' # 0x52 -> LATIN CAPITAL LETTER R u'S' # 0x53 -> LATIN CAPITAL LETTER S u'T' # 0x54 -> LATIN CAPITAL LETTER T u'U' # 0x55 -> LATIN CAPITAL LETTER U u'V' # 0x56 -> LATIN CAPITAL LETTER V u'W' # 0x57 -> LATIN CAPITAL LETTER W u'X' # 0x58 -> LATIN CAPITAL LETTER X u'Y' # 0x59 -> LATIN CAPITAL LETTER Y u'Z' # 0x5A -> LATIN CAPITAL LETTER Z u'[' 
# 0x5B -> LEFT SQUARE BRACKET u'\\' # 0x5C -> REVERSE SOLIDUS u']' # 0x5D -> RIGHT SQUARE BRACKET u'^' # 0x5E -> CIRCUMFLEX ACCENT u'_' # 0x5F -> LOW LINE u'`' # 0x60 -> GRAVE ACCENT u'a' # 0x61 -> LATIN SMALL LETTER A u'b' # 0x62 -> LATIN SMALL LETTER B u'c' # 0x63 -> LATIN SMALL LETTER C u'd' # 0x64 -> LATIN SMALL LETTER D u'e' # 0x65 -> LATIN SMALL LETTER E u'f' # 0x66 -> LATIN SMALL LETTER F u'g' # 0x67 -> LATIN SMALL LETTER G u'h' # 0x68 -> LATIN SMALL LETTER H u'i' # 0x69 -> LATIN SMALL LETTER I u'j' # 0x6A -> LATIN SMALL LETTER J u'k' # 0x6B -> LATIN SMALL LETTER K u'l' # 0x6C -> LATIN SMALL LETTER L u'm' # 0x6D -> LATIN SMALL LETTER M u'n' # 0x6E -> LATIN SMALL LETTER N u'o' # 0x6F -> LATIN SMALL LETTER O u'p' # 0x70 -> LATIN SMALL LETTER P u'q' # 0x71 -> LATIN SMALL LETTER Q u'r' # 0x72 -> LATIN SMALL LETTER R u's' # 0x73 -> LATIN SMALL LETTER S u't' # 0x74 -> LATIN SMALL LETTER T u'u' # 0x75 -> LATIN SMALL LETTER U u'v' # 0x76 -> LATIN SMALL LETTER V u'w' # 0x77 -> LATIN SMALL LETTER W u'x' # 0x78 -> LATIN SMALL LETTER X u'y' # 0x79 -> LATIN SMALL LETTER Y u'z' # 0x7A -> LATIN SMALL LETTER Z u'{' # 0x7B -> LEFT CURLY BRACKET u'|' # 0x7C -> VERTICAL LINE u'}' # 0x7D -> RIGHT CURLY BRACKET u'~' # 0x7E -> TILDE u'\x7f' # 0x7F -> DELETE u'\x80' # 0x80 -> <control> u'\x81' # 0x81 -> <control> u'\x82' # 0x82 -> <control> u'\x83' # 0x83 -> <control> u'\x84' # 0x84 -> <control> u'\x85' # 0x85 -> <control> u'\x86' # 0x86 -> <control> u'\x87' # 0x87 -> <control> u'\x88' # 0x88 -> <control> u'\x89' # 0x89 -> <control> u'\x8a' # 0x8A -> <control> u'\x8b' # 0x8B -> <control> u'\x8c' # 0x8C -> <control> u'\x8d' # 0x8D -> <control> u'\x8e' # 0x8E -> <control> u'\x8f' # 0x8F -> <control> u'\x90' # 0x90 -> <control> u'\x91' # 0x91 -> <control> u'\x92' # 0x92 -> <control> u'\x93' # 0x93 -> <control> u'\x94' # 0x94 -> <control> u'\x95' # 0x95 -> <control> u'\x96' # 0x96 -> <control> u'\x97' # 0x97 -> <control> u'\x98' # 0x98 -> <control> u'\x99' # 0x99 -> <control> u'\x9a' 
# 0x9A -> <control> u'\x9b' # 0x9B -> <control> u'\x9c' # 0x9C -> <control> u'\x9d' # 0x9D -> <control> u'\x9e' # 0x9E -> <control> u'\x9f' # 0x9F -> <control> u'\xa0' # 0xA0 -> NO-BREAK SPACE u'\u0104' # 0xA1 -> LATIN CAPITAL LETTER A WITH OGONEK u'\u0105' # 0xA2 -> LATIN SMALL LETTER A WITH OGONEK u'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE u'\u20ac' # 0xA4 -> EURO SIGN u'\u201e' # 0xA5 -> DOUBLE LOW-9 QUOTATION MARK u'\u0160' # 0xA6 -> LATIN CAPITAL LETTER S WITH CARON u'\xa7' # 0xA7 -> SECTION SIGN u'\u0161' # 0xA8 -> LATIN SMALL LETTER S WITH CARON u'\xa9' # 0xA9 -> COPYRIGHT SIGN u'\u0218' # 0xAA -> LATIN CAPITAL LETTER S WITH COMMA BELOW u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK u'\u0179' # 0xAC -> LATIN CAPITAL LETTER Z WITH ACUTE u'\xad' # 0xAD -> SOFT HYPHEN u'\u017a' # 0xAE -> LATIN SMALL LETTER Z WITH ACUTE u'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE u'\xb0' # 0xB0 -> DEGREE SIGN u'\xb1' # 0xB1 -> PLUS-MINUS SIGN u'\u010c' # 0xB2 -> LATIN CAPITAL LETTER C WITH CARON u'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE u'\u017d' # 0xB4 -> LATIN CAPITAL LETTER Z WITH CARON u'\u201d' # 0xB5 -> RIGHT DOUBLE QUOTATION MARK u'\xb6' # 0xB6 -> PILCROW SIGN u'\xb7' # 0xB7 -> MIDDLE DOT u'\u017e' # 0xB8 -> LATIN SMALL LETTER Z WITH CARON u'\u010d' # 0xB9 -> LATIN SMALL LETTER C WITH CARON u'\u0219' # 0xBA -> LATIN SMALL LETTER S WITH COMMA BELOW u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK u'\u0152' # 0xBC -> LATIN CAPITAL LIGATURE OE u'\u0153' # 0xBD -> LATIN SMALL LIGATURE OE u'\u0178' # 0xBE -> LATIN CAPITAL LETTER Y WITH DIAERESIS u'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX u'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS u'\u0106' # 0xC5 -> LATIN CAPITAL LETTER C WITH 
ACUTE u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS u'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE u'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX u'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS u'\u015a' # 0xD7 -> LATIN CAPITAL LETTER S WITH ACUTE u'\u0170' # 0xD8 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS u'\u0118' # 0xDD -> LATIN CAPITAL LETTER E WITH OGONEK u'\u021a' # 0xDE -> LATIN CAPITAL LETTER T WITH COMMA BELOW u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX u'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS u'\u0107' # 0xE5 -> LATIN SMALL LETTER C WITH ACUTE u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX u'\xeb' # 0xEB -> LATIN 
SMALL LETTER E WITH DIAERESIS u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS u'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE u'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX u'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS u'\u015b' # 0xF7 -> LATIN SMALL LETTER S WITH ACUTE u'\u0171' # 0xF8 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS u'\u0119' # 0xFD -> LATIN SMALL LETTER E WITH OGONEK u'\u021b' # 0xFE -> LATIN SMALL LETTER T WITH COMMA BELOW u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS ) ### Encoding table encoding_table=codecs.charmap_build(decoding_table)
dablak/boto
refs/heads/develop
boto/dynamodb/table.py
31
# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/ # Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. # from boto.dynamodb.batch import BatchList from boto.dynamodb.schema import Schema from boto.dynamodb.item import Item from boto.dynamodb import exceptions as dynamodb_exceptions import time class TableBatchGenerator(object): """ A low-level generator used to page through results from batch_get_item operations. :ivar consumed_units: An integer that holds the number of ConsumedCapacityUnits accumulated thus far for this generator. 
""" def __init__(self, table, keys, attributes_to_get=None, consistent_read=False): self.table = table self.keys = keys self.consumed_units = 0 self.attributes_to_get = attributes_to_get self.consistent_read = consistent_read def _queue_unprocessed(self, res): if not u'UnprocessedKeys' in res: return if not self.table.name in res[u'UnprocessedKeys']: return keys = res[u'UnprocessedKeys'][self.table.name][u'Keys'] for key in keys: h = key[u'HashKeyElement'] r = key[u'RangeKeyElement'] if u'RangeKeyElement' in key else None self.keys.append((h, r)) def __iter__(self): while self.keys: # Build the next batch batch = BatchList(self.table.layer2) batch.add_batch(self.table, self.keys[:100], self.attributes_to_get) res = batch.submit() # parse the results if not self.table.name in res[u'Responses']: continue self.consumed_units += res[u'Responses'][self.table.name][u'ConsumedCapacityUnits'] for elem in res[u'Responses'][self.table.name][u'Items']: yield elem # re-queue un processed keys self.keys = self.keys[100:] self._queue_unprocessed(res) class Table(object): """ An Amazon DynamoDB table. :ivar name: The name of the table. :ivar create_time: The date and time that the table was created. :ivar status: The current status of the table. One of: 'ACTIVE', 'UPDATING', 'DELETING'. :ivar schema: A :class:`boto.dynamodb.schema.Schema` object representing the schema defined for the table. :ivar item_count: The number of items in the table. This value is set only when the Table object is created or refreshed and may not reflect the actual count. :ivar size_bytes: Total size of the specified table, in bytes. Amazon DynamoDB updates this value approximately every six hours. Recent changes might not be reflected in this value. :ivar read_units: The ReadCapacityUnits of the tables Provisioned Throughput. :ivar write_units: The WriteCapacityUnits of the tables Provisioned Throughput. :ivar schema: The Schema object associated with the table. 
""" def __init__(self, layer2, response): """ :type layer2: :class:`boto.dynamodb.layer2.Layer2` :param layer2: A `Layer2` api object. :type response: dict :param response: The output of `boto.dynamodb.layer1.Layer1.describe_table`. """ self.layer2 = layer2 self._dict = {} self.update_from_response(response) @classmethod def create_from_schema(cls, layer2, name, schema): """Create a Table object. If you know the name and schema of your table, you can create a ``Table`` object without having to make any API calls (normally an API call is made to retrieve the schema of a table). Example usage:: table = Table.create_from_schema( boto.connect_dynamodb(), 'tablename', Schema.create(hash_key=('keyname', 'N'))) :type layer2: :class:`boto.dynamodb.layer2.Layer2` :param layer2: A ``Layer2`` api object. :type name: str :param name: The name of the table. :type schema: :class:`boto.dynamodb.schema.Schema` :param schema: The schema associated with the table. :rtype: :class:`boto.dynamodb.table.Table` :return: A Table object representing the table. """ table = cls(layer2, {'Table': {'TableName': name}}) table._schema = schema return table def __repr__(self): return 'Table(%s)' % self.name @property def name(self): return self._dict['TableName'] @property def create_time(self): return self._dict.get('CreationDateTime', None) @property def status(self): return self._dict.get('TableStatus', None) @property def item_count(self): return self._dict.get('ItemCount', 0) @property def size_bytes(self): return self._dict.get('TableSizeBytes', 0) @property def schema(self): return self._schema @property def read_units(self): try: return self._dict['ProvisionedThroughput']['ReadCapacityUnits'] except KeyError: return None @property def write_units(self): try: return self._dict['ProvisionedThroughput']['WriteCapacityUnits'] except KeyError: return None def update_from_response(self, response): """ Update the state of the Table object based on the response data received from Amazon DynamoDB. 
""" # 'Table' is from a describe_table call. if 'Table' in response: self._dict.update(response['Table']) # 'TableDescription' is from a create_table call. elif 'TableDescription' in response: self._dict.update(response['TableDescription']) if 'KeySchema' in self._dict: self._schema = Schema(self._dict['KeySchema']) def refresh(self, wait_for_active=False, retry_seconds=5): """ Refresh all of the fields of the Table object by calling the underlying DescribeTable request. :type wait_for_active: bool :param wait_for_active: If True, this command will not return until the table status, as returned from Amazon DynamoDB, is 'ACTIVE'. :type retry_seconds: int :param retry_seconds: If wait_for_active is True, this parameter controls the number of seconds of delay between calls to update_table in Amazon DynamoDB. Default is 5 seconds. """ done = False while not done: response = self.layer2.describe_table(self.name) self.update_from_response(response) if wait_for_active: if self.status == 'ACTIVE': done = True else: time.sleep(retry_seconds) else: done = True def update_throughput(self, read_units, write_units): """ Update the ProvisionedThroughput for the Amazon DynamoDB Table. :type read_units: int :param read_units: The new value for ReadCapacityUnits. :type write_units: int :param write_units: The new value for WriteCapacityUnits. """ self.layer2.update_throughput(self, read_units, write_units) def delete(self): """ Delete this table and all items in it. After calling this the Table objects status attribute will be set to 'DELETING'. """ self.layer2.delete_table(self) def get_item(self, hash_key, range_key=None, attributes_to_get=None, consistent_read=False, item_class=Item): """ Retrieve an existing item from the table. :type hash_key: int|long|float|str|unicode|Binary :param hash_key: The HashKey of the requested item. The type of the value must match the type defined in the schema for the table. 
:type range_key: int|long|float|str|unicode|Binary :param range_key: The optional RangeKey of the requested item. The type of the value must match the type defined in the schema for the table. :type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :type consistent_read: bool :param consistent_read: If True, a consistent read request is issued. Otherwise, an eventually consistent request is issued. :type item_class: Class :param item_class: Allows you to override the class used to generate the items. This should be a subclass of :class:`boto.dynamodb.item.Item` """ return self.layer2.get_item(self, hash_key, range_key, attributes_to_get, consistent_read, item_class) lookup = get_item def has_item(self, hash_key, range_key=None, consistent_read=False): """ Checks the table to see if the Item with the specified ``hash_key`` exists. This may save a tiny bit of time/bandwidth over a straight :py:meth:`get_item` if you have no intention to touch the data that is returned, since this method specifically tells Amazon not to return anything but the Item's key. :type hash_key: int|long|float|str|unicode|Binary :param hash_key: The HashKey of the requested item. The type of the value must match the type defined in the schema for the table. :type range_key: int|long|float|str|unicode|Binary :param range_key: The optional RangeKey of the requested item. The type of the value must match the type defined in the schema for the table. :type consistent_read: bool :param consistent_read: If True, a consistent read request is issued. Otherwise, an eventually consistent request is issued. :rtype: bool :returns: ``True`` if the Item exists, ``False`` if not. """ try: # Attempt to get the key. If it can't be found, it'll raise # an exception. self.get_item(hash_key, range_key=range_key, # This minimizes the size of the response body. 
attributes_to_get=[hash_key], consistent_read=consistent_read) except dynamodb_exceptions.DynamoDBKeyNotFoundError: # Key doesn't exist. return False return True def new_item(self, hash_key=None, range_key=None, attrs=None, item_class=Item): """ Return an new, unsaved Item which can later be PUT to Amazon DynamoDB. This method has explicit (but optional) parameters for the hash_key and range_key values of the item. You can use these explicit parameters when calling the method, such as:: >>> my_item = my_table.new_item(hash_key='a', range_key=1, attrs={'key1': 'val1', 'key2': 'val2'}) >>> my_item {u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'} Or, if you prefer, you can simply put the hash_key and range_key in the attrs dictionary itself, like this:: >>> attrs = {'foo': 'a', 'bar': 1, 'key1': 'val1', 'key2': 'val2'} >>> my_item = my_table.new_item(attrs=attrs) >>> my_item {u'bar': 1, u'foo': 'a', 'key1': 'val1', 'key2': 'val2'} The effect is the same. .. note: The explicit parameters take priority over the values in the attrs dict. So, if you have a hash_key or range_key in the attrs dict and you also supply either or both using the explicit parameters, the values in the attrs will be ignored. :type hash_key: int|long|float|str|unicode|Binary :param hash_key: The HashKey of the new item. The type of the value must match the type defined in the schema for the table. :type range_key: int|long|float|str|unicode|Binary :param range_key: The optional RangeKey of the new item. The type of the value must match the type defined in the schema for the table. :type attrs: dict :param attrs: A dictionary of key value pairs used to populate the new item. :type item_class: Class :param item_class: Allows you to override the class used to generate the items. This should be a subclass of :class:`boto.dynamodb.item.Item` """ return item_class(self, hash_key, range_key, attrs) def query(self, hash_key, *args, **kw): """ Perform a query on the table. 
:type hash_key: int|long|float|str|unicode|Binary :param hash_key: The HashKey of the requested item. The type of the value must match the type defined in the schema for the table. :type range_key_condition: :class:`boto.dynamodb.condition.Condition` :param range_key_condition: A Condition object. Condition object can be one of the following types: EQ|LE|LT|GE|GT|BEGINS_WITH|BETWEEN The only condition which expects or will accept two values is 'BETWEEN', otherwise a single value should be passed to the Condition constructor. :type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :type request_limit: int :param request_limit: The maximum number of items to retrieve from Amazon DynamoDB on each request. You may want to set a specific request_limit based on the provisioned throughput of your table. The default behavior is to retrieve as many results as possible per request. :type max_results: int :param max_results: The maximum number of results that will be retrieved from Amazon DynamoDB in total. For example, if you only wanted to see the first 100 results from the query, regardless of how many were actually available, you could set max_results to 100 and the generator returned from the query method will only yeild 100 results max. :type consistent_read: bool :param consistent_read: If True, a consistent read request is issued. Otherwise, an eventually consistent request is issued. :type scan_index_forward: bool :param scan_index_forward: Specified forward or backward traversal of the index. Default is forward (True). :type exclusive_start_key: list or tuple :param exclusive_start_key: Primary key of the item from which to continue an earlier query. This would be provided as the LastEvaluatedKey in that query. 
:type count: bool :param count: If True, Amazon DynamoDB returns a total number of items for the Query operation, even if the operation has no matching items for the assigned filter. If count is True, the actual items are not returned and the count is accessible as the ``count`` attribute of the returned object. :type item_class: Class :param item_class: Allows you to override the class used to generate the items. This should be a subclass of :class:`boto.dynamodb.item.Item` """ return self.layer2.query(self, hash_key, *args, **kw) def scan(self, *args, **kw): """ Scan through this table, this is a very long and expensive operation, and should be avoided if at all possible. :type scan_filter: A dict :param scan_filter: A dictionary where the key is the attribute name and the value is a :class:`boto.dynamodb.condition.Condition` object. Valid Condition objects include: * EQ - equal (1) * NE - not equal (1) * LE - less than or equal (1) * LT - less than (1) * GE - greater than or equal (1) * GT - greater than (1) * NOT_NULL - attribute exists (0, use None) * NULL - attribute does not exist (0, use None) * CONTAINS - substring or value in list (1) * NOT_CONTAINS - absence of substring or value in list (1) * BEGINS_WITH - substring prefix (1) * IN - exact match in list (N) * BETWEEN - >= first value, <= second value (2) :type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :type request_limit: int :param request_limit: The maximum number of items to retrieve from Amazon DynamoDB on each request. You may want to set a specific request_limit based on the provisioned throughput of your table. The default behavior is to retrieve as many results as possible per request. :type max_results: int :param max_results: The maximum number of results that will be retrieved from Amazon DynamoDB in total. 
For example, if you only wanted to see the first 100 results from the query, regardless of how many were actually available, you could set max_results to 100 and the generator returned from the query method will only yeild 100 results max. :type count: bool :param count: If True, Amazon DynamoDB returns a total number of items for the Scan operation, even if the operation has no matching items for the assigned filter. If count is True, the actual items are not returned and the count is accessible as the ``count`` attribute of the returned object. :type exclusive_start_key: list or tuple :param exclusive_start_key: Primary key of the item from which to continue an earlier query. This would be provided as the LastEvaluatedKey in that query. :type item_class: Class :param item_class: Allows you to override the class used to generate the items. This should be a subclass of :class:`boto.dynamodb.item.Item` :return: A TableGenerator (generator) object which will iterate over all results :rtype: :class:`boto.dynamodb.layer2.TableGenerator` """ return self.layer2.scan(self, *args, **kw) def batch_get_item(self, keys, attributes_to_get=None): """ Return a set of attributes for a multiple items from a single table using their primary keys. This abstraction removes the 100 Items per batch limitations as well as the "UnprocessedKeys" logic. :type keys: list :param keys: A list of scalar or tuple values. Each element in the list represents one Item to retrieve. If the schema for the table has both a HashKey and a RangeKey, each element in the list should be a tuple consisting of (hash_key, range_key). If the schema for the table contains only a HashKey, each element in the list should be a scalar value of the appropriate type for the table schema. NOTE: The maximum number of items that can be retrieved for a single operation is 100. Also, the number of items retrieved is constrained by a 1 MB size limit. 
:type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :return: A TableBatchGenerator (generator) object which will iterate over all results :rtype: :class:`boto.dynamodb.table.TableBatchGenerator` """ return TableBatchGenerator(self, keys, attributes_to_get)
mrshelly/openerp71313
refs/heads/master
openerp/addons/base_gengo/res_company.py
34
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class res_company(osv.Model): _name = "res.company" _inherit = "res.company" _columns = { "gengo_private_key": fields.text("Gengo Private Key"), "gengo_public_key": fields.text("Gengo Public Key"), "gengo_comment": fields.text("Comments", help="This comment will be automatically be enclosed in each an every request sent to Gengo"), "gengo_auto_approve": fields.boolean("Auto Approve Translation ?", help="Jobs are Automatically Approved by Gengo."), } _defaults = { "gengo_auto_approve": True, }
erinspace/osf.io
refs/heads/develop
admin_tests/common_auth/test_views.py
30
from nose import tools as nt
import mock

from django.test import RequestFactory
from django.http import Http404

from tests.base import AdminTestCase
from osf_tests.factories import AuthUserFactory
from admin_tests.utilities import setup_form_view

from osf.models.user import OSFUser
from admin.common_auth.views import RegisterUser
from admin.common_auth.forms import UserRegistrationForm


class TestRegisterUser(AdminTestCase):
    """Tests for the admin RegisterUser form view.

    Verifies that registering an admin account requires a valid OSF user id
    and that a valid id results in a new OSFUser being created.
    """

    def setUp(self):
        super(TestRegisterUser, self).setUp()
        self.user = AuthUserFactory()
        # 'abc12' is a syntactically plausible but nonexistent OSF id,
        # used by the invalid-id test below.
        self.data = {
            'osf_id': 'abc12',
        }
        self.view = RegisterUser()
        self.request = RequestFactory().post('fake_path')

    def test_osf_id_invalid(self):
        # An id that matches no OSF user should make form_valid 404.
        form = UserRegistrationForm(data=self.data)
        nt.assert_true(form.is_valid())
        view = setup_form_view(self.view, self.request, form)
        with nt.assert_raises(Http404):
            view.form_valid(form)

    @mock.patch('admin.common_auth.views.messages.success')
    def test_add_user(self, mock_save):
        # NOTE(review): the patched target is messages.success, so
        # `mock_save` actually asserts the success message was flashed,
        # not that a save() happened — the name is misleading.
        count = OSFUser.objects.count()
        self.data.update(osf_id=self.user._id)
        form = UserRegistrationForm(data=self.data)
        nt.assert_true(form.is_valid())
        view = setup_form_view(self.view, self.request, form)
        view.form_valid(form)
        nt.assert_true(mock_save.called)
        nt.assert_equal(OSFUser.objects.count(), count + 1)
merelinguist/ludolatin
refs/heads/master
app/misc/__init__.py
3
from flask import Blueprint

# Blueprint for miscellaneous routes; registered on the app elsewhere.
misc = Blueprint('misc', __name__)

# Imported after the blueprint is created so that views can import `misc`
# from this module — presumably to avoid a circular import (standard Flask
# blueprint pattern).
from . import views
stefanw/froide
refs/heads/master
froide/bounce/signals.py
1
from django.dispatch import Signal

# Signals emitted by the bounce-handling machinery.
# NOTE(review): `providing_args` is documentation-only and was deprecated in
# Django 3.0 (removed in 4.0) — confirm the project's Django version before
# keeping it.

# Fired when a bounce is attributed to a specific user account.
user_email_bounced = Signal(providing_args=['bounce', 'should_deactivate'])
# Fired for any bounced address, whether or not it maps to a user.
email_bounced = Signal(providing_args=['bounce', 'should_deactivate'])
# Fired when an address unsubscribes (e.g. via list-unsubscribe).
email_unsubscribed = Signal(providing_args=['email', 'reference'])
vineodd/PIMSim
refs/heads/master
GEM5Simulation/gem5/src/arch/x86/isa/insts/general_purpose/string/store_string.py
91
# Copyright (c) 2007-2008 The Hewlett-Packard Development Company # All rights reserved. # # The license below extends only to copyright in the software and shall # not be construed as granting a license to any other intellectual # property including but not limited to intellectual property relating # to a hardware implementation of the functionality of the software # licensed hereunder. You may use the software subject to the license # terms below provided that you ensure that this notice is replicated # unmodified and in its entirety in all distributions of the software, # modified or unmodified, in source code or in binary form. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer; # redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution; # neither the name of the copyright holders nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black

# Microcode for the x86 STOS (store string) instruction and its REP-prefixed
# form.  The string below is consumed by gem5's microcode assembler; its
# contents are data and must not be altered.
#
# Both macroops first read the direction flag (ruflag t0, 10) and compute
# +dsz or -dsz into t3, so rdi walks forward or backward as DF dictates.
microcode = '''
def macroop STOS_M {

    # Find the constant we need to either add or subtract from rdi
    ruflag t0, 10
    movi t3, t3, dsz, flags=(CEZF,), dataSize=asz
    subi t4, t0, dsz, dataSize=asz
    mov t3, t3, t4, flags=(nCEZF,), dataSize=asz

    st rax, es, [1, t0, rdi]
    add rdi, rdi, t3, dataSize=asz
};

def macroop STOS_E_M {

    and t0, rcx, rcx, flags=(EZF,), dataSize=asz
    br label("end"), flags=(CEZF,)

    # Find the constant we need to either add or subtract from rdi
    ruflag t0, 10
    movi t3, t3, dsz, flags=(CEZF,), dataSize=asz
    subi t4, t0, dsz, dataSize=asz
    mov t3, t3, t4, flags=(nCEZF,), dataSize=asz

topOfLoop:
    st rax, es, [1, t0, rdi]
    subi rcx, rcx, 1, flags=(EZF,), dataSize=asz
    add rdi, rdi, t3, dataSize=asz
    br label("topOfLoop"), flags=(nCEZF,)

end:
    fault "NoFault"
};
'''
rocketgithub/l10n_gt_extra
refs/heads/master
__openerp__.py
1
# -*- encoding: utf-8 -*-
##############################################################################
#
#    Copyright (c) 2009-2012 Soluciones Tecnológicas Prisma S.A. All Rights Reserved.
#    José Rodrigo Fernández Menegazzo, Soluciones Tecnológicas Prisma S.A.
#    (http://www.solucionesprisma.com)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

# OpenERP module manifest for the Guatemalan localization extras.
# FIX: corrected typos in the user-visible description string
# ("Rerporte requeridos" -> "Reportes requeridos",
#  "un contabilidad" -> "una contabilidad") and in the copyright comments
# ("Tecnologócias" -> "Tecnológicas").
{
    'name': 'Guatemala - Reportes y funcionalidad extra',
    'version': '1.0',
    'category': 'Localization',
    'description': """
Reportes requeridos por la SAT y otra funcionalidad extra para llevar una contabilidad en Guatemala.
    """,
    'author': 'José Rodrigo Fernández Menegazzo',
    'website': 'http://solucionesprisma.com/',
    'depends': ['l10n_gt'],
    'data': [
        'account_invoice_view.xml',
        'res_partner_view.xml',
        'reports.xml',
    ],
    'demo': [],
    'installable': True,
    'images': ['images/config_chart_l10n_gt.jpeg', 'images/l10n_gt_chart.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
RebeccaWPerry/vispy
refs/heads/master
vispy/visuals/filters/picking.py
7
# -*- coding: utf-8 -*-
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.

import struct
import weakref

from ..shaders import Function


class PickingFilter(object):
    """Filter used to color visuals by a picking ID.

    Note that the ID color uses the alpha channel, so this may not be used
    with blending enabled.

    Parameters
    ----------
    id_ : int or None
        Positive integer picking ID.  If None, the filter starts
        unconfigured; assign ``filter.id`` before enabling it.
    """
    def __init__(self, id_=None):
        self.shader = Function("""
            void picking_filter() {
                if( $enabled == 0 )
                    return;
                if( gl_FragColor.a == 0 )
                    discard;
                gl_FragColor = $id_color;
            }
        """)
        # FIX: the original did `self.id = id_` unconditionally, so the
        # documented default (id_=None) crashed in the setter on the
        # `id < 1` comparison.  Leave the filter unconfigured until a real
        # ID is assigned; `enabled` is False, so the shader bails out early.
        self._id = None
        self._id_color = None
        if id_ is not None:
            self.id = id_
        self.enabled = False

    @property
    def id(self):
        """The picking ID (positive int), or None if not yet assigned."""
        return self._id

    @id.setter
    def id(self, id):
        if id < 1:
            raise ValueError('Picking ID must be integer > 0.')
        # Little-endian: ID byte 0 lands in R, byte 3 in A.
        id_color = struct.unpack('<4B', struct.pack('<I', id))
        self.shader['id_color'] = [x/255. for x in id_color]
        self._id = id
        self._id_color = id_color

    @property
    def enabled(self):
        """Whether the picking color replaces the normal fragment color."""
        return self._enabled

    @enabled.setter
    def enabled(self, e):
        self._enabled = e
        # The shader template only understands 0/1.
        self.shader['enabled'] = 1 if e is True else 0

    @property
    def color(self):
        """ The RGBA color that will be drawn to the framebuffer for visuals
        that use this filter.  None until an ID has been assigned.
        """
        return self._id_color

    def _attach(self, visual):
        # Hold the visual weakly so the filter does not keep it alive.
        self._visual = weakref.ref(visual)
        hook = self._visual()._get_hook('frag', 'post')
        hook.add(self.shader(), position=10)
dahlstrom-g/intellij-community
refs/heads/master
python/testData/inspections/PyDunderSlotsInspection/inheritedClassAttrAssignmentAndOwnWithAttrAndInheritedWithDictSlots.py
35
# Inspection test fixture for PyDunderSlotsInspection: C declares 'attr' in
# __slots__, but B contributes '__dict__', so assigning C.attr / c.attr is
# legal and must produce no inspection warning.
# NOTE(review): this file is IDE test data — any textual change (including
# these comments) must be mirrored in the inspection's expected output.
class B(object):
    attr = 'baz'
    __slots__ = ['f', 'b', '__dict__']


class C(B):
    __slots__ = ['attr', 'bar']


C.attr = 'spam'
print(C.attr)

c = C()
c.attr = 'spam'
print(c.attr)
gauribhoite/personfinder
refs/heads/master
env/google_appengine/lib/django-0.96/django/utils/itercompat.py
33
""" Providing iterator functions that are not in all version of Python we support. Where possible, we try to use the system-native version and only fall back to these implementations if necessary. """ import itertools def compat_tee(iterable): """Return two independent iterators from a single iterable. Based on http://www.python.org/doc/2.3.5/lib/itertools-example.html """ # Note: Using a dictionary and a list as the default arguments here is # deliberate and safe in this instance. def gen(next, data={}, cnt=[0]): dpop = data.pop for i in itertools.count(): if i == cnt[0]: item = data[i] = next() cnt[0] += 1 else: item = dpop(i) yield item next = iter(iterable).next return gen(next), gen(next) if hasattr(itertools, 'tee'): tee = itertools.tee else: tee = compat_tee
hideshis/scripts_for_research
refs/heads/master
FOSE2016/dependency/dependencyFinder_exe.py
1
# -*- coding: utf-8 -*- import subprocess import os pjt_path = '/Users/hideshi-s/Desktop/httpclient' result = subprocess.check_output('find ' + pjt_path + ' | grep "\.jar$"', shell=True) result = result.replace('\r', '') result_list = result.split('\n') result_list.pop() for jar_file in result_list: jar_path = jar_file.split('/') target_index = jar_path.index('target') if jar_path[target_index+1].endswith('.jar'): if not (jar_path[-1].endswith('javadoc.jar') or jar_path[-1].endswith('sources.jar') or jar_path[-1].endswith('tests.jar')): print jar_file #DependencyExtractor -xml -out df.xml httpclient5-5.0-alpha2-SNAPSHOT.jar component_name = jar_path[target_index-1] DF_report_name = './' + component_name + '.xml' DF_report_parsed_name = './' + component_name + '.csv' print 'finding dependencies...' os.system('DependencyExtractor -xml -out ' + DF_report_name + ' ' + jar_file) print 'parsing dependencyFinder report...' os.system('python df_xml_parser.py ' + DF_report_name + ' ' + DF_report_parsed_name)
mewtaylor/django
refs/heads/master
tests/utils_tests/test_feedgenerator.py
163
"""Tests for django.utils.feedgenerator (tag URIs, RFC 2822 / RFC 3339 date
formatting, feed MIME types, and atom:link rendering)."""
from __future__ import unicode_literals

import datetime
import unittest

from django.utils import feedgenerator
from django.utils.timezone import get_fixed_timezone


class FeedgeneratorTest(unittest.TestCase):
    """
    Tests for the low-level syndication feed framework.
    """

    def test_get_tag_uri(self):
        """
        Test get_tag_uri() correctly generates TagURIs.
        """
        self.assertEqual(
            feedgenerator.get_tag_uri('http://example.org/foo/bar#headline', datetime.date(2004, 10, 25)),
            'tag:example.org,2004-10-25:/foo/bar/headline')

    def test_get_tag_uri_with_port(self):
        """
        Test that get_tag_uri() correctly generates TagURIs from URLs with port
        numbers.
        """
        self.assertEqual(
            feedgenerator.get_tag_uri('http://www.example.org:8000/2008/11/14/django#headline', datetime.datetime(2008, 11, 14, 13, 37, 0)),
            'tag:www.example.org,2008-11-14:/2008/11/14/django/headline')

    def test_rfc2822_date(self):
        """
        Test rfc2822_date() correctly formats datetime objects.
        """
        self.assertEqual(
            feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0)),
            "Fri, 14 Nov 2008 13:37:00 -0000"
        )

    def test_rfc2822_date_with_timezone(self):
        """
        Test rfc2822_date() correctly formats datetime objects with tzinfo.
        """
        # get_fixed_timezone() takes an offset in minutes east of UTC.
        self.assertEqual(
            feedgenerator.rfc2822_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=get_fixed_timezone(60))),
            "Fri, 14 Nov 2008 13:37:00 +0100"
        )

    def test_rfc2822_date_without_time(self):
        """
        Test rfc2822_date() correctly formats date objects.
        """
        self.assertEqual(
            feedgenerator.rfc2822_date(datetime.date(2008, 11, 14)),
            "Fri, 14 Nov 2008 00:00:00 -0000"
        )

    def test_rfc3339_date(self):
        """
        Test rfc3339_date() correctly formats datetime objects.
        """
        self.assertEqual(
            feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0)),
            "2008-11-14T13:37:00Z"
        )

    def test_rfc3339_date_with_timezone(self):
        """
        Test rfc3339_date() correctly formats datetime objects with tzinfo.
        """
        self.assertEqual(
            feedgenerator.rfc3339_date(datetime.datetime(2008, 11, 14, 13, 37, 0, tzinfo=get_fixed_timezone(120))),
            "2008-11-14T13:37:00+02:00"
        )

    def test_rfc3339_date_without_time(self):
        """
        Test rfc3339_date() correctly formats date objects.
        """
        self.assertEqual(
            feedgenerator.rfc3339_date(datetime.date(2008, 11, 14)),
            "2008-11-14T00:00:00Z"
        )

    def test_atom1_mime_type(self):
        """
        Test to make sure Atom MIME type has UTF8 Charset parameter set
        """
        atom_feed = feedgenerator.Atom1Feed("title", "link", "description")
        self.assertEqual(
            atom_feed.content_type, "application/atom+xml; charset=utf-8"
        )

    def test_rss_mime_type(self):
        """
        Test to make sure RSS MIME type has UTF8 Charset parameter set
        """
        rss_feed = feedgenerator.Rss201rev2Feed("title", "link", "description")
        self.assertEqual(
            rss_feed.content_type, "application/rss+xml; charset=utf-8"
        )

    # Two regression tests for #14202
    def test_feed_without_feed_url_gets_rendered_without_atom_link(self):
        feed = feedgenerator.Rss201rev2Feed('title', '/link/', 'descr')
        self.assertEqual(feed.feed['feed_url'], None)
        feed_content = feed.writeString('utf-8')
        self.assertNotIn('<atom:link', feed_content)
        self.assertNotIn('href="/feed/"', feed_content)
        self.assertNotIn('rel="self"', feed_content)

    def test_feed_with_feed_url_gets_rendered_with_atom_link(self):
        feed = feedgenerator.Rss201rev2Feed('title', '/link/', 'descr', feed_url='/feed/')
        self.assertEqual(feed.feed['feed_url'], '/feed/')
        feed_content = feed.writeString('utf-8')
        self.assertIn('<atom:link', feed_content)
        self.assertIn('href="/feed/"', feed_content)
        self.assertIn('rel="self"', feed_content)
flberger/fabula
refs/heads/audioclient
fabula/interfaces/json_rpc.py
1
"""Fabula JSON RPC Interface Copyright 2012 Florian Berger <fberger@florian-berger.de> """ # This file is part of Fabula. # # Fabula is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Fabula is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Fabula. If not, see <http://www.gnu.org/licenses/>. # Work started on 26. Mar 2012 # # First attempt implemented using the bjsonrpc module, but it has not yet been # ported to Python 3, and did not allow for convenient management of clients # by IP number. # # Now this is a copy of fabula.interfaces.python_tcp, but handling the byte # streams as JSON-RPC objects. import fabula.interfaces.python_tcp import json class JSONRPCServerInterface(fabula.interfaces.python_tcp.TCPServerInterface): """Fabula Server interface using JSON-RPC. """ def __init__(self): """Initialisation. Additional attributes: JSONRPCServerInterface.decoder An instance of json.JSONDecoder. JSONRPCServerInterface.json_rpc_id_list A list containing the incoming JSON-RPC ids, to be used as a FIFO. """ # Call base class # fabula.interfaces.python_tcp.TCPServerInterface.__init__(self) self.decoder = json.JSONDecoder() self.json_rpc_id_list = [] # The main difference to TCPServerInterface is a RequestHandler that # handles JSON-RPC. parent = self # We define the class here to be able to access local variables through # parent. # # TODO: FabulaRequestHandler copied from TCPServerInterface, but the actual difference is only a few lines. Abstract protocol. 
# class FabulaRequestHandler(fabula.interfaces.python_tcp.socketserver.BaseRequestHandler): def handle(self): # TODO: this will run in a thread spawned by the custom ThreadingTCPServer class. How thread safe are these operations? # Fabula uses persistent TCP connections, so every call to this # method should be from a new client. Blindly add this one. # message_buffer = parent.connections[self.client_address] = fabula.interfaces.MessageBuffer() fabula.LOGGER.info("adding and handling new client: {}".format(self.client_address)) # Register in thread list, which is used for Interface shutdown # parent.thread_list.append(fabula.interfaces.python_tcp.threading.current_thread()) # Now, handle messages in a persistent fashion. self.request.settimeout(0.3) received_data = bytearray() while not parent.shutdown_flag: # TODO: partly copied from TCPClientInterface.handle_messages() with a few renamings # Only the Interface may add connections to # Interface.connections, but the server may remove them if # a client exits on the application level. # So, first check whether the client handled by this # FabulaRequestHandler has been removed, and if so, # terminate. # if not self.client_address in parent.connections.keys(): fabula.LOGGER.info("client '{}' has been removed by the server".format(self.client_address)) # We are *not* setting parent.shutdown_flag, since only # this connection should terminate. try: self.request.shutdown(fabula.interfaces.python_tcp.socket.SHUT_RDWR) except: # Socket may be unavailable already # fabula.LOGGER.warning("could not shut down socket") self.request.close() fabula.LOGGER.info("handler connection closed, stopping thread") raise SystemExit # First deliver waiting local messages. # if message_buffer.messages_for_remote: fabula.LOGGER.debug("sending 1 message of {} to {}".format(len(message_buffer.messages_for_remote), self.client_address)) # Send a JSON representation, wrapped as a JSON-RPC # response. 
# json_rpc_response = '{{"result" : {}, "error" : null, "id" : {}}}' json_event_list = '[{}]'.format(", ".join([event.json() for event in message_buffer.messages_for_remote.popleft().event_list])) try: id = parent.json_rpc_id_list.pop(0) except IndexError: fabula.LOGGER.debug("List of queued JSON_RPC ids exhausted, sending JSON-RPC notification instead") id = 'null' representation = json_rpc_response.format(json_event_list, id) fabula.LOGGER.debug("attempting to send {}".format(representation)) try: # Add a double newline as separator for convenience. # Use ASCII. # TODO: implement UTF-16 # self.request.sendall(bytes(representation + "\n\n", "ascii")) except fabula.interfaces.python_tcp.socket.error: fabula.LOGGER.error("socket error while sending to {}".format(self.client_address)) try: fabula.LOGGER.debug("closing socket") self.request.shutdown(fabula.interfaces.python_tcp.socket.SHUT_RDWR) except: # Socket may be unavailable already # fabula.LOGGER.warning("could not shut down socket") self.request.close() fabula.LOGGER.info("handler connection closed") # This is the only way to notify the Server # fabula.LOGGER.debug("removing connection from connections dict") del parent.connections[self.client_address] fabula.LOGGER.debug("removing thread from thread list") parent.thread_list.remove(threading.current_thread()) fabula.LOGGER.info("stopping thread") raise SystemExit # Now listen for incoming client messages for some time (set # above). This should catch any messages received in the # meantime by the OS. # chunk = None try: # TODO: evaluate recv size # chunk = self.request.recv(4096) except fabula.interfaces.python_tcp.socket.timeout: # Nobody likes us, evereyone left us, there all out # without us, having fun... 
# pass except fabula.interfaces.python_tcp.socket.error: fabula.LOGGER.error("socket error while receiving") if chunk: fabula.LOGGER.debug("received {} bytes from {}".format(len(chunk), self.client_address)) # Assuming we are dealing with bytes here # received_data.extend(chunk) # Strip leading whitespace. Works on bytearrays. # TODO: check for either ASCII or UTF-16 before stripping leading whitespace. UTF-16 strings should start with FFFE. # received_data = received_data.lstrip() json_decoded = end_index = None try: # Currently we only accept ASCII. # TODO: Accept UTF-16 for convenient length calculation # json_decoded, end_index = parent.decoder.raw_decode(str(received_data, "ascii")) except ValueError: fabula.LOGGER.debug("JSON object not yet complete, keeping buffer") # There actually may be more than one JSON-RCP request # in received_data. Catch them all! # while (json_decoded is not None and end_index is not None): # Found! received_data = received_data[end_index:] msg = "message from {} complete at {} bytes, {} left in buffer" fabula.LOGGER.debug(msg.format(self.client_address, end_index, len(received_data))) fabula.LOGGER.debug("decoded JSON: {}".format(json_decoded)) message = parent.json_to_message(json_decoded["params"]) # Queue id # fabula.LOGGER.debug("queueing request id '{}'".format(json_decoded["id"])) parent.json_rpc_id_list.append(json_decoded["id"]) message_buffer.messages_for_local.append(message) # Next # json_decoded = end_index = None if len(received_data): try: json_decoded, end_index = parent.decoder.raw_decode(str(received_data, "ascii")) except ValueError: fabula.LOGGER.debug("JSON object not yet complete, keeping buffer") # No more complete JSON-RPC objects, end of evaluation. # No need to run as fast as possible. # fabula.interfaces.python_tcp.sleep(1/60) fabula.LOGGER.debug("shutdown flag set in parent") # Deliver waiting local messages. 
# while len(message_buffer.messages_for_remote): # Copied from above # fabula.LOGGER.debug("sending 1 message of {} to {}".format(len(message_buffer.messages_for_remote), self.client_address)) # Send a JSON representation, wrapped as a JSON-RPC # response. # json_rpc_response = '{{"result" : {}, "error" : null, "id" : {}}}' json_event_list = '[{}]'.format(", ".join([event.json() for event in message_buffer.messages_for_remote.popleft().event_list])) try: id = parent.json_rpc_id_list.pop(0) except IndexError: fabula.LOGGER.debug("List of queued JSON_RPC ids exhausted, sending JSON-RPC notification instead") id = 'null' representation = json_rpc_response.format(json_event_list, id) # Add a double newline as separator for convenience. # Use ASCII. # TODO: implement UTF-16 # self.request.sendall(bytes(representation + "\n\n", "ascii")) try: self.request.shutdown(fabula.interfaces.python_tcp.socket.SHUT_RDWR) except: # Socket may be unavailable already # fabula.LOGGER.warning("could not shut down socket") self.request.close() fabula.LOGGER.info("handler connection closed, stopping thread") raise SystemExit # End of class. self.FabulaRequestHandler = FabulaRequestHandler return def json_to_message(self, json_event_list): """Read a list of JSON Event representations converted to dicts, and return an according Fabula Message. 
""" message = fabula.Message([]) for event_dict in json_event_list: fabula.LOGGER.debug("attempting to recreate Event from {}".format(event_dict)) event_class = fabula.__dict__[event_dict["class"]] del event_dict["class"] # Convert target identifiers from JSON lists to tuples # if ("target_identifier" in event_dict.keys() and type(event_dict["target_identifier"]) is list): event_dict["target_identifier"] = tuple(event_dict["target_identifier"]) # Using the infamous argument unpacking from dict # TODO: this will of course fail with Tiles and Entities # event = event_class(**event_dict) fabula.LOGGER.debug("recreated {}".format(event)) message.event_list.append(event) return message
ajayaa/keystone
refs/heads/master
keystone/tests/unit/ksfixtures/cache.py
2
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import fixtures from keystone.common import cache class Cache(fixtures.Fixture): """A fixture for setting up and tearing down the cache between test cases. """ def setUp(self): super(Cache, self).setUp() # NOTE(dstanek): We must remove the existing cache backend in the # setUp instead of the tearDown because it defaults to a no-op cache # and we want the configure call below to create the correct backend. # NOTE(morganfainberg): The only way to reconfigure the CacheRegion # object on each setUp() call is to remove the .backend property. if cache.CACHE_REGION.is_configured: del cache.CACHE_REGION.backend # ensure the cache region instance is setup cache.configure_cache()
tersmitten/ansible
refs/heads/devel
test/units/module_utils/common/validation/test_check_type_bytes.py
83
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

import pytest

from ansible.module_utils._text import to_native
from ansible.module_utils.common.validation import check_type_bytes


def test_check_type_bytes():
    """check_type_bytes() converts numbers and unit-suffixed strings to bytes."""
    # (input value, expected byte count) — plain numbers, floats that round
    # up, and every accepted unit spelling from bytes to gigabytes.
    cases = (
        ('1', 1),
        (99, 99),
        (1.5, 2),
        ('1.5', 2),
        ('2b', 2),
        ('2B', 2),
        ('2k', 2048),
        ('2K', 2048),
        ('2KB', 2048),
        ('1m', 1048576),
        ('1M', 1048576),
        ('1MB', 1048576),
        ('1g', 1073741824),
        ('1G', 1073741824),
        ('1GB', 1073741824),
        (1073741824, 1073741824),
    )
    for value, expected in cases:
        assert check_type_bytes(value) == expected


def test_check_type_bytes_fail():
    """Unparseable values and mixed-case unit suffixes raise TypeError."""
    for bad_value in ('foo', '2kb', '2Kb', '1mb', '1Mb', '1gb', '1Gb'):
        with pytest.raises(TypeError) as exc_info:
            check_type_bytes(bad_value)
        assert 'cannot be converted to a Byte value' in to_native(exc_info.value)
vyscond/cocos
refs/heads/master
cocos/shader.py
3
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2015 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#   * Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright
#     notice, this list of conditions and the following disclaimer in
#     the documentation and/or other materials provided with the
#     distribution.
#   * Neither the name of cocos2d nor the names of its
#     contributors may be used to endorse or promote products
#     derived from this software without specific prior written
#     permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Thin wrappers over the ARB shader-object OpenGL extension entry points."""

from __future__ import division, print_function, unicode_literals

import six

from . import compat
from ctypes import *
from pyglet.gl import *


class GLSLException(Exception):
    """Raised when a GLSL shader fails to compile or a program fails to link."""
    pass


def glsl_log(handle):
    """Return the GL info log for *handle* as bytes, or '' if empty/invalid.

    Used to recover compiler/linker error messages after a failed
    glCompileShaderARB / glLinkProgramARB call.
    """
    if handle == 0:
        return ''

    log_len = c_int(0)
    glGetObjectParameterivARB(handle, GL_OBJECT_INFO_LOG_LENGTH_ARB,
                              byref(log_len))
    if log_len.value == 0:
        return ''

    log = create_string_buffer(log_len.value)  # does log_len include the NUL?
    chars_written = c_int(0)
    glGetInfoLogARB(handle, log_len.value, byref(chars_written), log)
    return log.value


class Shader(object):
    """A single GLSL shader stage plus its source-level dependencies.

    Dependencies form a DAG; the class-level generation counter ``s_tag``
    is bumped before each graph walk so every node is visited exactly once
    (a node whose ``tag`` equals ``s_tag`` has already been visited).
    """

    # Generation counter for de-duplicating dependency-graph walks.
    s_tag = 0

    def __init__(self, name, prog):
        # GLSL source must reach the GL as ASCII bytes.
        prog = compat.asciibytes(prog)
        self.name = name
        self.prog = prog
        self.shader = 0          # GL shader object handle; 0 == not created
        self.compiling = False   # re-entrancy guard for _compile()
        self.tag = -1            # last s_tag generation this node was visited in
        self.dependencies = []

    def __del__(self):
        self.destroy()

    def _source(self):
        """Collect source of all dependencies (post-order), then our own.

        Returns [] for a node already visited in the current generation.
        """
        if self.tag == Shader.s_tag:
            return []
        self.tag = Shader.s_tag

        r = []
        for d in self.dependencies:
            r.extend(d._source())
        r.append(self.prog)
        return r

    def _compile(self):
        """Compile just this shader's own source (not its dependencies).

        Raises:
            GLSLException: if the shader object cannot be created or the
                GLSL compiler rejects the source (compiler log attached).
        """
        if self.shader:
            return
        if self.compiling:
            return
        self.compiling = True

        self.shader = glCreateShaderObjectARB(self.shaderType())
        if self.shader == 0:
            # BUG FIX: error message previously misspelled as "faled".
            raise GLSLException('failed to create shader object')

        prog = c_char_p(self.prog)
        length = c_int(-1)  # -1: source is NUL-terminated
        glShaderSourceARB(self.shader,
                          1,
                          cast(byref(prog), POINTER(POINTER(c_char))),
                          byref(length))
        glCompileShaderARB(self.shader)
        self.compiling = False

        compile_status = c_int(0)
        glGetObjectParameterivARB(self.shader, GL_OBJECT_COMPILE_STATUS_ARB,
                                  byref(compile_status))
        if not compile_status.value:
            err = glsl_log(self.shader)
            glDeleteObjectARB(self.shader)
            self.shader = 0
            raise GLSLException('failed to compile shader', err)

    def _attachTo(self, program):
        """Attach this shader and its dependencies to *program* (deduped)."""
        if self.tag == Shader.s_tag:
            return
        self.tag = Shader.s_tag

        for d in self.dependencies:
            d._attachTo(program)

        if self.isCompiled():
            glAttachObjectARB(program, self.shader)

    def addDependency(self, shader):
        """Register *shader* as a source-level dependency; returns self."""
        self.dependencies.append(shader)
        return self

    def destroy(self):
        """Release the GL shader object, if any."""
        if self.shader != 0:
            glDeleteObjectARB(self.shader)
            # BUG FIX: zero the handle so a later destroy()/__del__ does
            # not delete the same GL object twice.
            self.shader = 0

    def shaderType(self):
        """Return the GL shader-type enum; implemented by subclasses."""
        raise NotImplementedError()

    def isCompiled(self):
        return self.shader != 0

    def attachTo(self, program):
        """Attach this shader graph to *program*, visiting each node once."""
        Shader.s_tag = Shader.s_tag + 1
        self._attachTo(program)

    # ATI/apple's glsl compiler is broken: instead of attaching each
    # dependency separately, the whole graph is concatenated into a single
    # source string (compileFlat) and attached as one object (attachFlat).
    def attachFlat(self, program):
        if self.isCompiled():
            glAttachObjectARB(program, self.shader)

    def compileFlat(self):
        """Compile this shader with all dependency sources concatenated in.

        Raises:
            GLSLException: on creation or compilation failure.
        """
        if self.isCompiled():
            return

        self.shader = glCreateShaderObjectARB(self.shaderType())
        if self.shader == 0:
            # BUG FIX: error message previously misspelled as "faled".
            raise GLSLException('failed to create shader object')

        # NOTE(review): _source() is generation-guarded but s_tag is not
        # bumped here, so a node visited earlier in the same generation
        # contributes nothing — presumably intentional dedup; verify.
        all_source = [b'\n'.join(self._source())]
        prog = (c_char_p * len(all_source))(*all_source)
        length = (c_int * len(all_source))(-1)
        glShaderSourceARB(self.shader,
                          len(all_source),
                          cast(prog, POINTER(POINTER(c_char))),
                          length)
        glCompileShaderARB(self.shader)

        compile_status = c_int(0)
        glGetObjectParameterivARB(self.shader, GL_OBJECT_COMPILE_STATUS_ARB,
                                  byref(compile_status))
        if not compile_status.value:
            err = glsl_log(self.shader)
            glDeleteObjectARB(self.shader)
            self.shader = 0
            raise GLSLException('failed to compile shader', err)

    def compile(self):
        """Compile dependencies first, then this shader."""
        if self.isCompiled():
            return
        for d in self.dependencies:
            d.compile()
        self._compile()


class VertexShader(Shader):
    def shaderType(self):
        return GL_VERTEX_SHADER_ARB


class FragmentShader(Shader):
    def shaderType(self):
        return GL_FRAGMENT_SHADER_ARB


class ShaderProgram(object):
    """A linked GLSL program built from an optional vertex and fragment shader."""

    @classmethod
    def simple_program(cls, name, vertex_code, fragment_code):
        """Intended to cut boilerplate when doing simple shaders

        name : string with program name
        vertex_code : None or string with the vertex shader code
        fragment_code : None or string with the fragment shader code
        """
        shader_p = cls()
        if vertex_code:
            shader_p.setShader(VertexShader(name + '_vp', vertex_code))
        if fragment_code:
            shader_p.setShader(FragmentShader(name + '_fp', fragment_code))
        # link now to allow fail early
        shader_p.prog()
        return shader_p

    def __init__(self, vertex_shader=None, fragment_shader=None):
        self.vertex_shader = vertex_shader
        self.fragment_shader = fragment_shader
        self.program = 0  # GL program object handle; 0 == not linked yet

    def __del__(self):
        self.destroy()

    def destroy(self):
        """Release the GL program object, if any."""
        if self.program != 0:
            glDeleteObjectARB(self.program)
            # BUG FIX: zero the handle to prevent a double delete when
            # destroy() is followed by __del__.
            self.program = 0

    def setShader(self, shader):
        """Replace the vertex or fragment stage; invalidates any linked program."""
        if isinstance(shader, FragmentShader):
            self.fragment_shader = shader
        if isinstance(shader, VertexShader):
            self.vertex_shader = shader
        if self.program != 0:
            glDeleteObjectARB(self.program)
            # BUG FIX: previously the stale (deleted) handle was kept, so a
            # later prog() returned an invalid program instead of relinking.
            self.program = 0

    def link(self):
        """Compile both stages (flat), link them, and cache the program handle.

        Raises:
            GLSLException: on program creation or link failure (linker log
                attached).
        """
        if self.vertex_shader is not None:
            self.vertex_shader.compileFlat()
        if self.fragment_shader is not None:
            self.fragment_shader.compileFlat()

        self.program = glCreateProgramObjectARB()
        if self.program == 0:
            raise GLSLException('failed to create program object')

        if self.vertex_shader is not None:
            self.vertex_shader.attachFlat(self.program)
        if self.fragment_shader is not None:
            self.fragment_shader.attachFlat(self.program)

        glLinkProgramARB(self.program)

        link_status = c_int(0)
        glGetObjectParameterivARB(self.program, GL_OBJECT_LINK_STATUS_ARB,
                                  byref(link_status))
        if link_status.value == 0:
            err = glsl_log(self.program)
            glDeleteObjectARB(self.program)
            self.program = 0
            raise GLSLException('failed to link shader', err)

        # Reset the per-class uniform/vertex location caches on relink;
        # old locations are invalid for the new program.
        self.__class__._uloc_ = {}
        self.__class__._vloc_ = {}
        return self.program

    def prog(self):
        """Return the linked program handle, linking on first use."""
        if self.program:
            return self.program
        return self.link()

    def install(self):
        """Make this program current."""
        p = self.prog()
        if p != 0:
            glUseProgramObjectARB(p)

    def uninstall(self):
        """Restore fixed-function (no program)."""
        glUseProgramObjectARB(0)

    def uniformLoc(self, var):
        """Return the cached uniform location for *var*, querying GL on a miss."""
        var = compat.asciibytes(var)
        try:
            return self.__class__._uloc_[var]
        # BUG FIX: was a bare `except:`.  KeyError is the cache miss;
        # AttributeError happens when uniformLoc is called before the first
        # link() has created the _uloc_ cache.
        except (KeyError, AttributeError):
            if self.program == 0:
                self.link()
            self.__class__._uloc_[var] = v = glGetUniformLocationARB(
                self.program, var)
            return v

    def uset1F(self, var, x):
        glUniform1fARB(self.uniformLoc(var), x)

    def uset2F(self, var, x, y):
        glUniform2fARB(self.uniformLoc(var), x, y)

    def uset3F(self, var, x, y, z):
        glUniform3fARB(self.uniformLoc(var), x, y, z)

    def uset4F(self, var, x, y, z, w):
        glUniform4fARB(self.uniformLoc(var), x, y, z, w)

    def uset1I(self, var, x):
        glUniform1iARB(self.uniformLoc(var), x)

    def uset2I(self, var, x, y):
        glUniform2iARB(self.uniformLoc(var), x, y)

    def uset3I(self, var, x, y, z):
        glUniform3iARB(self.uniformLoc(var), x, y, z)

    def usetM4F(self, var, matrix, transpose=False):
        # Some matrices read back from OpenGL arrive as the transpose of the
        # matrix we want to feed, so the `transpose` parameter comes in handy.
        glUniformMatrix4fvARB(self.uniformLoc(var), 1, transpose,
                              (c_float * 16)(*matrix))

    def usetTex(self, var, unit, target, tx):
        """
        var : name of variable to write
        unit : texture unit
        target : target for glBindTexture
        tx : texture ID
        """
        glUniform1iARB(self.uniformLoc(var), unit)
        glActiveTexture(GL_TEXTURE0 + unit)
        glBindTexture(target, tx)


__all__ = ['VertexShader', 'FragmentShader', 'ShaderProgram', 'GLSLException']
erdc-cm/air-water-vv
refs/heads/master
3d/Penny_Price_Diffraction/Penny_Price_Diffraction_01/redist_p.py
4
from proteus import *
from proteus.default_p import *
from math import *
from tank3D import *
from proteus.mprans import RDLS
"""
The redistancing equation in the sloshbox test problem.
"""

# Use the redistancing level-set (RDLS) model implementation from proteus.
LevelModelType = RDLS.LevelModel

# PDE coefficients.  applyRedistancing, epsFact_redistance, useMetrics and
# backgroundDiffusionFactor all come from tank3D via the star import above.
# nModelId=2 / rdModelId=3 are indices into the split-operator model list --
# presumably the level-set and redistancing models; verify against the _so file.
coefficients = RDLS.Coefficients(applyRedistancing=applyRedistancing,
                                 epsFact=epsFact_redistance,
                                 nModelId=2,
                                 rdModelId=3,
                                 useMetrics=useMetrics,
                                 backgroundDiffusionFactor=backgroundDiffusionFactor)


def getDBC_rd(x, flag):
    # No strong Dirichlet boundary conditions for the redistancing equation.
    pass

dirichletConditions = {0: getDBC_rd}
# Pin the zero level set in place with weak Dirichlet conditions instead.
weakDirichletConditions = {0: RDLS.setZeroLSweakDirichletBCsSimple}

advectiveFluxBoundaryConditions = {}
diffusiveFluxBoundaryConditions = {0: {}}


class PerturbedSurface_phi:
    # Initial condition: signed distance to the free surface, supplied by
    # tank3D.signedDistance (star import above).
    def uOfXT(self, x, t):
        return signedDistance(x)

initialConditions = {0: PerturbedSurface_phi()}
affo/nova
refs/heads/master
nova/tests/unit/objects/test_fixed_ip.py
46
# Copyright 2014 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime

import iso8601
import mock
import netaddr
from oslo_utils import timeutils

from nova import exception
from nova.objects import fixed_ip
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_network
from nova.tests.unit.objects import test_objects

# A fake fixed_ips DB row, used as the canned return value for the mocked
# nova.db calls below.
fake_fixed_ip = {
    'created_at': None,
    'updated_at': None,
    'deleted_at': None,
    'deleted': False,
    'id': 123,
    'address': '192.168.1.100',
    'network_id': None,
    'virtual_interface_id': None,
    'instance_uuid': None,
    'allocated': False,
    'leased': False,
    'reserved': False,
    'host': None,
    'network': None,
    'virtual_interface': None,
    'floating_ips': [],
    }


class _TestFixedIPObject(object):
    """Shared test implementations for FixedIP/FixedIPList.

    Mixed into both a local and a remote (RPC) test class at the bottom of
    the module; every DB access is mocked, so only object plumbing is tested.
    """

    def _compare(self, obj, db_obj):
        # Field-by-field comparison of a FixedIP object against a DB row dict.
        for field in obj.fields:
            if field in ('default_route', 'floating_ips'):
                # Not represented 1:1 in the fake DB row; skipped.
                continue
            if field in fixed_ip.FIXED_IP_OPTIONAL_ATTRS:
                # Optional relations are compared by uuid only, and only
                # when both sides actually carry a value.
                if obj.obj_attr_is_set(field) and db_obj[field] is not None:
                    obj_val = obj[field].uuid
                    db_val = db_obj[field]['uuid']
                else:
                    continue
            else:
                obj_val = obj[field]
                db_val = db_obj[field]
            if isinstance(obj_val, netaddr.IPAddress):
                obj_val = str(obj_val)
            self.assertEqual(db_val, obj_val)

    @mock.patch('nova.db.fixed_ip_get')
    def test_get_by_id(self, get):
        get.return_value = fake_fixed_ip
        fixedip = fixed_ip.FixedIP.get_by_id(self.context, 123)
        get.assert_called_once_with(self.context, 123, get_network=False)
        self._compare(fixedip, fake_fixed_ip)

    @mock.patch('nova.db.fixed_ip_get')
    @mock.patch('nova.db.network_get')
    def test_get_by_id_with_extras(self, network_get, fixed_get):
        db_fixed = dict(fake_fixed_ip,
                        network=test_network.fake_network)
        fixed_get.return_value = db_fixed
        fixedip = fixed_ip.FixedIP.get_by_id(self.context, 123,
                                             expected_attrs=['network'])
        fixed_get.assert_called_once_with(self.context, 123,
                                          get_network=True)
        self._compare(fixedip, db_fixed)
        self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
        # The network must come from the joined row, not a separate lookup.
        self.assertFalse(network_get.called)

    @mock.patch('nova.db.fixed_ip_get_by_address')
    def test_get_by_address(self, get):
        get.return_value = fake_fixed_ip
        fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4')
        get.assert_called_once_with(self.context, '1.2.3.4',
                                    columns_to_join=[])
        self._compare(fixedip, fake_fixed_ip)

    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.instance_get')
    def test_get_by_address_with_extras(self, instance_get, network_get,
                                        fixed_get):
        db_fixed = dict(fake_fixed_ip, network=test_network.fake_network,
                        instance=fake_instance.fake_db_instance())
        fixed_get.return_value = db_fixed
        fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4',
                                                  expected_attrs=['network',
                                                                  'instance'])
        fixed_get.assert_called_once_with(self.context, '1.2.3.4',
                                          columns_to_join=['network',
                                                           'instance'])
        self._compare(fixedip, db_fixed)
        self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
        self.assertEqual(db_fixed['instance']['uuid'], fixedip.instance.uuid)
        # Both relations come from the joined row, not separate lookups.
        self.assertFalse(network_get.called)
        self.assertFalse(instance_get.called)

    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.instance_get')
    def test_get_by_address_with_extras_deleted_instance(self, instance_get,
                                                         network_get,
                                                         fixed_get):
        # A joined instance of None (deleted) must surface as None, not
        # trigger a lazy-load.
        db_fixed = dict(fake_fixed_ip, network=test_network.fake_network,
                        instance=None)
        fixed_get.return_value = db_fixed
        fixedip = fixed_ip.FixedIP.get_by_address(self.context, '1.2.3.4',
                                                  expected_attrs=['network',
                                                                  'instance'])
        fixed_get.assert_called_once_with(self.context, '1.2.3.4',
                                          columns_to_join=['network',
                                                           'instance'])
        self._compare(fixedip, db_fixed)
        self.assertEqual(db_fixed['network']['uuid'], fixedip.network.uuid)
        self.assertIsNone(fixedip.instance)
        self.assertFalse(network_get.called)
        self.assertFalse(instance_get.called)

    @mock.patch('nova.db.fixed_ip_get_by_floating_address')
    def test_get_by_floating_address(self, get):
        get.return_value = fake_fixed_ip
        fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context,
                                                           '1.2.3.4')
        get.assert_called_once_with(self.context, '1.2.3.4')
        self._compare(fixedip, fake_fixed_ip)

    @mock.patch('nova.db.fixed_ip_get_by_floating_address')
    def test_get_by_floating_address_none(self, get):
        get.return_value = None
        fixedip = fixed_ip.FixedIP.get_by_floating_address(self.context,
                                                           '1.2.3.4')
        get.assert_called_once_with(self.context, '1.2.3.4')
        self.assertIsNone(fixedip)

    @mock.patch('nova.db.fixed_ip_get_by_network_host')
    def test_get_by_network_and_host(self, get):
        get.return_value = fake_fixed_ip
        fixedip = fixed_ip.FixedIP.get_by_network_and_host(self.context,
                                                           123, 'host')
        get.assert_called_once_with(self.context, 123, 'host')
        self._compare(fixedip, fake_fixed_ip)

    @mock.patch('nova.db.fixed_ip_associate')
    def test_associate(self, associate):
        associate.return_value = fake_fixed_ip
        fixedip = fixed_ip.FixedIP.associate(self.context, '1.2.3.4',
                                             'fake-uuid')
        associate.assert_called_with(self.context, '1.2.3.4', 'fake-uuid',
                                     network_id=None, reserved=False)
        self._compare(fixedip, fake_fixed_ip)

    @mock.patch('nova.db.fixed_ip_associate_pool')
    def test_associate_pool(self, associate):
        associate.return_value = fake_fixed_ip
        fixedip = fixed_ip.FixedIP.associate_pool(self.context, 123,
                                                  'fake-uuid', 'host')
        associate.assert_called_with(self.context, 123,
                                     instance_uuid='fake-uuid',
                                     host='host')
        self._compare(fixedip, fake_fixed_ip)

    @mock.patch('nova.db.fixed_ip_disassociate')
    def test_disassociate_by_address(self, disassociate):
        fixed_ip.FixedIP.disassociate_by_address(self.context, '1.2.3.4')
        disassociate.assert_called_with(self.context, '1.2.3.4')

    @mock.patch('nova.db.fixed_ip_disassociate_all_by_timeout')
    def test_disassociate_all_by_timeout(self, disassociate):
        now = timeutils.utcnow()
        # Round-trip through ISO formatting to get the tz-aware value the
        # object layer is expected to pass to the DB API.
        now_tz = timeutils.parse_isotime(
            timeutils.isotime(now)).replace(
                tzinfo=iso8601.iso8601.Utc())
        disassociate.return_value = 123
        result = fixed_ip.FixedIP.disassociate_all_by_timeout(self.context,
                                                              'host',
                                                              now)
        self.assertEqual(123, result)
        # NOTE(danms): be pedantic about timezone stuff
        args, kwargs = disassociate.call_args_list[0]
        self.assertEqual(now_tz, args[2])
        self.assertEqual((self.context, 'host'), args[:2])
        self.assertEqual({}, kwargs)

    @mock.patch('nova.db.fixed_ip_create')
    def test_create(self, create):
        create.return_value = fake_fixed_ip
        fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4')
        fixedip.create()
        create.assert_called_once_with(
            self.context, {'address': '1.2.3.4'})
        self._compare(fixedip, fake_fixed_ip)

    @mock.patch('nova.db.fixed_ip_update')
    def test_save(self, update):
        update.return_value = fake_fixed_ip
        fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4',
                                   instance_uuid='fake-uuid')
        # Changing the address is not allowed on save().
        self.assertRaises(exception.ObjectActionError, fixedip.save)
        fixedip.obj_reset_changes(['address'])
        fixedip.save()
        update.assert_called_once_with(self.context, '1.2.3.4',
                                       {'instance_uuid': 'fake-uuid'})

    @mock.patch('nova.db.fixed_ip_disassociate')
    def test_disassociate(self, disassociate):
        fixedip = fixed_ip.FixedIP(context=self.context, address='1.2.3.4',
                                   instance_uuid='fake-uuid')
        fixedip.obj_reset_changes()
        fixedip.disassociate()
        disassociate.assert_called_once_with(self.context, '1.2.3.4')
        self.assertIsNone(fixedip.instance_uuid)

    @mock.patch('nova.db.fixed_ip_get_all')
    def test_get_all(self, get_all):
        get_all.return_value = [fake_fixed_ip]
        fixedips = fixed_ip.FixedIPList.get_all(self.context)
        self.assertEqual(1, len(fixedips))
        get_all.assert_called_once_with(self.context)
        self._compare(fixedips[0], fake_fixed_ip)

    @mock.patch('nova.db.fixed_ip_get_by_instance')
    def test_get_by_instance(self, get):
        get.return_value = [fake_fixed_ip]
        fixedips = fixed_ip.FixedIPList.get_by_instance_uuid(self.context,
                                                             'fake-uuid')
        self.assertEqual(1, len(fixedips))
        get.assert_called_once_with(self.context, 'fake-uuid')
        self._compare(fixedips[0], fake_fixed_ip)

    @mock.patch('nova.db.fixed_ip_get_by_host')
    def test_get_by_host(self, get):
        get.return_value = [fake_fixed_ip]
        fixedips = fixed_ip.FixedIPList.get_by_host(self.context, 'host')
        self.assertEqual(1, len(fixedips))
        get.assert_called_once_with(self.context, 'host')
        self._compare(fixedips[0], fake_fixed_ip)

    @mock.patch('nova.db.fixed_ips_by_virtual_interface')
    def test_get_by_virtual_interface_id(self, get):
        get.return_value = [fake_fixed_ip]
        fixedips = fixed_ip.FixedIPList.get_by_virtual_interface_id(
            self.context, 123)
        self.assertEqual(1, len(fixedips))
        get.assert_called_once_with(self.context, 123)
        self._compare(fixedips[0], fake_fixed_ip)

    def test_floating_ips_do_not_lazy_load(self):
        fixedip = fixed_ip.FixedIP()
        self.assertRaises(NotImplementedError,
                          lambda: fixedip.floating_ips)

    @mock.patch('nova.db.fixed_ip_bulk_create')
    def test_bulk_create(self, bulk):
        fixed_ips = [fixed_ip.FixedIP(address='192.168.1.1'),
                     fixed_ip.FixedIP(address='192.168.1.2')]
        fixed_ip.FixedIPList.bulk_create(self.context, fixed_ips)
        bulk.assert_called_once_with(self.context,
                                     [{'address': '192.168.1.1'},
                                      {'address': '192.168.1.2'}])

    @mock.patch('nova.db.network_get_associated_fixed_ips')
    def test_get_by_network(self, get):
        # Flattened row shape returned by network_get_associated_fixed_ips.
        info = {'address': '1.2.3.4',
                'instance_uuid': 'fake-uuid',
                'network_id': 0,
                'vif_id': 1,
                'vif_address': 'de:ad:be:ee:f0:00',
                'instance_hostname': 'fake-host',
                'instance_updated': datetime.datetime(1955, 11, 5),
                'instance_created': datetime.datetime(1955, 11, 5),
                'allocated': True,
                'leased': True,
                'default_route': True,
                }
        get.return_value = [info]
        fixed_ips = fixed_ip.FixedIPList.get_by_network(
            self.context, {'id': 0}, host='fake-host')
        get.assert_called_once_with(self.context, 0, host='fake-host')
        self.assertEqual(1, len(fixed_ips))
        fip = fixed_ips[0]
        self.assertEqual('1.2.3.4', str(fip.address))
        self.assertEqual('fake-uuid', fip.instance_uuid)
        self.assertEqual(0, fip.network_id)
        self.assertEqual(1, fip.virtual_interface_id)
        self.assertTrue(fip.allocated)
        self.assertTrue(fip.leased)
        # The flattened columns must be re-hydrated into stub sub-objects.
        self.assertEqual('fake-uuid', fip.instance.uuid)
        self.assertEqual('fake-host', fip.instance.hostname)
        self.assertIsInstance(fip.instance.created_at, datetime.datetime)
        self.assertIsInstance(fip.instance.updated_at, datetime.datetime)
        self.assertEqual(1, fip.virtual_interface.id)
        self.assertEqual(info['vif_address'], fip.virtual_interface.address)

    @mock.patch('nova.db.network_get_associated_fixed_ips')
    def test_backport_default_route(self, mock_get):
        # obj_make_compatible must strip default_route for version 1.1.
        info = {'address': '1.2.3.4',
                'instance_uuid': 'fake-uuid',
                'network_id': 0,
                'vif_id': 1,
                'vif_address': 'de:ad:be:ee:f0:00',
                'instance_hostname': 'fake-host',
                'instance_updated': datetime.datetime(1955, 11, 5),
                'instance_created': datetime.datetime(1955, 11, 5),
                'allocated': True,
                'leased': True,
                'default_route': True,
                }
        mock_get.return_value = [info]
        fixed_ips = fixed_ip.FixedIPList.get_by_network(
            self.context, {'id': 0}, host='fake-host')
        primitive = fixed_ips[0].obj_to_primitive()
        self.assertIn('default_route', primitive['nova_object.data'])
        fixed_ips[0].obj_make_compatible(primitive['nova_object.data'],
                                         '1.1')
        self.assertNotIn('default_route', primitive['nova_object.data'])


class TestFixedIPObject(test_objects._LocalTest,
                        _TestFixedIPObject):
    pass


class TestRemoteFixedIPObject(test_objects._RemoteTest,
                              _TestFixedIPObject):
    pass
rocky1001/PiBox
refs/heads/master
PiBox/PiHome/PiApp/forms.py
4
#coding=utf-8
'''
# The modules contains PiApp's forms
# Any issues or improvements please contact jacob-chen@iotwrt.com
'''
#/usr/bin/python
#coding: utf8
from django import forms
from models import *


def _bootstrapify(form, field_names):
    # Attach the Bootstrap "form-control" CSS class to each listed widget.
    for field_name in field_names:
        form.fields[field_name].widget.attrs.update({'class': 'form-control'})


class PiSettingsForm(forms.ModelForm):
    """Edit the server settings: listen ip/port and the registration switch."""

    class Meta:
        model = PiSettings
        fields = ('ip', 'port', 'enable_register')

    def __init__(self, *args, **kwargs):
        super(PiSettingsForm, self).__init__(*args, **kwargs)
        _bootstrapify(self, ('ip', 'port', 'enable_register'))


class PiAccountForm(forms.ModelForm):
    """Edit an existing account; the password change fields are optional."""

    class Meta:
        model = PiUser
        fields = ('first_name', 'last_name')

    password1 = forms.CharField(required=False, widget=forms.PasswordInput,
                                label='Password', initial="")
    password2 = forms.CharField(required=False, widget=forms.PasswordInput,
                                label='Confirm password', initial="")

    def clean(self):
        cleaned_data = super(PiAccountForm, self).clean()
        first = cleaned_data.get("password1")
        second = cleaned_data.get("password2")
        # Only validate the match when both entries were actually filled in.
        if first and second and first != second:
            msg = u"Password not Same"
            self._errors["password2"] = self.error_class([msg])
        return cleaned_data


class PiRegisterForm(forms.ModelForm):
    """Register a new user; both password fields are mandatory."""

    class Meta:
        model = PiUser
        fields = ('first_name', 'last_name', 'email')

    password1 = forms.CharField(widget=forms.PasswordInput,
                                label='Password')
    password2 = forms.CharField(widget=forms.PasswordInput,
                                label='Confirm password')

    def clean(self):
        cleaned_data = super(PiRegisterForm, self).clean()
        first = cleaned_data.get("password1")
        second = cleaned_data.get("password2")
        if first and second and first != second:
            msg = u"Password not Same"
            self._errors["password2"] = self.error_class([msg])
        return cleaned_data


class DeviceForm(forms.ModelForm):
    """Create or edit a device, including its map position (x, y)."""

    class Meta:
        model = Device
        fields = ('name', 'describe', 'location', 'x', 'y')

    def __init__(self, *args, **kwargs):
        super(DeviceForm, self).__init__(*args, **kwargs)
        _bootstrapify(self, ('name', 'describe', 'location', 'x', 'y'))


class HomeForm(forms.ModelForm):
    """Create or edit a home; only the name widget gets the CSS class."""

    class Meta:
        model = Home
        fields = ('name', 'img',)

    def __init__(self, *args, **kwargs):
        super(HomeForm, self).__init__(*args, **kwargs)
        _bootstrapify(self, ('name',))


class SensorForm(forms.ModelForm):
    """Create or edit a sensor definition."""

    class Meta:
        model = Sensor
        fields = ('name', 'describe', 'sensor_class', 'unit')

    def __init__(self, *args, **kwargs):
        super(SensorForm, self).__init__(*args, **kwargs)
        _bootstrapify(self, ('name', 'describe', 'sensor_class', 'unit'))
vmadura/salt-integration-testing
refs/heads/master
tests/sit/ecs_container_test.py
1
import unittest

from mock import MagicMock

from sit.ecs_container import Container
from helpers.sit_helper import SITHelper


class UserDataTest(unittest.TestCase):
    """Unit tests for Container's ECS container-definition helpers."""

    def setUp(self):
        configs_dir = 'tests/sit/configs'
        self.container = Container(configs_dir)
        # Pin every attribute the definition builder reads.
        self.container.master_ip = '1.2.3.4'
        self.container.env = 'qa'
        self.container.family = 'test'
        self.container.role = 'unit'
        self.container.MEMORY = 10
        self.container.sit_helper = SITHelper(configs_dir)
        # Stub out the config lookup so no real state files are needed.
        self.container.sit_helper.get_states_for_role = MagicMock(
            return_value=['server', 'php'])

    def test_environment_dictionary(self):
        env_entry = self.container.get_environment_dictionary('test', 'value')
        self.assertEquals({"name": 'test', "value": 'value'}, env_entry)

    def test_get_environment_variables(self):
        definitions = self.container.get_container_definitions()
        expected = {
            'memoryReservation': 256,
            'name': 'test',
            'image': 'dandb/salt_review:2015-8-7',
            'environment': [
                {'name': 'roles', 'value': 'server,php'},
                {'name': 'env', 'value': 'qa'},
                {'name': 'master', 'value': '1.2.3.4'},
                {'name': 'minion_id', 'value': 'test'},
            ],
            'memory': 10,
            'cpu': 512,
        }
        self.assertEquals(definitions, expected)
GoogleCloudPlatform/appengine-mapreduce
refs/heads/master
python/src/mapreduce/api/map_job/input_reader.py
5
#!/usr/bin/env python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Input Reader interface for map job."""

from mapreduce import errors
from mapreduce import json_util
from mapreduce import shard_life_cycle

# pylint: disable=protected-access
# pylint: disable=invalid-name


class InputReader(shard_life_cycle._ShardLifeCycle, json_util.JsonMixin):
  """Abstract base class for input readers.

  InputReader's lifecycle:
  1. validate() is called to validate JobConfig.
  2. split_input is called to split inputs based on map_job.JobConfig.
     The class method creates a set of InputReader instances.
  3. begin_shard/end_shard/begin_slice/end_slice are called at the time
     implied by the names.
  4. next() is called by each shard on each instance. The output of next()
     is fed into JobConfig.mapper instance.
  5. to_json()/from_json() are used to persist reader's state across
     multiple slices.
  """

  # Counters emitted by concrete readers via the slice context.
  # Bytes read.
  COUNTER_IO_READ_BYTE = "io-read-byte"
  # Milliseconds spent reading data.
  COUNTER_IO_READ_MSEC = "io-read-msec"

  def __init__(self):
    # Set by begin_slice()/end_slice(); None outside an active slice.
    self._slice_ctx = None

  def __iter__(self):
    return self

  def next(self):
    """Returns the next input from this input reader.

    Returns:
      The next input read by this input reader. The return value is
      fed into mapper.

    Raises:
      StopIteration when no more item is left.
    """
    raise NotImplementedError("next() not implemented in %s" % self.__class__)

  @classmethod
  def from_json(cls, state):
    """Creates an instance of the InputReader for the given state.

    Args:
      state: The InputReader state as returned by to_json.

    Returns:
      An instance of the InputReader that can resume iteration.
    """
    raise NotImplementedError("from_json() not implemented in %s" % cls)

  def to_json(self):
    """Returns input reader state for the remaining inputs.

    Returns:
      A json-serializable state for the InputReader.
    """
    raise NotImplementedError("to_json() not implemented in %s" %
                              self.__class__)

  @classmethod
  def split_input(cls, job_config):
    """Returns an iterator of input readers.

    This method returns a container of input readers,
    one for each shard. The container must have __iter__ defined.
    http://docs.python.org/2/reference/datamodel.html#object.__iter__

    This method should try to split inputs among readers evenly.

    Args:
      job_config: an instance of map_job.JobConfig.

    Returns:
      An iterator of input readers.
    """
    raise NotImplementedError("split_input() not implemented in %s" % cls)

  @classmethod
  def validate(cls, job_config):
    """Validates relevant parameters.

    This method can validate fields which it deems relevant.

    Args:
      job_config: an instance of map_job.JobConfig.

    Raises:
      errors.BadReaderParamsError: required parameters are missing or invalid.
    """
    if job_config.input_reader_cls != cls:
      raise errors.BadReaderParamsError(
          "Expect input reader class %r, got %r." %
          (cls, job_config.input_reader_cls))

  def begin_slice(self, slice_ctx):
    """Keeps an internal reference to slice_ctx.

    Args:
      slice_ctx: SliceContext singleton instance for this slice.
    """
    self._slice_ctx = slice_ctx

  def end_slice(self, slice_ctx):
    """Drops the internal reference to slice_ctx.

    Args:
      slice_ctx: SliceContext singleton instance for this slice.
    """
    self._slice_ctx = None

  @classmethod
  def params_to_json(cls, params):
    """Translates JobConfig.input_reader_params to json serializable format.

    For most reader, this may be an identity transformation.

    Args:
      params: JobConfig.input_reader_params.

    Returns:
      The json serializable format of params.
    """
    return params

  @classmethod
  def params_from_json(cls, json_params):
    """Reverse function of params_to_json."""
    return json_params
gimite/personfinder
refs/heads/master
app/vendors/chardet/euctwprober.py
289
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is mozilla.org code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .mbcharsetprober import MultiByteCharSetProber from .codingstatemachine import CodingStateMachine from .chardistribution import EUCTWDistributionAnalysis from .mbcssm import EUCTW_SM_MODEL class EUCTWProber(MultiByteCharSetProber): def __init__(self): super(EUCTWProber, self).__init__() self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL) self.distribution_analyzer = EUCTWDistributionAnalysis() self.reset() @property def charset_name(self): return "EUC-TW" @property def language(self): return "Taiwan"
digitalocean/netbox
refs/heads/develop
netbox/virtualization/models.py
1
from django.conf import settings from django.contrib.contenttypes.fields import GenericRelation from django.core.exceptions import ValidationError from django.db import models from django.urls import reverse from taggit.managers import TaggableManager from dcim.models import BaseInterface, Device from extras.models import ChangeLoggedModel, ConfigContextModel, CustomFieldModel, ObjectChange, TaggedItem from extras.querysets import ConfigContextModelQuerySet from extras.utils import extras_features from utilities.fields import NaturalOrderingField from utilities.ordering import naturalize_interface from utilities.query_functions import CollateAsChar from utilities.querysets import RestrictedQuerySet from utilities.utils import serialize_object from .choices import * __all__ = ( 'Cluster', 'ClusterGroup', 'ClusterType', 'VirtualMachine', 'VMInterface', ) # # Cluster types # class ClusterType(ChangeLoggedModel): """ A type of Cluster. """ name = models.CharField( max_length=100, unique=True ) slug = models.SlugField( max_length=100, unique=True ) description = models.CharField( max_length=200, blank=True ) objects = RestrictedQuerySet.as_manager() csv_headers = ['name', 'slug', 'description'] class Meta: ordering = ['name'] def __str__(self): return self.name def get_absolute_url(self): return "{}?type={}".format(reverse('virtualization:cluster_list'), self.slug) def to_csv(self): return ( self.name, self.slug, self.description, ) # # Cluster groups # class ClusterGroup(ChangeLoggedModel): """ An organizational group of Clusters. 
""" name = models.CharField( max_length=100, unique=True ) slug = models.SlugField( max_length=100, unique=True ) description = models.CharField( max_length=200, blank=True ) objects = RestrictedQuerySet.as_manager() csv_headers = ['name', 'slug', 'description'] class Meta: ordering = ['name'] def __str__(self): return self.name def get_absolute_url(self): return "{}?group={}".format(reverse('virtualization:cluster_list'), self.slug) def to_csv(self): return ( self.name, self.slug, self.description, ) # # Clusters # @extras_features('custom_fields', 'custom_links', 'export_templates', 'webhooks') class Cluster(ChangeLoggedModel, CustomFieldModel): """ A cluster of VirtualMachines. Each Cluster may optionally be associated with one or more Devices. """ name = models.CharField( max_length=100, unique=True ) type = models.ForeignKey( to=ClusterType, on_delete=models.PROTECT, related_name='clusters' ) group = models.ForeignKey( to=ClusterGroup, on_delete=models.PROTECT, related_name='clusters', blank=True, null=True ) tenant = models.ForeignKey( to='tenancy.Tenant', on_delete=models.PROTECT, related_name='clusters', blank=True, null=True ) site = models.ForeignKey( to='dcim.Site', on_delete=models.PROTECT, related_name='clusters', blank=True, null=True ) comments = models.TextField( blank=True ) tags = TaggableManager(through=TaggedItem) objects = RestrictedQuerySet.as_manager() csv_headers = ['name', 'type', 'group', 'site', 'comments'] clone_fields = [ 'type', 'group', 'tenant', 'site', ] class Meta: ordering = ['name'] def __str__(self): return self.name def get_absolute_url(self): return reverse('virtualization:cluster', args=[self.pk]) def clean(self): super().clean() # If the Cluster is assigned to a Site, verify that all host Devices belong to that Site. 
if self.pk and self.site: nonsite_devices = Device.objects.filter(cluster=self).exclude(site=self.site).count() if nonsite_devices: raise ValidationError({ 'site': "{} devices are assigned as hosts for this cluster but are not in site {}".format( nonsite_devices, self.site ) }) def to_csv(self): return ( self.name, self.type.name, self.group.name if self.group else None, self.site.name if self.site else None, self.tenant.name if self.tenant else None, self.comments, ) # # Virtual machines # @extras_features('custom_fields', 'custom_links', 'export_templates', 'webhooks') class VirtualMachine(ChangeLoggedModel, ConfigContextModel, CustomFieldModel): """ A virtual machine which runs inside a Cluster. """ cluster = models.ForeignKey( to='virtualization.Cluster', on_delete=models.PROTECT, related_name='virtual_machines' ) tenant = models.ForeignKey( to='tenancy.Tenant', on_delete=models.PROTECT, related_name='virtual_machines', blank=True, null=True ) platform = models.ForeignKey( to='dcim.Platform', on_delete=models.SET_NULL, related_name='virtual_machines', blank=True, null=True ) name = models.CharField( max_length=64 ) status = models.CharField( max_length=50, choices=VirtualMachineStatusChoices, default=VirtualMachineStatusChoices.STATUS_ACTIVE, verbose_name='Status' ) role = models.ForeignKey( to='dcim.DeviceRole', on_delete=models.PROTECT, related_name='virtual_machines', limit_choices_to={'vm_role': True}, blank=True, null=True ) primary_ip4 = models.OneToOneField( to='ipam.IPAddress', on_delete=models.SET_NULL, related_name='+', blank=True, null=True, verbose_name='Primary IPv4' ) primary_ip6 = models.OneToOneField( to='ipam.IPAddress', on_delete=models.SET_NULL, related_name='+', blank=True, null=True, verbose_name='Primary IPv6' ) vcpus = models.PositiveSmallIntegerField( blank=True, null=True, verbose_name='vCPUs' ) memory = models.PositiveIntegerField( blank=True, null=True, verbose_name='Memory (MB)' ) disk = models.PositiveIntegerField( blank=True, 
null=True, verbose_name='Disk (GB)' ) comments = models.TextField( blank=True ) secrets = GenericRelation( to='secrets.Secret', content_type_field='assigned_object_type', object_id_field='assigned_object_id', related_query_name='virtual_machine' ) tags = TaggableManager(through=TaggedItem) objects = ConfigContextModelQuerySet.as_manager() csv_headers = [ 'name', 'status', 'role', 'cluster', 'tenant', 'platform', 'vcpus', 'memory', 'disk', 'comments', ] clone_fields = [ 'cluster', 'tenant', 'platform', 'status', 'role', 'vcpus', 'memory', 'disk', ] class Meta: ordering = ('name', 'pk') # Name may be non-unique unique_together = [ ['cluster', 'tenant', 'name'] ] def __str__(self): return self.name def get_absolute_url(self): return reverse('virtualization:virtualmachine', args=[self.pk]) def validate_unique(self, exclude=None): # Check for a duplicate name on a VM assigned to the same Cluster and no Tenant. This is necessary # because Django does not consider two NULL fields to be equal, and thus will not trigger a violation # of the uniqueness constraint without manual intervention. if self.tenant is None and VirtualMachine.objects.exclude(pk=self.pk).filter( name=self.name, cluster=self.cluster, tenant__isnull=True ): raise ValidationError({ 'name': 'A virtual machine with this name already exists in the assigned cluster.' 
}) super().validate_unique(exclude) def clean(self): super().clean() # Validate primary IP addresses interfaces = self.interfaces.all() for field in ['primary_ip4', 'primary_ip6']: ip = getattr(self, field) if ip is not None: if ip.assigned_object in interfaces: pass elif ip.nat_inside is not None and ip.nat_inside.assigned_object in interfaces: pass else: raise ValidationError({ field: f"The specified IP address ({ip}) is not assigned to this VM.", }) def to_csv(self): return ( self.name, self.get_status_display(), self.role.name if self.role else None, self.cluster.name, self.tenant.name if self.tenant else None, self.platform.name if self.platform else None, self.vcpus, self.memory, self.disk, self.comments, ) def get_status_class(self): return VirtualMachineStatusChoices.CSS_CLASSES.get(self.status) @property def primary_ip(self): if settings.PREFER_IPV4 and self.primary_ip4: return self.primary_ip4 elif self.primary_ip6: return self.primary_ip6 elif self.primary_ip4: return self.primary_ip4 else: return None @property def site(self): return self.cluster.site # # Interfaces # @extras_features('export_templates', 'webhooks') class VMInterface(BaseInterface): virtual_machine = models.ForeignKey( to='virtualization.VirtualMachine', on_delete=models.CASCADE, related_name='interfaces' ) name = models.CharField( max_length=64 ) _name = NaturalOrderingField( target_field='name', naturalize_function=naturalize_interface, max_length=100, blank=True ) description = models.CharField( max_length=200, blank=True ) untagged_vlan = models.ForeignKey( to='ipam.VLAN', on_delete=models.SET_NULL, related_name='vminterfaces_as_untagged', null=True, blank=True, verbose_name='Untagged VLAN' ) tagged_vlans = models.ManyToManyField( to='ipam.VLAN', related_name='vminterfaces_as_tagged', blank=True, verbose_name='Tagged VLANs' ) ip_addresses = GenericRelation( to='ipam.IPAddress', content_type_field='assigned_object_type', object_id_field='assigned_object_id', 
related_query_name='vminterface' ) tags = TaggableManager( through=TaggedItem, related_name='vminterface' ) objects = RestrictedQuerySet.as_manager() csv_headers = [ 'virtual_machine', 'name', 'enabled', 'mac_address', 'mtu', 'description', 'mode', ] class Meta: verbose_name = 'interface' ordering = ('virtual_machine', CollateAsChar('_name')) unique_together = ('virtual_machine', 'name') def __str__(self): return self.name def get_absolute_url(self): return reverse('virtualization:vminterface', kwargs={'pk': self.pk}) def to_csv(self): return ( self.virtual_machine.name, self.name, self.enabled, self.mac_address, self.mtu, self.description, self.get_mode_display(), ) def clean(self): super().clean() # Validate untagged VLAN if self.untagged_vlan and self.untagged_vlan.site not in [self.virtual_machine.site, None]: raise ValidationError({ 'untagged_vlan': f"The untagged VLAN ({self.untagged_vlan}) must belong to the same site as the " f"interface's parent virtual machine, or it must be global" }) def to_objectchange(self, action): # Annotate the parent VirtualMachine return ObjectChange( changed_object=self, object_repr=str(self), action=action, related_object=self.virtual_machine, object_data=serialize_object(self) ) @property def parent(self): return self.virtual_machine @property def count_ipaddresses(self): return self.ip_addresses.count()
wwiitanen/Catlang
refs/heads/master
cat/stack.py
2
""" Basic stack functionality. """ from collections import deque class Stack: def __init__(self, initial=None): """ >>> s = Stack() >>> s _empty_ >>> s = Stack([1,2,3]) >>> s ===> 1 2 3 """ if initial is None: initial = [] self._stack = deque(initial) self._aux = deque() def push(self, what, multi=False): """ >>> s = Stack() >>> s.push(1) >>> s ===> 1 >>> s.push([2, 3], multi=True) >>> s ===> 1 2 3 >>> s.push([4, 5]) >>> s ===> 1 2 3 [4, 5] """ if multi: self._stack.extend(what) else: self._stack.append(what) def pop(self): """ >>> s = Stack([1]) >>> s.pop() 1 >>> s _empty_ """ return self._stack.pop() def push_aux( self, what, multi=False ) : ''' ''' if multi : self._aux.extend( what ) else : self._aux.append( what ) def pop_aux( self, n=1 ) : ''' ''' if n == 1 : return self._aux.pop() return [self._aux.pop() for _ in range(n)] def pop_list(self): """ >>> s = Stack(['a,b,c']) >>> s.pop_list() ['a', 'b', 'c'] """ item = self.pop() if isinstance(item, basestring): item = item.strip() if item[0] in "[(" : return eval( item ) else : return [x for x in item.split(',') if x] elif isinstance(item, (list, tuple)): return item else: return [item] def __repr__(self): if not self._stack: return '===> _empty_' return '===> %s' % ' '.join(repr(x) for x in self._stack) def peek(self): """ >>> s = Stack([1, 2]) >>> s.peek() 2 >>> s ===> 1 2 """ return self._stack[-1] def peek_n(self, n): """ >>> s = Stack([1, 2, 4, 8, 16]) >>> s.peek_n(3) 2 """ return self._stack[-1 - n] def pop_2(self): """ >>> s = Stack([2, 4, 6]) >>> s.pop_2() (6, 4) """ return self._stack.pop(), self._stack.pop() def pop_n(self, n): """ >>> s = Stack([3, 4, 5, 6]) >>> s.pop_n(3) [6, 5, 4] >>> s ===> 3 """ return list([self._stack.pop() for _ in range(n)]) def pop_all(self): """ >>> s = Stack([9, 8, 7]) >>> s.pop_all() [7, 8, 9] >>> s _empty_ """ return self.pop_n(self.length()) def length(self): """ >>> s = Stack([1, 2, 3]) >>> s.length() 3 """ return len(self._stack) def clear(self): """ >>> s = Stack([20, 
30, 40]) >>> s.clear() >>> s _empty_ """ self._stack.clear() def clear_to(self, n): """ >>> s = Stack([90, 40, 10, 1]) >>> s.clear_to(3) >>> s ===> 90 """ for _ in range(n): self._stack.pop() def reverse( self ) : '''Reverses the order of elements in the stack''' self._stack.reverse() def remove( self, item ) : '''Removes first occurrence of 'item' from the stack''' self._stack.remove( item ) def raw( self ) : return self._stack def to_list(self): return list(self._stack) def __getitem__(self, index): return self._stack[index] def __setitem__(self, index, value): self._stack[index] = value def format( self, alt_format=False ) : '''Format the stack contents for printing :param format: choose one line output (default); or multi-line output :type format: boolean :rtype: string ''' n = len( self._stack ) ix = 0 if not n : return "===> _empty_" elif alt_format : txt = "stack" for i in range( 1, n + 1 ) : if n > 9 : txt += "[% 3d]: %s\n " % (ix, str(self._stack[n-i])) else : txt += "[% 2d]: %s\n " % (ix, str(self._stack[n-i])) ix -= 1 return txt.strip() else : items = [str(x) for x in self._stack] return "===> " + " ".join(items)
ioram7/keystone-federado-pgid2013
refs/heads/master
build/sqlalchemy/test/orm/test_immediate_load.py
2
"""basic tests of lazy loaded attributes""" from test.lib import testing from sqlalchemy.orm import mapper, relationship, create_session, immediateload from test.lib.testing import eq_ from test.orm import _fixtures class ImmediateTest(_fixtures.FixtureTest): run_inserts = 'once' run_deletes = None def test_basic_option(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties={ 'addresses':relationship(Address) }) sess = create_session() l = sess.query(User).options(immediateload(User.addresses)).filter(users.c.id==7).all() eq_(len(sess.identity_map), 2) sess.close() eq_( [User(id=7, addresses=[Address(id=1, email_address='jack@bean.com')])], l ) def test_basic(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties={ 'addresses':relationship(Address, lazy='immediate') }) sess = create_session() l = sess.query(User).filter(users.c.id==7).all() eq_(len(sess.identity_map), 2) sess.close() eq_( [User(id=7, addresses=[Address(id=1, email_address='jack@bean.com')])], l )
phenoxim/cinder
refs/heads/master
cinder/tests/unit/targets/test_iser_driver.py
3
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from cinder.tests.unit.targets import targets_fixture as tf from cinder import utils from cinder.volume.targets import lio from cinder.volume.targets import tgt class TestIserTgtDriver(tf.TargetDriverFixture): """Unit tests for the iSER TGT flow""" def setUp(self): super(TestIserTgtDriver, self).setUp() self.configuration.target_protocol = 'iser' self.target = tgt.TgtAdm(root_helper=utils.get_root_helper(), configuration=self.configuration) def test_iscsi_protocol(self): self.assertEqual('iser', self.target.iscsi_protocol) @mock.patch.object(tgt.TgtAdm, '_get_iscsi_properties') def test_initialize_connection(self, mock_get_iscsi): connector = {'initiator': 'fake_init'} mock_get_iscsi.return_value = {} expected_return = {'driver_volume_type': 'iser', 'data': {}} self.assertEqual(expected_return, self.target.initialize_connection(self.testvol, connector)) class TestIserLioAdmDriver(tf.TargetDriverFixture): """Unit tests for the iSER LIO flow""" def setUp(self): super(TestIserLioAdmDriver, self).setUp() self.configuration.target_protocol = 'iser' with mock.patch.object(lio.LioAdm, '_verify_rtstool'): self.target = lio.LioAdm(root_helper=utils.get_root_helper(), configuration=self.configuration) self.target.db = mock.MagicMock( volume_get=lambda x, y: {'provider_auth': 'IncomingUser foo bar'}) def test_iscsi_protocol(self): self.assertEqual('iser', self.target.iscsi_protocol) @mock.patch('cinder.utils.execute') 
@mock.patch.object(lio.LioAdm, '_get_iscsi_properties') def test_initialize_connection(self, mock_get_iscsi, mock_execute): connector = {'initiator': 'fake_init'} mock_get_iscsi.return_value = {} ret = self.target.initialize_connection(self.testvol, connector) driver_volume_type = ret['driver_volume_type'] self.assertEqual('iser', driver_volume_type)
Denisolt/IEEE-NYIT-MA
refs/heads/master
local/lib/python2.7/site-packages/pip/index.py
336
"""Routines related to PyPI, indexes""" from __future__ import absolute_import import logging import cgi from collections import namedtuple import itertools import sys import os import re import mimetypes import posixpath import warnings from pip._vendor.six.moves.urllib import parse as urllib_parse from pip._vendor.six.moves.urllib import request as urllib_request from pip.compat import ipaddress from pip.utils import ( cached_property, splitext, normalize_path, ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, ) from pip.utils.deprecation import RemovedInPip10Warning from pip.utils.logging import indent_log from pip.utils.packaging import check_requires_python from pip.exceptions import ( DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename, UnsupportedWheel, ) from pip.download import HAS_TLS, is_url, path_to_url, url_to_path from pip.wheel import Wheel, wheel_ext from pip.pep425tags import get_supported from pip._vendor import html5lib, requests, six from pip._vendor.packaging.version import parse as parse_version from pip._vendor.packaging.utils import canonicalize_name from pip._vendor.packaging import specifiers from pip._vendor.requests.exceptions import SSLError from pip._vendor.distlib.compat import unescape __all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder'] SECURE_ORIGINS = [ # protocol, hostname, port # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC) ("https", "*", "*"), ("*", "localhost", "*"), ("*", "127.0.0.0/8", "*"), ("*", "::1/128", "*"), ("file", "*", None), # ssh is always secure. 
("ssh", "*", "*"), ] logger = logging.getLogger(__name__) class InstallationCandidate(object): def __init__(self, project, version, location): self.project = project self.version = parse_version(version) self.location = location self._key = (self.project, self.version, self.location) def __repr__(self): return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format( self.project, self.version, self.location, ) def __hash__(self): return hash(self._key) def __lt__(self, other): return self._compare(other, lambda s, o: s < o) def __le__(self, other): return self._compare(other, lambda s, o: s <= o) def __eq__(self, other): return self._compare(other, lambda s, o: s == o) def __ge__(self, other): return self._compare(other, lambda s, o: s >= o) def __gt__(self, other): return self._compare(other, lambda s, o: s > o) def __ne__(self, other): return self._compare(other, lambda s, o: s != o) def _compare(self, other, method): if not isinstance(other, InstallationCandidate): return NotImplemented return method(self._key, other._key) class PackageFinder(object): """This finds packages. This is meant to match easy_install's technique for looking for packages, by reading pages and looking for appropriate links. """ def __init__(self, find_links, index_urls, allow_all_prereleases=False, trusted_hosts=None, process_dependency_links=False, session=None, format_control=None, platform=None, versions=None, abi=None, implementation=None): """Create a PackageFinder. :param format_control: A FormatControl object or None. Used to control the selection of source packages / binary packages when consulting the index and links. :param platform: A string or None. If None, searches for packages that are supported by the current system. Otherwise, will find packages that can be built on the platform passed in. These packages will only be downloaded for distribution: they will not be built locally. :param versions: A list of strings or None. 
This is passed directly to pep425tags.py in the get_supported() method. :param abi: A string or None. This is passed directly to pep425tags.py in the get_supported() method. :param implementation: A string or None. This is passed directly to pep425tags.py in the get_supported() method. """ if session is None: raise TypeError( "PackageFinder() missing 1 required keyword argument: " "'session'" ) # Build find_links. If an argument starts with ~, it may be # a local file relative to a home directory. So try normalizing # it and if it exists, use the normalized version. # This is deliberately conservative - it might be fine just to # blindly normalize anything starting with a ~... self.find_links = [] for link in find_links: if link.startswith('~'): new_link = normalize_path(link) if os.path.exists(new_link): link = new_link self.find_links.append(link) self.index_urls = index_urls self.dependency_links = [] # These are boring links that have already been logged somehow: self.logged_links = set() self.format_control = format_control or FormatControl(set(), set()) # Domains that we won't emit warnings for when not using HTTPS self.secure_origins = [ ("*", host, "*") for host in (trusted_hosts if trusted_hosts else []) ] # Do we want to allow _all_ pre-releases? self.allow_all_prereleases = allow_all_prereleases # Do we process dependency links? self.process_dependency_links = process_dependency_links # The Session we'll use to make requests self.session = session # The valid tags to check potential found wheel candidates against self.valid_tags = get_supported( versions=versions, platform=platform, abi=abi, impl=implementation, ) # If we don't have TLS enabled, then WARN if anyplace we're looking # relies on TLS. 
if not HAS_TLS: for link in itertools.chain(self.index_urls, self.find_links): parsed = urllib_parse.urlparse(link) if parsed.scheme == "https": logger.warning( "pip is configured with locations that require " "TLS/SSL, however the ssl module in Python is not " "available." ) break def add_dependency_links(self, links): # # FIXME: this shouldn't be global list this, it should only # # apply to requirements of the package that specifies the # # dependency_links value # # FIXME: also, we should track comes_from (i.e., use Link) if self.process_dependency_links: warnings.warn( "Dependency Links processing has been deprecated and will be " "removed in a future release.", RemovedInPip10Warning, ) self.dependency_links.extend(links) @staticmethod def _sort_locations(locations, expand_dir=False): """ Sort locations into "files" (archives) and "urls", and return a pair of lists (files,urls) """ files = [] urls = [] # puts the url for the given file path into the appropriate list def sort_path(path): url = path_to_url(path) if mimetypes.guess_type(url, strict=False)[0] == 'text/html': urls.append(url) else: files.append(url) for url in locations: is_local_path = os.path.exists(url) is_file_url = url.startswith('file:') if is_local_path or is_file_url: if is_local_path: path = url else: path = url_to_path(url) if os.path.isdir(path): if expand_dir: path = os.path.realpath(path) for item in os.listdir(path): sort_path(os.path.join(path, item)) elif is_file_url: urls.append(url) elif os.path.isfile(path): sort_path(path) else: logger.warning( "Url '%s' is ignored: it is neither a file " "nor a directory.", url) elif is_url(url): # Only add url with clear scheme urls.append(url) else: logger.warning( "Url '%s' is ignored. It is either a non-existing " "path or lacks a specific scheme.", url) return files, urls def _candidate_sort_key(self, candidate): """ Function used to generate link sort key for link tuples. The greater the return value, the more preferred it is. 
If not finding wheels, then sorted by version only. If finding wheels, then the sort order is by version, then: 1. existing installs 2. wheels ordered via Wheel.support_index_min(self.valid_tags) 3. source archives Note: it was considered to embed this logic into the Link comparison operators, but then different sdist links with the same version, would have to be considered equal """ support_num = len(self.valid_tags) if candidate.location.is_wheel: # can raise InvalidWheelFilename wheel = Wheel(candidate.location.filename) if not wheel.supported(self.valid_tags): raise UnsupportedWheel( "%s is not a supported wheel for this platform. It " "can't be sorted." % wheel.filename ) pri = -(wheel.support_index_min(self.valid_tags)) else: # sdist pri = -(support_num) return (candidate.version, pri) def _validate_secure_origin(self, logger, location): # Determine if this url used a secure transport mechanism parsed = urllib_parse.urlparse(str(location)) origin = (parsed.scheme, parsed.hostname, parsed.port) # The protocol to use to see if the protocol matches. # Don't count the repository type as part of the protocol: in # cases such as "git+ssh", only use "ssh". (I.e., Only verify against # the last scheme.) protocol = origin[0].rsplit('+', 1)[-1] # Determine if our origin is a secure origin by looking through our # hardcoded list of secure origins, as well as any additional ones # configured on this PackageFinder instance. for secure_origin in (SECURE_ORIGINS + self.secure_origins): if protocol != secure_origin[0] and secure_origin[0] != "*": continue try: # We need to do this decode dance to ensure that we have a # unicode object, even on Python 2.x. 
addr = ipaddress.ip_address( origin[1] if ( isinstance(origin[1], six.text_type) or origin[1] is None ) else origin[1].decode("utf8") ) network = ipaddress.ip_network( secure_origin[1] if isinstance(secure_origin[1], six.text_type) else secure_origin[1].decode("utf8") ) except ValueError: # We don't have both a valid address or a valid network, so # we'll check this origin against hostnames. if (origin[1] and origin[1].lower() != secure_origin[1].lower() and secure_origin[1] != "*"): continue else: # We have a valid address and network, so see if the address # is contained within the network. if addr not in network: continue # Check to see if the port patches if (origin[2] != secure_origin[2] and secure_origin[2] != "*" and secure_origin[2] is not None): continue # If we've gotten here, then this origin matches the current # secure origin and we should return True return True # If we've gotten to this point, then the origin isn't secure and we # will not accept it as a valid location to search. We will however # log a warning that we are ignoring it. logger.warning( "The repository located at %s is not a trusted or secure host and " "is being ignored. If this repository is available via HTTPS it " "is recommended to use HTTPS instead, otherwise you may silence " "this warning and allow it anyways with '--trusted-host %s'.", parsed.hostname, parsed.hostname, ) return False def _get_index_urls_locations(self, project_name): """Returns the locations found via self.index_urls Checks the url_name on the main (first in the list) index and use this url_name to produce all locations """ def mkurl_pypi_url(url): loc = posixpath.join( url, urllib_parse.quote(canonicalize_name(project_name))) # For maximum compatibility with easy_install, ensure the path # ends in a trailing slash. Although this isn't in the spec # (and PyPI can handle it without the slash) some other index # implementations might break if they relied on easy_install's # behavior. 
if not loc.endswith('/'): loc = loc + '/' return loc return [mkurl_pypi_url(url) for url in self.index_urls] def find_all_candidates(self, project_name): """Find all available InstallationCandidate for project_name This checks index_urls, find_links and dependency_links. All versions found are returned as an InstallationCandidate list. See _link_package_versions for details on which files are accepted """ index_locations = self._get_index_urls_locations(project_name) index_file_loc, index_url_loc = self._sort_locations(index_locations) fl_file_loc, fl_url_loc = self._sort_locations( self.find_links, expand_dir=True) dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links) file_locations = ( Link(url) for url in itertools.chain( index_file_loc, fl_file_loc, dep_file_loc) ) # We trust every url that the user has given us whether it was given # via --index-url or --find-links # We explicitly do not trust links that came from dependency_links # We want to filter out any thing which does not have a secure origin. 
url_locations = [ link for link in itertools.chain( (Link(url) for url in index_url_loc), (Link(url) for url in fl_url_loc), (Link(url) for url in dep_url_loc), ) if self._validate_secure_origin(logger, link) ] logger.debug('%d location(s) to search for versions of %s:', len(url_locations), project_name) for location in url_locations: logger.debug('* %s', location) canonical_name = canonicalize_name(project_name) formats = fmt_ctl_formats(self.format_control, canonical_name) search = Search(project_name, canonical_name, formats) find_links_versions = self._package_versions( # We trust every directly linked archive in find_links (Link(url, '-f') for url in self.find_links), search ) page_versions = [] for page in self._get_pages(url_locations, project_name): logger.debug('Analyzing links from page %s', page.url) with indent_log(): page_versions.extend( self._package_versions(page.links, search) ) dependency_versions = self._package_versions( (Link(url) for url in self.dependency_links), search ) if dependency_versions: logger.debug( 'dependency_links found: %s', ', '.join([ version.location.url for version in dependency_versions ]) ) file_versions = self._package_versions(file_locations, search) if file_versions: file_versions.sort(reverse=True) logger.debug( 'Local files found: %s', ', '.join([ url_to_path(candidate.location.url) for candidate in file_versions ]) ) # This is an intentional priority ordering return ( file_versions + find_links_versions + page_versions + dependency_versions ) def find_requirement(self, req, upgrade): """Try to find a Link matching req Expects req, an InstallRequirement and upgrade, a boolean Returns a Link if found, Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise """ all_candidates = self.find_all_candidates(req.name) # Filter out anything which doesn't match our specifier compatible_versions = set( req.specifier.filter( # We turn the version object into a str here because otherwise # when we're debundled but 
setuptools isn't, Python will see # packaging.version.Version and # pkg_resources._vendor.packaging.version.Version as different # types. This way we'll use a str as a common data interchange # format. If we stop using the pkg_resources provided specifier # and start using our own, we can drop the cast to str(). [str(c.version) for c in all_candidates], prereleases=( self.allow_all_prereleases if self.allow_all_prereleases else None ), ) ) applicable_candidates = [ # Again, converting to str to deal with debundling. c for c in all_candidates if str(c.version) in compatible_versions ] if applicable_candidates: best_candidate = max(applicable_candidates, key=self._candidate_sort_key) else: best_candidate = None if req.satisfied_by is not None: installed_version = parse_version(req.satisfied_by.version) else: installed_version = None if installed_version is None and best_candidate is None: logger.critical( 'Could not find a version that satisfies the requirement %s ' '(from versions: %s)', req, ', '.join( sorted( set(str(c.version) for c in all_candidates), key=parse_version, ) ) ) raise DistributionNotFound( 'No matching distribution found for %s' % req ) best_installed = False if installed_version and ( best_candidate is None or best_candidate.version <= installed_version): best_installed = True if not upgrade and installed_version is not None: if best_installed: logger.debug( 'Existing installed version (%s) is most up-to-date and ' 'satisfies requirement', installed_version, ) else: logger.debug( 'Existing installed version (%s) satisfies requirement ' '(most up-to-date version is %s)', installed_version, best_candidate.version, ) return None if best_installed: # We have an existing version, and its the best version logger.debug( 'Installed version (%s) is most up-to-date (past versions: ' '%s)', installed_version, ', '.join(sorted(compatible_versions, key=parse_version)) or "none", ) raise BestVersionAlreadyInstalled logger.debug( 'Using version %s (newest of 
versions: %s)', best_candidate.version, ', '.join(sorted(compatible_versions, key=parse_version)) ) return best_candidate.location def _get_pages(self, locations, project_name): """ Yields (page, page_url) from the given locations, skipping locations that have errors. """ seen = set() for location in locations: if location in seen: continue seen.add(location) page = self._get_page(location) if page is None: continue yield page _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$') def _sort_links(self, links): """ Returns elements of links in order, non-egg links first, egg links second, while eliminating duplicates """ eggs, no_eggs = [], [] seen = set() for link in links: if link not in seen: seen.add(link) if link.egg_fragment: eggs.append(link) else: no_eggs.append(link) return no_eggs + eggs def _package_versions(self, links, search): result = [] for link in self._sort_links(links): v = self._link_package_versions(link, search) if v is not None: result.append(v) return result def _log_skipped_link(self, link, reason): if link not in self.logged_links: logger.debug('Skipping link %s; %s', link, reason) self.logged_links.add(link) def _link_package_versions(self, link, search): """Return an InstallationCandidate or None""" version = None if link.egg_fragment: egg_info = link.egg_fragment ext = link.ext else: egg_info, ext = link.splitext() if not ext: self._log_skipped_link(link, 'not a file') return if ext not in SUPPORTED_EXTENSIONS: self._log_skipped_link( link, 'unsupported archive format: %s' % ext) return if "binary" not in search.formats and ext == wheel_ext: self._log_skipped_link( link, 'No binaries permitted for %s' % search.supplied) return if "macosx10" in link.path and ext == '.zip': self._log_skipped_link(link, 'macosx10 one') return if ext == wheel_ext: try: wheel = Wheel(link.filename) except InvalidWheelFilename: self._log_skipped_link(link, 'invalid wheel filename') return if canonicalize_name(wheel.name) != search.canonical: 
self._log_skipped_link( link, 'wrong project name (not %s)' % search.supplied) return if not wheel.supported(self.valid_tags): self._log_skipped_link( link, 'it is not compatible with this Python') return version = wheel.version # This should be up by the search.ok_binary check, but see issue 2700. if "source" not in search.formats and ext != wheel_ext: self._log_skipped_link( link, 'No sources permitted for %s' % search.supplied) return if not version: version = egg_info_matches(egg_info, search.supplied, link) if version is None: self._log_skipped_link( link, 'wrong project name (not %s)' % search.supplied) return match = self._py_version_re.search(version) if match: version = version[:match.start()] py_version = match.group(1) if py_version != sys.version[:3]: self._log_skipped_link( link, 'Python version is incorrect') return try: support_this_python = check_requires_python(link.requires_python) except specifiers.InvalidSpecifier: logger.debug("Package %s has an invalid Requires-Python entry: %s", link.filename, link.requires_python) support_this_python = True if not support_this_python: logger.debug("The package %s is incompatible with the python" "version in use. Acceptable python versions are:%s", link, link.requires_python) return logger.debug('Found link %s, version: %s', link, version) return InstallationCandidate(search.supplied, version, link) def _get_page(self, link): return HTMLPage.get_page(link, session=self.session) def egg_info_matches( egg_info, search_name, link, _egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)): """Pull the version part out of a string. :param egg_info: The string to parse. E.g. foo-2.1 :param search_name: The name of the package this belongs to. None to infer the name. Note that this cannot unambiguously parse strings like foo-2-2 which might be foo, 2-2 or foo-2, 2. :param link: The link the string came from, for logging on failure. 
""" match = _egg_info_re.search(egg_info) if not match: logger.debug('Could not parse version from link: %s', link) return None if search_name is None: full_match = match.group(0) return full_match[full_match.index('-'):] name = match.group(0).lower() # To match the "safe" name that pkg_resources creates: name = name.replace('_', '-') # project name and version must be separated by a dash look_for = search_name.lower() + "-" if name.startswith(look_for): return match.group(0)[len(look_for):] else: return None class HTMLPage(object): """Represents one page, along with its URL""" def __init__(self, content, url, headers=None): # Determine if we have any encoding information in our headers encoding = None if headers and "Content-Type" in headers: content_type, params = cgi.parse_header(headers["Content-Type"]) if "charset" in params: encoding = params['charset'] self.content = content self.parsed = html5lib.parse( self.content, transport_encoding=encoding, namespaceHTMLElements=False, ) self.url = url self.headers = headers def __str__(self): return self.url @classmethod def get_page(cls, link, skip_archives=True, session=None): if session is None: raise TypeError( "get_page() missing 1 required keyword argument: 'session'" ) url = link.url url = url.split('#', 1)[0] # Check for VCS schemes that do not support lookup as web pages. 
from pip.vcs import VcsSupport for scheme in VcsSupport.schemes: if url.lower().startswith(scheme) and url[len(scheme)] in '+:': logger.debug('Cannot look at %s URL %s', scheme, link) return None try: if skip_archives: filename = link.filename for bad_ext in ARCHIVE_EXTENSIONS: if filename.endswith(bad_ext): content_type = cls._get_content_type( url, session=session, ) if content_type.lower().startswith('text/html'): break else: logger.debug( 'Skipping page %s because of Content-Type: %s', link, content_type, ) return logger.debug('Getting page %s', url) # Tack index.html onto file:// URLs that point to directories (scheme, netloc, path, params, query, fragment) = \ urllib_parse.urlparse(url) if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))): # add trailing slash if not present so urljoin doesn't trim # final segment if not url.endswith('/'): url += '/' url = urllib_parse.urljoin(url, 'index.html') logger.debug(' file: URL is directory, getting %s', url) resp = session.get( url, headers={ "Accept": "text/html", "Cache-Control": "max-age=600", }, ) resp.raise_for_status() # The check for archives above only works if the url ends with # something that looks like an archive. However that is not a # requirement of an url. Unless we issue a HEAD request on every # url we cannot know ahead of time for sure if something is HTML # or not. However we can check after we've downloaded it. 
content_type = resp.headers.get('Content-Type', 'unknown') if not content_type.lower().startswith("text/html"): logger.debug( 'Skipping page %s because of Content-Type: %s', link, content_type, ) return inst = cls(resp.content, resp.url, resp.headers) except requests.HTTPError as exc: cls._handle_fail(link, exc, url) except SSLError as exc: reason = ("There was a problem confirming the ssl certificate: " "%s" % exc) cls._handle_fail(link, reason, url, meth=logger.info) except requests.ConnectionError as exc: cls._handle_fail(link, "connection error: %s" % exc, url) except requests.Timeout: cls._handle_fail(link, "timed out", url) else: return inst @staticmethod def _handle_fail(link, reason, url, meth=None): if meth is None: meth = logger.debug meth("Could not fetch URL %s: %s - skipping", link, reason) @staticmethod def _get_content_type(url, session): """Get the Content-Type of the given url, using a HEAD request""" scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url) if scheme not in ('http', 'https'): # FIXME: some warning or something? # assertion error? return '' resp = session.head(url, allow_redirects=True) resp.raise_for_status() return resp.headers.get("Content-Type", "") @cached_property def base_url(self): bases = [ x for x in self.parsed.findall(".//base") if x.get("href") is not None ] if bases and bases[0].get("href"): return bases[0].get("href") else: return self.url @property def links(self): """Yields all links in the page""" for anchor in self.parsed.findall(".//a"): if anchor.get("href"): href = anchor.get("href") url = self.clean_link( urllib_parse.urljoin(self.base_url, href) ) pyrequire = anchor.get('data-requires-python') pyrequire = unescape(pyrequire) if pyrequire else None yield Link(url, self, requires_python=pyrequire) _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) def clean_link(self, url): """Makes sure a link is fully encoded. 
That is, if a ' ' shows up in the link, it will be rewritten to %20 (while not over-quoting % or other characters).""" return self._clean_re.sub( lambda match: '%%%2x' % ord(match.group(0)), url) class Link(object): def __init__(self, url, comes_from=None, requires_python=None): """ Object representing a parsed link from https://pypi.python.org/simple/* url: url of the resource pointed to (href of the link) comes_from: instance of HTMLPage where the link was found, or string. requires_python: String containing the `Requires-Python` metadata field, specified in PEP 345. This may be specified by a data-requires-python attribute in the HTML link tag, as described in PEP 503. """ # url can be a UNC windows share if url.startswith('\\\\'): url = path_to_url(url) self.url = url self.comes_from = comes_from self.requires_python = requires_python if requires_python else None def __str__(self): if self.requires_python: rp = ' (requires-python:%s)' % self.requires_python else: rp = '' if self.comes_from: return '%s (from %s)%s' % (self.url, self.comes_from, rp) else: return str(self.url) def __repr__(self): return '<Link %s>' % self def __eq__(self, other): if not isinstance(other, Link): return NotImplemented return self.url == other.url def __ne__(self, other): if not isinstance(other, Link): return NotImplemented return self.url != other.url def __lt__(self, other): if not isinstance(other, Link): return NotImplemented return self.url < other.url def __le__(self, other): if not isinstance(other, Link): return NotImplemented return self.url <= other.url def __gt__(self, other): if not isinstance(other, Link): return NotImplemented return self.url > other.url def __ge__(self, other): if not isinstance(other, Link): return NotImplemented return self.url >= other.url def __hash__(self): return hash(self.url) @property def filename(self): _, netloc, path, _, _ = urllib_parse.urlsplit(self.url) name = posixpath.basename(path.rstrip('/')) or netloc name = 
urllib_parse.unquote(name) assert name, ('URL %r produced no filename' % self.url) return name @property def scheme(self): return urllib_parse.urlsplit(self.url)[0] @property def netloc(self): return urllib_parse.urlsplit(self.url)[1] @property def path(self): return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2]) def splitext(self): return splitext(posixpath.basename(self.path.rstrip('/'))) @property def ext(self): return self.splitext()[1] @property def url_without_fragment(self): scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url) return urllib_parse.urlunsplit((scheme, netloc, path, query, None)) _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)') @property def egg_fragment(self): match = self._egg_fragment_re.search(self.url) if not match: return None return match.group(1) _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)') @property def subdirectory_fragment(self): match = self._subdirectory_fragment_re.search(self.url) if not match: return None return match.group(1) _hash_re = re.compile( r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)' ) @property def hash(self): match = self._hash_re.search(self.url) if match: return match.group(2) return None @property def hash_name(self): match = self._hash_re.search(self.url) if match: return match.group(1) return None @property def show_url(self): return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0]) @property def is_wheel(self): return self.ext == wheel_ext @property def is_artifact(self): """ Determines if this points to an actual artifact (e.g. a tarball) or if it points to an "abstract" thing like a path or a VCS location. """ from pip.vcs import vcs if self.scheme in vcs.all_schemes: return False return True FormatControl = namedtuple('FormatControl', 'no_binary only_binary') """This object has two fields, no_binary and only_binary. If a field is falsy, it isn't set. 
If it is {':all:'}, it should match all packages except those listed in the other field. Only one field can be set to {':all:'} at a time. The rest of the time exact package name matches are listed, with any given package only showing up in one field at a time. """ def fmt_ctl_handle_mutual_exclude(value, target, other): new = value.split(',') while ':all:' in new: other.clear() target.clear() target.add(':all:') del new[:new.index(':all:') + 1] if ':none:' not in new: # Without a none, we want to discard everything as :all: covers it return for name in new: if name == ':none:': target.clear() continue name = canonicalize_name(name) other.discard(name) target.add(name) def fmt_ctl_formats(fmt_ctl, canonical_name): result = set(["binary", "source"]) if canonical_name in fmt_ctl.only_binary: result.discard('source') elif canonical_name in fmt_ctl.no_binary: result.discard('binary') elif ':all:' in fmt_ctl.only_binary: result.discard('source') elif ':all:' in fmt_ctl.no_binary: result.discard('binary') return frozenset(result) def fmt_ctl_no_binary(fmt_ctl): fmt_ctl_handle_mutual_exclude( ':all:', fmt_ctl.no_binary, fmt_ctl.only_binary) def fmt_ctl_no_use_wheel(fmt_ctl): fmt_ctl_no_binary(fmt_ctl) warnings.warn( '--no-use-wheel is deprecated and will be removed in the future. ' ' Please use --no-binary :all: instead.', RemovedInPip10Warning, stacklevel=2) Search = namedtuple('Search', 'supplied canonical formats') """Capture key aspects of a search. :attribute supplied: The user supplied package. :attribute canonical: The canonical package name. :attribute formats: The formats allowed for this package. Should be a set with 'binary' or 'source' or both in it. """
xkmato/tracpro
refs/heads/master
tracpro/orgs_ext/constants.py
2
from __future__ import unicode_literals from enum import Enum class TaskType(Enum): sync_contacts = 1 fetch_runs = 2
ofreshy/vast
refs/heads/master
vast/resources/__init__.py
1
"""Absolute filesystem paths of the bundled VAST v2 sample XML documents."""

from os import path

# Directory containing this package's resource files.
THIS_DIR = path.dirname(__file__)


def _resource(filename):
    # All sample documents live directly next to this module.
    return path.join(THIS_DIR, filename)


SIMPLE_WRAPPER_XML = _resource("simple_wrapper_v2.xml")
SIMPLE_INLINE_XML = _resource("simple_inline_v2.xml")
INLINE_MULTI_FILES_XML = _resource("inline_multi_media_files_v2.xml")
INLINE_WITH_TRACKING_EVENTS_XML = _resource("inline_with_tracking_events_v2.xml")
INLINE_WITH_CREATIVE_ATTRIBUTES = _resource("inline_with_creative_attributes_v2.xml")
INLINE_WITH_VIDEO_CLICKS = _resource("inline_with_video_clicks_v2.xml")
INLINE_WITH_AD_PARAMETERS = _resource("inline_with_ad_parameters_v2.xml")
INLINE_WITH_NON_LINEAR_ADS = _resource("inline_with_non_linear_ads_v2.xml")
INLINE_WITH_COMPANION_ADS = _resource("inline_with_companion_ads_v2.xml")
dalegregory/odoo
refs/heads/8.0
addons/web_api/__openerp__.py
384
{ 'name': 'OpenERP Web API', 'category': 'Hidden', 'description': """ Openerp Web API. ================ """, 'version': '2.0', 'depends': ['web'], 'installable': True, 'auto_install': False, }
shaohung001/tornado
refs/heads/master
tornado/stack_context.py
248
#!/usr/bin/env python # # Copyright 2010 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """`StackContext` allows applications to maintain threadlocal-like state that follows execution as it moves to other execution contexts. The motivating examples are to eliminate the need for explicit ``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to allow some additional context to be kept for logging. This is slightly magic, but it's an extension of the idea that an exception handler is a kind of stack-local state and when that stack is suspended and resumed in a new context that state needs to be preserved. `StackContext` shifts the burden of restoring that state from each call site (e.g. wrapping each `.AsyncHTTPClient` callback in ``async_callback``) to the mechanisms that transfer control from one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`, thread pools, etc). Example usage:: @contextlib.contextmanager def die_on_error(): try: yield except Exception: logging.error("exception in asynchronous operation",exc_info=True) sys.exit(1) with StackContext(die_on_error): # Any exception thrown here *or in callback and its descendants* # will cause the process to exit instead of spinning endlessly # in the ioloop. http_client.fetch(url, callback) ioloop.start() Most applications shouldn't have to work with `StackContext` directly. 
Here are a few rules of thumb for when it's necessary: * If you're writing an asynchronous library that doesn't rely on a stack_context-aware library like `tornado.ioloop` or `tornado.iostream` (for example, if you're writing a thread pool), use `.stack_context.wrap()` before any asynchronous operations to capture the stack context from where the operation was started. * If you're writing an asynchronous library that has some shared resources (such as a connection pool), create those shared resources within a ``with stack_context.NullContext():`` block. This will prevent ``StackContexts`` from leaking from one request to another. * If you want to write something like an exception handler that will persist across asynchronous calls, create a new `StackContext` (or `ExceptionStackContext`), and make your asynchronous calls in a ``with`` block that references your `StackContext`. """ from __future__ import absolute_import, division, print_function, with_statement import sys import threading from tornado.util import raise_exc_info class StackContextInconsistentError(Exception): pass class _State(threading.local): def __init__(self): self.contexts = (tuple(), None) _state = _State() class StackContext(object): """Establishes the given context as a StackContext that will be transferred. Note that the parameter is a callable that returns a context manager, not the context itself. That is, where for a non-transferable context manager you would say:: with my_context(): StackContext takes the function itself rather than its result:: with StackContext(my_context): The result of ``with StackContext() as cb:`` is a deactivation callback. Run this callback when the StackContext is no longer needed to ensure that it is not propagated any further (note that deactivating a context does not affect any instances of that context that are currently pending). This is an advanced feature and not necessary in most applications. 
""" def __init__(self, context_factory): self.context_factory = context_factory self.contexts = [] self.active = True def _deactivate(self): self.active = False # StackContext protocol def enter(self): context = self.context_factory() self.contexts.append(context) context.__enter__() def exit(self, type, value, traceback): context = self.contexts.pop() context.__exit__(type, value, traceback) # Note that some of this code is duplicated in ExceptionStackContext # below. ExceptionStackContext is more common and doesn't need # the full generality of this class. def __enter__(self): self.old_contexts = _state.contexts self.new_contexts = (self.old_contexts[0] + (self,), self) _state.contexts = self.new_contexts try: self.enter() except: _state.contexts = self.old_contexts raise return self._deactivate def __exit__(self, type, value, traceback): try: self.exit(type, value, traceback) finally: final_contexts = _state.contexts _state.contexts = self.old_contexts # Generator coroutines and with-statements with non-local # effects interact badly. Check here for signs of # the stack getting out of sync. # Note that this check comes after restoring _state.context # so that if it fails things are left in a (relatively) # consistent state. if final_contexts is not self.new_contexts: raise StackContextInconsistentError( 'stack_context inconsistency (may be caused by yield ' 'within a "with StackContext" block)') # Break up a reference to itself to allow for faster GC on CPython. self.new_contexts = None class ExceptionStackContext(object): """Specialization of StackContext for exception handling. The supplied ``exception_handler`` function will be called in the event of an uncaught exception in this context. The semantics are similar to a try/finally clause, and intended use cases are to log an error, close a socket, or similar cleanup actions. The ``exc_info`` triple ``(type, value, traceback)`` will be passed to the exception_handler function. 
If the exception handler returns true, the exception will be consumed and will not be propagated to other exception handlers. """ def __init__(self, exception_handler): self.exception_handler = exception_handler self.active = True def _deactivate(self): self.active = False def exit(self, type, value, traceback): if type is not None: return self.exception_handler(type, value, traceback) def __enter__(self): self.old_contexts = _state.contexts self.new_contexts = (self.old_contexts[0], self) _state.contexts = self.new_contexts return self._deactivate def __exit__(self, type, value, traceback): try: if type is not None: return self.exception_handler(type, value, traceback) finally: final_contexts = _state.contexts _state.contexts = self.old_contexts if final_contexts is not self.new_contexts: raise StackContextInconsistentError( 'stack_context inconsistency (may be caused by yield ' 'within a "with StackContext" block)') # Break up a reference to itself to allow for faster GC on CPython. self.new_contexts = None class NullContext(object): """Resets the `StackContext`. Useful when creating a shared resource on demand (e.g. an `.AsyncHTTPClient`) where the stack that caused the creating is not relevant to future operations. 
""" def __enter__(self): self.old_contexts = _state.contexts _state.contexts = (tuple(), None) def __exit__(self, type, value, traceback): _state.contexts = self.old_contexts def _remove_deactivated(contexts): """Remove deactivated handlers from the chain""" # Clean ctx handlers stack_contexts = tuple([h for h in contexts[0] if h.active]) # Find new head head = contexts[1] while head is not None and not head.active: head = head.old_contexts[1] # Process chain ctx = head while ctx is not None: parent = ctx.old_contexts[1] while parent is not None: if parent.active: break ctx.old_contexts = parent.old_contexts parent = parent.old_contexts[1] ctx = parent return (stack_contexts, head) def wrap(fn): """Returns a callable object that will restore the current `StackContext` when executed. Use this whenever saving a callback to be executed later in a different execution context (either in a different thread or asynchronously in the same thread). """ # Check if function is already wrapped if fn is None or hasattr(fn, '_wrapped'): return fn # Capture current stack head # TODO: Any other better way to store contexts and update them in wrapped function? cap_contexts = [_state.contexts] if not cap_contexts[0][0] and not cap_contexts[0][1]: # Fast path when there are no active contexts. def null_wrapper(*args, **kwargs): try: current_state = _state.contexts _state.contexts = cap_contexts[0] return fn(*args, **kwargs) finally: _state.contexts = current_state null_wrapper._wrapped = True return null_wrapper def wrapped(*args, **kwargs): ret = None try: # Capture old state current_state = _state.contexts # Remove deactivated items cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0]) # Force new state _state.contexts = contexts # Current exception exc = (None, None, None) top = None # Apply stack contexts last_ctx = 0 stack = contexts[0] # Apply state for n in stack: try: n.enter() last_ctx += 1 except: # Exception happened. 
Record exception info and store top-most handler exc = sys.exc_info() top = n.old_contexts[1] # Execute callback if no exception happened while restoring state if top is None: try: ret = fn(*args, **kwargs) except: exc = sys.exc_info() top = contexts[1] # If there was exception, try to handle it by going through the exception chain if top is not None: exc = _handle_exception(top, exc) else: # Otherwise take shorter path and run stack contexts in reverse order while last_ctx > 0: last_ctx -= 1 c = stack[last_ctx] try: c.exit(*exc) except: exc = sys.exc_info() top = c.old_contexts[1] break else: top = None # If if exception happened while unrolling, take longer exception handler path if top is not None: exc = _handle_exception(top, exc) # If exception was not handled, raise it if exc != (None, None, None): raise_exc_info(exc) finally: _state.contexts = current_state return ret wrapped._wrapped = True return wrapped def _handle_exception(tail, exc): while tail is not None: try: if tail.exit(*exc): exc = (None, None, None) except: exc = sys.exc_info() tail = tail.old_contexts[1] return exc def run_with_stack_context(context, func): """Run a coroutine ``func`` in the given `StackContext`. It is not safe to have a ``yield`` statement within a ``with StackContext`` block, so it is difficult to use stack context with `.gen.coroutine`. This helper function runs the function in the correct context while keeping the ``yield`` and ``with`` statements syntactically separate. Example:: @gen.coroutine def incorrect(): with StackContext(ctx): # ERROR: this will raise StackContextInconsistentError yield other_coroutine() @gen.coroutine def correct(): yield run_with_stack_context(StackContext(ctx), other_coroutine) .. versionadded:: 3.1 """ with context: return func()
40223136/w17test1
refs/heads/master
static/Brython3.1.1-20150328-091302/Lib/_socket.py
742
"""Implementation module for socket operations. See the socket module for documentation.""" AF_APPLETALK = 16 AF_DECnet = 12 AF_INET = 2 AF_INET6 = 23 AF_IPX = 6 AF_IRDA = 26 AF_SNA = 11 AF_UNSPEC = 0 AI_ADDRCONFIG = 1024 AI_ALL = 256 AI_CANONNAME = 2 AI_NUMERICHOST = 4 AI_NUMERICSERV = 8 AI_PASSIVE = 1 AI_V4MAPPED = 2048 CAPI = '<capsule object "_socket.CAPI" at 0x00BC4F38>' EAI_AGAIN = 11002 EAI_BADFLAGS = 10022 EAI_FAIL = 11003 EAI_FAMILY = 10047 EAI_MEMORY = 8 EAI_NODATA = 11001 EAI_NONAME = 11001 EAI_SERVICE = 10109 EAI_SOCKTYPE = 10044 INADDR_ALLHOSTS_GROUP = -536870911 INADDR_ANY = 0 INADDR_BROADCAST = -1 INADDR_LOOPBACK = 2130706433 INADDR_MAX_LOCAL_GROUP = -536870657 INADDR_NONE = -1 INADDR_UNSPEC_GROUP = -536870912 IPPORT_RESERVED = 1024 IPPORT_USERRESERVED = 5000 IPPROTO_ICMP = 1 IPPROTO_IP = 0 IPPROTO_RAW = 255 IPPROTO_TCP = 6 IPPROTO_UDP = 17 IPV6_CHECKSUM = 26 IPV6_DONTFRAG = 14 IPV6_HOPLIMIT = 21 IPV6_HOPOPTS = 1 IPV6_JOIN_GROUP = 12 IPV6_LEAVE_GROUP = 13 IPV6_MULTICAST_HOPS = 10 IPV6_MULTICAST_IF = 9 IPV6_MULTICAST_LOOP = 11 IPV6_PKTINFO = 19 IPV6_RECVRTHDR = 38 IPV6_RECVTCLASS = 40 IPV6_RTHDR = 32 IPV6_TCLASS = 39 IPV6_UNICAST_HOPS = 4 IPV6_V6ONLY = 27 IP_ADD_MEMBERSHIP = 12 IP_DROP_MEMBERSHIP = 13 IP_HDRINCL = 2 IP_MULTICAST_IF = 9 IP_MULTICAST_LOOP = 11 IP_MULTICAST_TTL = 10 IP_OPTIONS = 1 IP_RECVDSTADDR = 25 IP_TOS = 3 IP_TTL = 4 MSG_BCAST = 1024 MSG_CTRUNC = 512 MSG_DONTROUTE = 4 MSG_MCAST = 2048 MSG_OOB = 1 MSG_PEEK = 2 MSG_TRUNC = 256 NI_DGRAM = 16 NI_MAXHOST = 1025 NI_MAXSERV = 32 NI_NAMEREQD = 4 NI_NOFQDN = 1 NI_NUMERICHOST = 2 NI_NUMERICSERV = 8 RCVALL_MAX = 3 RCVALL_OFF = 0 RCVALL_ON = 1 RCVALL_SOCKETLEVELONLY = 2 SHUT_RD = 0 SHUT_RDWR = 2 SHUT_WR = 1 SIO_KEEPALIVE_VALS = 2550136836 SIO_RCVALL = 2550136833 SOCK_DGRAM = 2 SOCK_RAW = 3 SOCK_RDM = 4 SOCK_SEQPACKET = 5 SOCK_STREAM = 1 SOL_IP = 0 SOL_SOCKET = 65535 SOL_TCP = 6 SOL_UDP = 17 SOMAXCONN = 2147483647 SO_ACCEPTCONN = 2 SO_BROADCAST = 32 SO_DEBUG = 1 SO_DONTROUTE = 16 SO_ERROR = 4103 
# Pure-Python stand-in for (part of) the C ``_socket`` extension module.
# Constants keep their platform values; every function accepts any
# arguments and returns None; classes are empty placeholders.

SO_EXCLUSIVEADDRUSE = -5
SO_KEEPALIVE = 8
SO_LINGER = 128
SO_OOBINLINE = 256
SO_RCVBUF = 4098
SO_RCVLOWAT = 4100
SO_RCVTIMEO = 4102
SO_REUSEADDR = 4
SO_SNDBUF = 4097
SO_SNDLOWAT = 4099
SO_SNDTIMEO = 4101
SO_TYPE = 4104
SO_USELOOPBACK = 64


class SocketType:
    pass


TCP_MAXSEG = 4
TCP_NODELAY = 1

__loader__ = '<_frozen_importlib.ExtensionFileLoader object at 0x00CA2D90>'


def dup(*args, **kwargs):
    """dup(integer) -> integer

    Duplicate an integer socket file descriptor.  Like os.dup(), but works
    for socket file descriptors on platforms where os.dup() does not.
    """
    return None


class error:
    pass


class gaierror:
    pass


def getaddrinfo(*args, **kwargs):
    """getaddrinfo(host, port [, family, socktype, proto, flags])
    -> list of (family, socktype, proto, canonname, sockaddr)

    Resolve host and port into an addrinfo struct.
    """
    return None


def getdefaulttimeout(*args, **kwargs):
    """getdefaulttimeout() -> timeout

    Return the default timeout in seconds (float) for new socket objects;
    None means new sockets have no timeout (the initial default).
    """
    return None


def gethostbyaddr(*args, **kwargs):
    """gethostbyaddr(host) -> (name, aliaslist, addresslist)

    Return the true host name, a list of aliases, and a list of IP
    addresses for a host given as a name or IP number string.
    """
    return None


def gethostbyname(*args, **kwargs):
    """gethostbyname(host) -> address

    Return the IP address (a string like '255.255.255.255') for a host.
    """
    return None


def gethostbyname_ex(*args, **kwargs):
    """gethostbyname_ex(host) -> (name, aliaslist, addresslist)

    Return the true host name, a list of aliases, and a list of IP
    addresses for a host given as a name or IP number string.
    """
    return None


def gethostname(*args, **kwargs):
    """gethostname() -> string

    Return the current host name.
    """
    return None


def getnameinfo(*args, **kwargs):
    """getnameinfo(sockaddr, flags) --> (host, port)

    Get host and port for a sockaddr.
    """
    return None


def getprotobyname(*args, **kwargs):
    """getprotobyname(name) -> integer

    Return the protocol number for the named protocol.  (Rarely used.)
    """
    return None


def getservbyname(*args, **kwargs):
    """getservbyname(servicename[, protocolname]) -> integer

    Return a port number from a service name and protocol name.  The
    optional protocol name should be 'tcp' or 'udp'; otherwise any
    protocol will match.
    """
    return None


def getservbyport(*args, **kwargs):
    """getservbyport(port[, protocolname]) -> string

    Return the service name from a port number and protocol name.  The
    optional protocol name should be 'tcp' or 'udp'; otherwise any
    protocol will match.
    """
    return None


has_ipv6 = True


class herror:
    pass


def htonl(*args, **kwargs):
    """htonl(integer) -> integer

    Convert a 32-bit integer from host to network byte order.
    """
    return None


def htons(*args, **kwargs):
    """htons(integer) -> integer

    Convert a 16-bit integer from host to network byte order.
    """
    return None


def inet_aton(*args, **kwargs):
    """inet_aton(string) -> bytes giving packed 32-bit IP representation

    Convert an IP address in string format (123.45.67.89) to the 32-bit
    packed binary format used in low-level network functions.
    """
    return None


def inet_ntoa(*args, **kwargs):
    """inet_ntoa(packed_ip) -> ip_address_string

    Convert an IP address from 32-bit packed binary format to string
    format.
    """
    return None


def ntohl(*args, **kwargs):
    """ntohl(integer) -> integer

    Convert a 32-bit integer from network to host byte order.
    """
    return None


def ntohs(*args, **kwargs):
    """ntohs(integer) -> integer

    Convert a 16-bit integer from network to host byte order.
    """
    return None


def setdefaulttimeout(*args, **kwargs):
    """setdefaulttimeout(timeout)

    Set the default timeout in seconds (float) for new socket objects;
    None means new sockets have no timeout (the initial default).
    """
    return None


class socket:
    # Placeholder socket object: constructor and methods accept anything
    # and do nothing.
    def __init__(self, *args, **kwargs):
        pass

    def bind(self, *args, **kwargs):
        pass

    def close(self):
        pass


class timeout:
    pass
dmigo/incubator-superset
refs/heads/master
superset/legacy.py
2
# -*- coding: utf-8 -*- # pylint: disable=C,R,W """Code related with dealing with legacy / change management""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import re from superset import frontend_config FORM_DATA_KEY_WHITELIST = list(frontend_config.get('controls').keys()) + ['slice_id'] def cast_filter_data(form_data): """Used by cast_form_data to parse the filters""" flts = [] having_flts = [] fd = form_data filter_pattern = re.compile(r"""((?:[^,"']|"[^"]*"|'[^']*')+)""") for i in range(0, 10): for prefix in ['flt', 'having']: col_str = '{}_col_{}'.format(prefix, i) op_str = '{}_op_{}'.format(prefix, i) val_str = '{}_eq_{}'.format(prefix, i) if col_str in fd and op_str in fd and val_str in fd \ and len(fd[val_str]) > 0: f = {} f['col'] = fd[col_str] f['op'] = fd[op_str] if prefix == 'flt': # transfer old strings in filter value to list splitted = filter_pattern.split(fd[val_str])[1::2] values = [types.replace("'", '').strip() for types in splitted] f['val'] = values flts.append(f) if prefix == 'having': f['val'] = fd[val_str] having_flts.append(f) if col_str in fd: del fd[col_str] if op_str in fd: del fd[op_str] if val_str in fd: del fd[val_str] fd['filters'] = flts fd['having_filters'] = having_flts return fd def cast_form_data(form_data): """Translates old to new form_data""" d = {} fields = frontend_config.get('controls', {}) for k, v in form_data.items(): field_config = fields.get(k, {}) ft = field_config.get('type') if ft == 'CheckboxControl': # bug in some urls with dups on bools if isinstance(v, list): v = 'y' in v else: v = True if v in ('true', 'y') or v is True else False elif v and ft == 'TextControl' and field_config.get('isInt'): v = int(v) if v != '' else None elif v and ft == 'TextControl' and field_config.get('isFloat'): v = float(v) if v != '' else None elif v and ft == 'SelectControl': if field_config.get('multi'): if 
type(form_data).__name__ == 'ImmutableMultiDict': v = form_data.getlist(k) elif not isinstance(v, list): v = [v] if d.get('slice_id'): d['slice_id'] = int(d['slice_id']) d[k] = v if 'filters' not in d: d = cast_filter_data(d) for k in list(d.keys()): if k not in FORM_DATA_KEY_WHITELIST: del d[k] return d
Microvellum/Fluid-Designer
refs/heads/master
win64-vc/2.78/Python/lib/site-packages/wheel/__init__.py
233
# __variables__ with double-quoted values will be available in setup.py: __version__ = "0.29.0"
harrygg/plugin.program.tvbgpvr.backend
refs/heads/master
resources/lib/utils.py
1
# -*- coding: utf-8 -*-
# Shared utilities and module-level state for the Kodi add-on
# plugin.program.tvbgpvr.backend.
# NOTE: this is Python 2 code (reload(sys)/setdefaultencoding and the
# `except Exception, er` syntax below require Python 2).
import os
import sys
import json
import time
import xbmc
import xbmcaddon

# Force UTF-8 as the default codec so the Cyrillic group names below can
# mix freely with byte strings coming from Kodi APIs.
reload(sys)
sys.setdefaultencoding('utf8')


class Settings():
    """Attribute-style proxy over the add-on settings store.

    Reading ``settings.foo`` fetches setting "foo" via the addon handle
    and coerces it: 'true'/'false' -> bool, digits -> int, else the raw
    string.  Assigning ``settings.foo = v`` persists str(v).
    """

    def __getattr__(self, name):
        temp = this.getSetting(name)
        # "debug" is read by log() itself, so logging that particular
        # lookup would recurse/spam; skip it.
        if (name != "debug"):
            log("getting setting %s=%s" % (name, temp))
        if temp.lower() == 'true':
            return True
        elif temp.lower() == 'false':
            return False
        elif temp.isdigit():
            return int(temp)
        else:
            return temp

    def __setattr__(self, name, value):
        # Everything is stored stringified; __getattr__ re-coerces on read.
        this.setSetting(name, str(value))


def log(msg, level=xbmc.LOGDEBUG):
    """Log through Kodi; debug messages are promoted to NOTICE when the
    add-on's debug setting is enabled so they show up in the normal log."""
    try:
        if settings.debug and level == xbmc.LOGDEBUG:
            level = xbmc.LOGNOTICE
        xbmc.log('%s | %s' % (id, str(msg).encode('utf-8')), level)
    except Exception as e:
        try:
            xbmc.log('%s | Logging failure: %s' % (id, e), level)
        except:
            # Logging is strictly best-effort: never let it crash the add-on.
            pass


def show_progress(progress_bar, percent, msg):
    """Update the (optional) progress dialog and mirror the message to the log."""
    if progress_bar:
        progress_bar.update(percent, str(msg))
    log(msg)


def notify(msg):
    """Show a Kodi notification popup with the add-on's info title (32003)."""
    log("notify() %s" % msg)
    command = "Notification(%s,%s,%s)" % (translate(32003), msg, 5000)
    xbmc.executebuiltin(command)


def notify_error(msg):
    """Show a Kodi notification popup with the add-on's error title (32005)."""
    log("notify_error() %s" % msg)
    command = "Notification(%s,%s,%s)" % (translate(32005), msg, 5000)
    xbmc.executebuiltin(command)


def __update__(action, location, crash=None):
    """Send an anonymous Google Analytics usage ping, at most once per
    calendar day (throttled via the last_update setting)."""
    try:
        lu = settings.last_update
        day = time.strftime("%d")
        if lu != day:
            settings.last_update = day
            from ga import ga
            p = {}
            p['an'] = this.getAddonInfo('name').decode('utf-8')
            p['av'] = this.getAddonInfo('version')
            p['ec'] = 'Addon actions'
            p['ea'] = action
            p['ev'] = '1'
            p['ul'] = xbmc.getLanguage()
            p['cd'] = location
            ga('UA-79422131-10').update(p, crash)
    except Exception, er:
        log(er)


def get_template_file():
    """Return the configured channel-order template path, falling back to
    the bundled resources/order.txt when the configured file is missing."""
    template_file = settings.template_file
    if not os.path.isfile(template_file):
        cwd = xbmc.translatePath(this.getAddonInfo('path')).decode('utf-8')
        template_file = os.path.join(cwd, 'resources', 'order.txt')
    return template_file


def get_disabled_groups():
    """Collect the channel-group names the user chose to hide.

    Returns the provider's group labels (Bulgarian names for the first
    provider, English names for the *_pr provider) for every enabled
    hide_* setting.
    """
    disabled_groups = []
    if settings.hide_children: disabled_groups.append('Детски')
    if settings.hide_docs: disabled_groups.append('Документални')
    if settings.hide_french: disabled_groups.append('Френски')
    if settings.hide_english: disabled_groups.append('Английски')
    if settings.hide_german: disabled_groups.append('Немски')
    if settings.hide_holland: disabled_groups.append('Холандски')
    if settings.hide_italian: disabled_groups.append('Италиански')
    if settings.hide_movies: disabled_groups.append('Филми')
    if settings.hide_music: disabled_groups.append('Музикални')
    if settings.hide_news: disabled_groups.append('Новини')
    if settings.hide_russian: disabled_groups.append('Руски')
    if settings.hide_serbian: disabled_groups.append('Сръбски')
    if settings.hide_theme: disabled_groups.append('Тематични')
    if settings.hide_turkish: disabled_groups.append('Турски')
    if settings.hide_xxx: disabled_groups.append('Възрастни')
    if settings.hide_sports: disabled_groups.append('Спортни')
    if settings.hide_bulgarian: disabled_groups.append('Български')
    if settings.hide_asia: disabled_groups.append('Азиатски')
    if settings.hide_greek: disabled_groups.append('Гръцки')
    if settings.hide_roman: disabled_groups.append('Румънски')
    if settings.hide_others: disabled_groups.append('Други')
    if settings.hide_information_pr: disabled_groups.append('information')
    if settings.hide_movies_pr: disabled_groups.append('cinema')
    if settings.hide_news_pr: disabled_groups.append('news')
    if settings.hide_docs_pr: disabled_groups.append('documentary')
    if settings.hide_sports_pr: disabled_groups.append('sports')
    if settings.hide_entertainments_pr: disabled_groups.append('entertainments')
    if settings.hide_russian_pr: disabled_groups.append('Russian')
    if settings.hide_music_pr: disabled_groups.append('music')
    if settings.hide_children_pr: disabled_groups.append('children\'s')
    if settings.hide_xxx_pr: disabled_groups.append('for adults')
    if settings.hide_free_pr: disabled_groups.append('free web tv')
    if settings.hide_culture_pr: disabled_groups.append('culture')
    if settings.hide_greek_pr: disabled_groups.append('greek')
    if settings.hide_roman_pr: disabled_groups.append('romanian')
    return disabled_groups


def get_location():
    """Return the playlist source URL (url + mac settings), overridable
    via the TVBGPVRDEBUG environment variable for local testing."""
    location = settings.url + settings.mac
    if os.environ.get('TVBGPVRDEBUG'):
        location = os.environ['TVBGPVRDEBUG']
    return location


def get_stream_url(name):
    """ Reads stream list from cache and returns url of the selected stream name """
    try:
        # deserialize streams
        # streams = cPickle.load(open(pl_streams))
        streams = json.load(open(pl_streams))
        log("Deserialized %s streams from file %s" % (len(streams), pl_streams))
        return streams.get(name.decode("utf-8"))
    except Exception as er:
        # Missing/corrupt cache file: treat as "stream unknown".
        log(er)
        return None


## Initialize the addon
# id shadows the builtin id(); kept for backward compatibility since it is
# interpolated into log lines and RUNSCRIPT below.
id = 'plugin.program.tvbgpvr.backend'
this = xbmcaddon.Addon()
translate = this.getLocalizedString
settings = Settings()
pl_name = 'playlist.m3u'
profile_dir = xbmc.translatePath(this.getAddonInfo('profile')).decode('utf-8')
pl_path = os.path.join(profile_dir, pl_name)      # generated playlist
pl_cache = os.path.join(profile_dir, ".cache")    # raw playlist cache
pl_streams = os.path.join(profile_dir, ".streams")  # name -> url JSON map
__version__ = xbmc.getInfoLabel('System.BuildVersion')
# Kodi major version, e.g. "17.6 ..." -> 17
VERSION = int(__version__[0:2])
user_agent = 'Kodi %s' % __version__
# True when re-launched by the scheduler (service passes "True" as argv[1]).
scheduled_run = len(sys.argv) > 1 and sys.argv[1] == str(True)
addon_dir = this.getAddonInfo('path').decode('utf-8')
mapping_file = xbmc.translatePath(os.path.join(addon_dir, 'resources', 'mapping.json'))
progress_bar = None

### Literals
RUNSCRIPT = 'RunScript(%s, True)' % id
GET = 'GET'
HEAD = 'HEAD'
NEWLINE = '\n'
# Bind to all interfaces only when the user explicitly opted in.
BIND_IP = '0.0.0.0' if settings.bind_all else '127.0.0.1'
STREAM_URL = 'http://' + settings.stream_ip + ':' + str(settings.port) + '/tvbgpvr.backend/stream/%s'
HD = 'HD'
SD = 'SD'
LQ = 'LQ'
START_MARKER = "#EXTM3U"
INFO_MARKER = "#EXTINF"
ALL = "Всички"

### Addon starts
# First run: open the settings dialog once, then clear the flag.
if settings.firstrun:
    this.openSettings()
    settings.firstrun = False

# Daily usage ping (throttled inside __update__).
__update__('operation', 'start')


class PlaylistType:
    # Output formats supported by the playlist generator.
    KODIPVR = "KODIPVR"
    PLAIN = "PLAIN"
    NAMES = "NAMES"
    JSON = "JSON"
kemalakyol48/python-for-android
refs/heads/master
python-modules/twisted/twisted/protocols/test/test_tls.py
60
# Copyright (c) 2009 Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.protocols.tls}. """ try: from twisted.protocols.tls import TLSMemoryBIOProtocol, TLSMemoryBIOFactory except ImportError: # Skip the whole test module if it can't be imported. skip = "pyOpenSSL 0.10 or newer required for twisted.protocol.tls" else: # Otherwise, the pyOpenSSL dependency must be satisfied, so all these # imports will work. from OpenSSL.crypto import X509Type from OpenSSL.SSL import TLSv1_METHOD, Error, Context, ConnectionType from twisted.internet.ssl import ClientContextFactory, PrivateCertificate from twisted.internet.ssl import DefaultOpenSSLContextFactory from twisted.python.filepath import FilePath from twisted.internet.interfaces import ISystemHandle, ISSLTransport from twisted.internet.error import ConnectionDone from twisted.internet.defer import Deferred, gatherResults from twisted.internet.protocol import Protocol, ClientFactory, ServerFactory from twisted.protocols.loopback import loopbackAsync, collapsingPumpPolicy from twisted.trial.unittest import TestCase from twisted.test.test_tcp import ConnectionLostNotifyingProtocol from twisted.test.test_ssl import certPath from twisted.test.proto_helpers import StringTransport class HandshakeCallbackContextFactory: """ L{HandshakeCallbackContextFactory} is a factory for SSL contexts which allows applications to get notification when the SSL handshake completes. @ivar _finished: A L{Deferred} which will be called back when the handshake is done. """ # pyOpenSSL needs to expose this. # https://bugs.launchpad.net/pyopenssl/+bug/372832 SSL_CB_HANDSHAKE_DONE = 0x20 def __init__(self): self._finished = Deferred() def factoryAndDeferred(cls): """ Create a new L{HandshakeCallbackContextFactory} and return a two-tuple of it and a L{Deferred} which will fire when a connection created with it completes a TLS handshake. 
""" contextFactory = cls() return contextFactory, contextFactory._finished factoryAndDeferred = classmethod(factoryAndDeferred) def _info(self, connection, where, ret): """ This is the "info callback" on the context. It will be called periodically by pyOpenSSL with information about the state of a connection. When it indicates the handshake is complete, it will fire C{self._finished}. """ if where & self.SSL_CB_HANDSHAKE_DONE: self._finished.callback(None) def getContext(self): """ Create and return an SSL context configured to use L{self._info} as the info callback. """ context = Context(TLSv1_METHOD) context.set_info_callback(self._info) return context class AccumulatingProtocol(Protocol): """ A protocol which collects the bytes it receives and closes its connection after receiving a certain minimum of data. @ivar howMany: The number of bytes of data to wait for before closing the connection. @ivar receiving: A C{list} of C{str} of the bytes received so far. """ def __init__(self, howMany): self.howMany = howMany def connectionMade(self): self.received = [] def dataReceived(self, bytes): self.received.append(bytes) if sum(map(len, self.received)) >= self.howMany: self.transport.loseConnection() class TLSMemoryBIOTests(TestCase): """ Tests for the implementation of L{ISSLTransport} which runs over another L{ITransport}. """ def test_interfaces(self): """ L{TLSMemoryBIOProtocol} instances provide L{ISSLTransport} and L{ISystemHandle}. """ proto = TLSMemoryBIOProtocol(None, None) self.assertTrue(ISSLTransport.providedBy(proto)) self.assertTrue(ISystemHandle.providedBy(proto)) def test_getHandle(self): """ L{TLSMemoryBIOProtocol.getHandle} returns the L{OpenSSL.SSL.Connection} instance it uses to actually implement TLS. This may seem odd. In fact, it is. The L{OpenSSL.SSL.Connection} is not actually the "system handle" here, nor even an object the reactor knows about directly. 
However, L{twisted.internet.ssl.Certificate}'s C{peerFromTransport} and C{hostFromTransport} methods depend on being able to get an L{OpenSSL.SSL.Connection} object in order to work properly. Implementing L{ISystemHandle.getHandle} like this is the easiest way for those APIs to be made to work. If they are changed, then it may make sense to get rid of this implementation of L{ISystemHandle} and return the underlying socket instead. """ factory = ClientFactory() contextFactory = ClientContextFactory() wrapperFactory = TLSMemoryBIOFactory(contextFactory, True, factory) proto = TLSMemoryBIOProtocol(wrapperFactory, Protocol()) transport = StringTransport() proto.makeConnection(transport) self.assertIsInstance(proto.getHandle(), ConnectionType) def test_makeConnection(self): """ When L{TLSMemoryBIOProtocol} is connected to a transport, it connects the protocol it wraps to a transport. """ clientProtocol = Protocol() clientFactory = ClientFactory() clientFactory.protocol = lambda: clientProtocol contextFactory = ClientContextFactory() wrapperFactory = TLSMemoryBIOFactory( contextFactory, True, clientFactory) sslProtocol = wrapperFactory.buildProtocol(None) transport = StringTransport() sslProtocol.makeConnection(transport) self.assertNotIdentical(clientProtocol.transport, None) self.assertNotIdentical(clientProtocol.transport, transport) def test_handshake(self): """ The TLS handshake is performed when L{TLSMemoryBIOProtocol} is connected to a transport. 
""" clientFactory = ClientFactory() clientFactory.protocol = Protocol clientContextFactory, handshakeDeferred = ( HandshakeCallbackContextFactory.factoryAndDeferred()) wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverFactory = ServerFactory() serverFactory.protocol = Protocol serverContextFactory = DefaultOpenSSLContextFactory(certPath, certPath) wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol) # Only wait for the handshake to complete. Anything after that isn't # important here. return handshakeDeferred def test_handshakeFailure(self): """ L{TLSMemoryBIOProtocol} reports errors in the handshake process to the application-level protocol object using its C{connectionLost} method and disconnects the underlying transport. """ clientConnectionLost = Deferred() clientFactory = ClientFactory() clientFactory.protocol = ( lambda: ConnectionLostNotifyingProtocol( clientConnectionLost)) clientContextFactory = HandshakeCallbackContextFactory() wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverConnectionLost = Deferred() serverFactory = ServerFactory() serverFactory.protocol = ( lambda: ConnectionLostNotifyingProtocol( serverConnectionLost)) # This context factory rejects any clients which do not present a # certificate. 
certificateData = FilePath(certPath).getContent() certificate = PrivateCertificate.loadPEM(certificateData) serverContextFactory = certificate.options(certificate) wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol) def cbConnectionLost(protocol): # The connection should close on its own in response to the error # induced by the client not supplying the required certificate. # After that, check to make sure the protocol's connectionLost was # called with the right thing. protocol.lostConnectionReason.trap(Error) clientConnectionLost.addCallback(cbConnectionLost) serverConnectionLost.addCallback(cbConnectionLost) # Additionally, the underlying transport should have been told to # go away. return gatherResults([ clientConnectionLost, serverConnectionLost, connectionDeferred]) def test_getPeerCertificate(self): """ L{TLSMemoryBIOFactory.getPeerCertificate} returns the L{OpenSSL.crypto.X509Type} instance representing the peer's certificate. """ # Set up a client and server so there's a certificate to grab. 
clientFactory = ClientFactory() clientFactory.protocol = Protocol clientContextFactory, handshakeDeferred = ( HandshakeCallbackContextFactory.factoryAndDeferred()) wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverFactory = ServerFactory() serverFactory.protocol = Protocol serverContextFactory = DefaultOpenSSLContextFactory(certPath, certPath) wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) connectionDeferred = loopbackAsync( sslServerProtocol, sslClientProtocol) # Wait for the handshake def cbHandshook(ignored): # Grab the server's certificate and check it out cert = sslClientProtocol.getPeerCertificate() self.assertIsInstance(cert, X509Type) self.assertEquals( cert.digest('md5'), '9B:A4:AB:43:10:BE:82:AE:94:3E:6B:91:F2:F3:40:E8') handshakeDeferred.addCallback(cbHandshook) return handshakeDeferred def test_writeAfterHandshake(self): """ Bytes written to L{TLSMemoryBIOProtocol} before the handshake is complete are received by the protocol on the other side of the connection once the handshake succeeds. 
""" bytes = "some bytes" clientProtocol = Protocol() clientFactory = ClientFactory() clientFactory.protocol = lambda: clientProtocol clientContextFactory, handshakeDeferred = ( HandshakeCallbackContextFactory.factoryAndDeferred()) wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverProtocol = AccumulatingProtocol(len(bytes)) serverFactory = ServerFactory() serverFactory.protocol = lambda: serverProtocol serverContextFactory = DefaultOpenSSLContextFactory(certPath, certPath) wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol) # Wait for the handshake to finish before writing anything. def cbHandshook(ignored): clientProtocol.transport.write(bytes) # The server will drop the connection once it gets the bytes. return connectionDeferred handshakeDeferred.addCallback(cbHandshook) # Once the connection is lost, make sure the server received the # expected bytes. def cbDisconnected(ignored): self.assertEquals("".join(serverProtocol.received), bytes) handshakeDeferred.addCallback(cbDisconnected) return handshakeDeferred def test_writeBeforeHandshake(self): """ Bytes written to L{TLSMemoryBIOProtocol} before the handshake is complete are received by the protocol on the other side of the connection once the handshake succeeds. 
""" bytes = "some bytes" class SimpleSendingProtocol(Protocol): def connectionMade(self): self.transport.write(bytes) clientFactory = ClientFactory() clientFactory.protocol = SimpleSendingProtocol clientContextFactory, handshakeDeferred = ( HandshakeCallbackContextFactory.factoryAndDeferred()) wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverProtocol = AccumulatingProtocol(len(bytes)) serverFactory = ServerFactory() serverFactory.protocol = lambda: serverProtocol serverContextFactory = DefaultOpenSSLContextFactory(certPath, certPath) wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol) # Wait for the connection to end, then make sure the server received # the bytes sent by the client. def cbConnectionDone(ignored): self.assertEquals("".join(serverProtocol.received), bytes) connectionDeferred.addCallback(cbConnectionDone) return connectionDeferred def test_writeSequence(self): """ Bytes written to L{TLSMemoryBIOProtocol} with C{writeSequence} are received by the protocol on the other side of the connection. 
""" bytes = "some bytes" class SimpleSendingProtocol(Protocol): def connectionMade(self): self.transport.writeSequence(list(bytes)) clientFactory = ClientFactory() clientFactory.protocol = SimpleSendingProtocol clientContextFactory = HandshakeCallbackContextFactory() wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverProtocol = AccumulatingProtocol(len(bytes)) serverFactory = ServerFactory() serverFactory.protocol = lambda: serverProtocol serverContextFactory = DefaultOpenSSLContextFactory(certPath, certPath) wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol) # Wait for the connection to end, then make sure the server received # the bytes sent by the client. def cbConnectionDone(ignored): self.assertEquals("".join(serverProtocol.received), bytes) connectionDeferred.addCallback(cbConnectionDone) return connectionDeferred def test_multipleWrites(self): """ If multiple separate TLS messages are received in a single chunk from the underlying transport, all of the application bytes from each message are delivered to the application-level protocol. 
""" bytes = [str(i) for i in range(10)] class SimpleSendingProtocol(Protocol): def connectionMade(self): for b in bytes: self.transport.write(b) clientFactory = ClientFactory() clientFactory.protocol = SimpleSendingProtocol clientContextFactory = HandshakeCallbackContextFactory() wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverProtocol = AccumulatingProtocol(sum(map(len, bytes))) serverFactory = ServerFactory() serverFactory.protocol = lambda: serverProtocol serverContextFactory = DefaultOpenSSLContextFactory(certPath, certPath) wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol, collapsingPumpPolicy) # Wait for the connection to end, then make sure the server received # the bytes sent by the client. def cbConnectionDone(ignored): self.assertEquals("".join(serverProtocol.received), ''.join(bytes)) connectionDeferred.addCallback(cbConnectionDone) return connectionDeferred def test_hugeWrite(self): """ If a very long string is passed to L{TLSMemoryBIOProtocol.write}, any trailing part of it which cannot be send immediately is buffered and sent later. 
""" bytes = "some bytes" factor = 8192 class SimpleSendingProtocol(Protocol): def connectionMade(self): self.transport.write(bytes * factor) clientFactory = ClientFactory() clientFactory.protocol = SimpleSendingProtocol clientContextFactory = HandshakeCallbackContextFactory() wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverProtocol = AccumulatingProtocol(len(bytes) * factor) serverFactory = ServerFactory() serverFactory.protocol = lambda: serverProtocol serverContextFactory = DefaultOpenSSLContextFactory(certPath, certPath) wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol) # Wait for the connection to end, then make sure the server received # the bytes sent by the client. def cbConnectionDone(ignored): self.assertEquals("".join(serverProtocol.received), bytes * factor) connectionDeferred.addCallback(cbConnectionDone) return connectionDeferred def test_disorderlyShutdown(self): """ If a L{TLSMemoryBIOProtocol} loses its connection unexpectedly, this is reported to the application. """ clientConnectionLost = Deferred() clientFactory = ClientFactory() clientFactory.protocol = ( lambda: ConnectionLostNotifyingProtocol( clientConnectionLost)) clientContextFactory = HandshakeCallbackContextFactory() wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) # Client speaks first, so the server can be dumb. serverProtocol = Protocol() connectionDeferred = loopbackAsync(serverProtocol, sslClientProtocol) # Now destroy the connection. serverProtocol.transport.loseConnection() # And when the connection completely dies, check the reason. 
def cbDisconnected(clientProtocol): clientProtocol.lostConnectionReason.trap(Error) clientConnectionLost.addCallback(cbDisconnected) return clientConnectionLost def test_loseConnectionAfterHandshake(self): """ L{TLSMemoryBIOProtocol.loseConnection} sends a TLS close alert and shuts down the underlying connection. """ clientConnectionLost = Deferred() clientFactory = ClientFactory() clientFactory.protocol = ( lambda: ConnectionLostNotifyingProtocol( clientConnectionLost)) clientContextFactory, handshakeDeferred = ( HandshakeCallbackContextFactory.factoryAndDeferred()) wrapperFactory = TLSMemoryBIOFactory( clientContextFactory, True, clientFactory) sslClientProtocol = wrapperFactory.buildProtocol(None) serverProtocol = Protocol() serverFactory = ServerFactory() serverFactory.protocol = lambda: serverProtocol serverContextFactory = DefaultOpenSSLContextFactory(certPath, certPath) wrapperFactory = TLSMemoryBIOFactory( serverContextFactory, False, serverFactory) sslServerProtocol = wrapperFactory.buildProtocol(None) connectionDeferred = loopbackAsync(sslServerProtocol, sslClientProtocol) # Wait for the handshake before dropping the connection. def cbHandshake(ignored): serverProtocol.transport.loseConnection() # Now wait for the client to notice. return clientConnectionLost handshakeDeferred.addCallback(cbHandshake) # Wait for the connection to end, then make sure the client was # notified of a handshake failure. def cbConnectionDone(clientProtocol): clientProtocol.lostConnectionReason.trap(ConnectionDone) # The server should have closed its underlying transport, in # addition to whatever it did to shut down the TLS layer. self.assertTrue(serverProtocol.transport.q.disconnect) # The client should also have closed its underlying transport once # it saw the server shut down the TLS layer, so as to avoid relying # on the server to close the underlying connection. 
self.assertTrue(clientProtocol.transport.q.disconnect) handshakeDeferred.addCallback(cbConnectionDone) return handshakeDeferred
transcranial/gensim
refs/heads/develop
gensim/test/test_doc2vec.py
8
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz> # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html """ Automated tests for checking transformation algorithms (the models package). """ from __future__ import with_statement import logging import unittest import os import tempfile from six.moves import zip as izip from collections import namedtuple import numpy as np from gensim import utils, matutils from gensim.models import doc2vec module_path = os.path.dirname(__file__) # needed because sample data files are located in the same folder datapath = lambda fname: os.path.join(module_path, 'test_data', fname) class DocsLeeCorpus(object): def __init__(self, string_tags=False): self.string_tags = string_tags def _tag(self, i): return i if not self.string_tags else '_*%d' % i def __iter__(self): with open(datapath('lee_background.cor')) as f: for i, line in enumerate(f): yield doc2vec.TaggedDocument(utils.simple_preprocess(line), [self._tag(i)]) list_corpus = list(DocsLeeCorpus()) sentences = [ ['human', 'interface', 'computer'], ['survey', 'user', 'computer', 'system', 'response', 'time'], ['eps', 'user', 'interface', 'system'], ['system', 'human', 'system', 'eps'], ['user', 'response', 'time'], ['trees'], ['graph', 'trees'], ['graph', 'minors', 'trees'], ['graph', 'minors', 'survey'] ] sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(sentences)] def testfile(): # temporary data will be stored to this file return os.path.join(tempfile.gettempdir(), 'gensim_doc2vec.tst') class TestDoc2VecModel(unittest.TestCase): def test_persistence(self): """Test storing/loading the entire model.""" model = doc2vec.Doc2Vec(DocsLeeCorpus(), min_count=1) model.save(testfile()) self.models_equal(model, doc2vec.Doc2Vec.load(testfile())) def test_load_mmap(self): """Test storing/loading the entire model.""" model = doc2vec.Doc2Vec(sentences, min_count=1) # test storing the internal 
arrays into separate files model.save(testfile(), sep_limit=0) self.models_equal(model, doc2vec.Doc2Vec.load(testfile())) # make sure mmaping the arrays back works, too self.models_equal(model, doc2vec.Doc2Vec.load(testfile(), mmap='r')) def test_int_doctags(self): """Test doc2vec doctag alternatives""" corpus = DocsLeeCorpus() model = doc2vec.Doc2Vec(min_count=1) model.build_vocab(corpus) self.assertEqual(len(model.docvecs.doctag_syn0), 300) self.assertEqual(model.docvecs[0].shape, (300,)) self.assertRaises(KeyError, model.__getitem__, '_*0') def test_string_doctags(self): """Test doc2vec doctag alternatives""" corpus = list(DocsLeeCorpus(True)) # force duplicated tags corpus = corpus[0:10] + corpus model = doc2vec.Doc2Vec(min_count=1) model.build_vocab(corpus) self.assertEqual(len(model.docvecs.doctag_syn0), 300) self.assertEqual(model.docvecs[0].shape, (300,)) self.assertEqual(model.docvecs['_*0'].shape, (300,)) self.assertTrue(all(model.docvecs['_*0'] == model.docvecs[0])) self.assertTrue(max(d.index for d in model.docvecs.doctags.values()) < len(model.docvecs.doctag_syn0)) def test_empty_errors(self): # no input => "RuntimeError: you must first build vocabulary before training the model" self.assertRaises(RuntimeError, doc2vec.Doc2Vec, []) # input not empty, but rather completely filtered out self.assertRaises(RuntimeError, doc2vec.Doc2Vec, list_corpus, min_count=10000) def model_sanity(self, model): """Any non-trivial model on DocsLeeCorpus can pass these sanity checks""" fire1 = 0 # doc 0 sydney fires fire2 = 8 # doc 8 sydney fires tennis1 = 6 # doc 6 tennis # inferred vector should be top10 close to bulk-trained one doc0_inferred = model.infer_vector(list(DocsLeeCorpus())[0].words) sims_to_infer = model.docvecs.most_similar([doc0_inferred],topn=len(model.docvecs)) f_rank = [docid for docid, sim in sims_to_infer].index(fire1) self.assertLess(fire1, 10) # fire8 should be top30 close to fire1 sims = model.docvecs.most_similar(fire1, topn=len(model.docvecs)) 
f2_rank = [docid for docid, sim in sims_to_infer].index(fire2) self.assertLess(f2_rank, 30) # same sims should appear in lookup by vec as by index doc0_vec = model.docvecs[fire1] sims2 = model.docvecs.most_similar(positive=[doc0_vec], topn=21) sims2 = [(id, sim) for id, sim in sims2 if id != fire1] # ignore the doc itself sims = sims[:20] self.assertEqual(list(zip(*sims))[0], list(zip(*sims2))[0]) # same doc ids self.assertTrue(np.allclose(list(zip(*sims))[1], list(zip(*sims2))[1])) # close-enough dists # tennis doc should be out-of-place among fire news self.assertEqual(model.docvecs.doesnt_match([fire1, tennis1, fire2]), tennis1) # fire docs should be closer than fire-tennis self.assertTrue(model.docvecs.similarity(fire1, fire2) > model.docvecs.similarity(fire1, tennis1)) def test_training(self): """Test doc2vec training.""" corpus = DocsLeeCorpus() model = doc2vec.Doc2Vec(size=100, min_count=2, iter=20) model.build_vocab(corpus) self.assertEqual(model.docvecs.doctag_syn0.shape, (300, 100)) model.train(corpus) self.model_sanity(model) # build vocab and train in one step; must be the same as above model2 = doc2vec.Doc2Vec(corpus, size=100, min_count=2, iter=20) self.models_equal(model, model2) def test_dbow_hs(self): """Test DBOW doc2vec training.""" model = doc2vec.Doc2Vec(list_corpus, dm=0, hs=1, negative=0, min_count=2, iter=20) self.model_sanity(model) def test_dmm_hs(self): """Test DM/mean doc2vec training.""" model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=1, size=24, window=4, hs=1, negative=0, alpha=0.05, min_count=2, iter=20) self.model_sanity(model) def test_dms_hs(self): """Test DM/sum doc2vec training.""" model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=0, size=24, window=4, hs=1, negative=0, alpha=0.05, min_count=2, iter=20) self.model_sanity(model) def test_dmc_hs(self): """Test DM/concatenate doc2vec training.""" model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_concat=1, size=24, window=4, hs=1, negative=0, alpha=0.05, min_count=2, iter=20) 
        self.model_sanity(model)

    def test_dbow_neg(self):
        """Test DBOW doc2vec training."""
        model = doc2vec.Doc2Vec(list_corpus, dm=0, hs=0, negative=10, min_count=2, iter=20)
        self.model_sanity(model)

    def test_dmm_neg(self):
        """Test DM/mean doc2vec training."""
        model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=1, size=24, window=4, hs=0, negative=10,
                                alpha=0.05, min_count=2, iter=20)
        self.model_sanity(model)

    def test_dms_neg(self):
        """Test DM/sum doc2vec training."""
        model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_mean=0, size=24, window=4, hs=0, negative=10,
                                alpha=0.05, min_count=2, iter=20)
        self.model_sanity(model)

    def test_dmc_neg(self):
        """Test DM/concatenate doc2vec training."""
        model = doc2vec.Doc2Vec(list_corpus, dm=1, dm_concat=1, size=24, window=4, hs=0, negative=10,
                                alpha=0.05, min_count=2, iter=20)
        self.model_sanity(model)

    def test_parallel(self):
        """Test doc2vec parallel training."""
        if doc2vec.FAST_VERSION < 0:
            # don't test the plain NumPy version for parallelism (too slow)
            return

        corpus = utils.RepeatCorpus(DocsLeeCorpus(), 10000)

        for workers in [2, 4]:
            model = doc2vec.Doc2Vec(corpus, workers=workers)
            self.model_sanity(model)

    def test_deterministic_hs(self):
        """Test doc2vec results identical with identical RNG seed."""
        # hs
        model = doc2vec.Doc2Vec(DocsLeeCorpus(), seed=42, workers=1)
        model2 = doc2vec.Doc2Vec(DocsLeeCorpus(), seed=42, workers=1)
        self.models_equal(model, model2)

    def test_deterministic_neg(self):
        """Test doc2vec results identical with identical RNG seed."""
        # neg
        model = doc2vec.Doc2Vec(DocsLeeCorpus(), hs=0, negative=3, seed=42, workers=1)
        model2 = doc2vec.Doc2Vec(DocsLeeCorpus(), hs=0, negative=3, seed=42, workers=1)
        self.models_equal(model, model2)

    def test_deterministic_dmc(self):
        """Test doc2vec results identical with identical RNG seed."""
        # bigger, dmc
        model = doc2vec.Doc2Vec(DocsLeeCorpus(), dm=1, dm_concat=1, size=24, window=4,
                                hs=1, negative=3, seed=42, workers=1)
        model2 = doc2vec.Doc2Vec(DocsLeeCorpus(), dm=1, dm_concat=1, size=24,
                                 window=4, hs=1, negative=3, seed=42, workers=1)
        self.models_equal(model, model2)

    def models_equal(self, model, model2):
        # Assert that two trained models share identical vocabulary and weights.
        # check words/hidden-weights
        self.assertEqual(len(model.vocab), len(model2.vocab))
        self.assertTrue(np.allclose(model.syn0, model2.syn0))
        if model.hs:
            self.assertTrue(np.allclose(model.syn1, model2.syn1))
        if model.negative:
            self.assertTrue(np.allclose(model.syn1neg, model2.syn1neg))
        # check docvecs
        self.assertEqual(len(model.docvecs.doctags), len(model2.docvecs.doctags))
        self.assertEqual(len(model.docvecs.index2doctag), len(model2.docvecs.index2doctag))
        self.assertTrue(np.allclose(model.docvecs.doctag_syn0, model2.docvecs.doctag_syn0))
#endclass TestDoc2VecModel


if not hasattr(TestDoc2VecModel, 'assertLess'):
    # workaround for python 2.6: unittest there lacks assertLess, so patch in
    # an equivalent implementation
    def assertLess(self, a, b, msg=None):
        self.assertTrue(a < b, msg="%s is not less than %s" % (a, b))

    setattr(TestDoc2VecModel, 'assertLess', assertLess)


# following code is useful for reproducing paragraph-vectors paper sentiment experiments

class ConcatenatedDoc2Vec(object):
    """
    Concatenation of multiple models for reproducing the Paragraph Vectors paper.
    Models must have exactly-matching vocabulary and document IDs. (Models should
    be trained separately; this wrapper just returns concatenated results.)
    """
    def __init__(self, models):
        self.models = models
        if hasattr(models[0], 'docvecs'):
            self.docvecs = ConcatenatedDocvecs([model.docvecs for model in models])

    def __getitem__(self, token):
        # concatenate the per-model vectors for the same token, in model order
        return np.concatenate([model[token] for model in self.models])

    def infer_vector(self, document, alpha=0.1, min_alpha=0.0001, steps=5):
        return np.concatenate([model.infer_vector(document, alpha, min_alpha, steps) for model in self.models])

    def train(self, ignored):
        pass  # train subcomponents individually


class ConcatenatedDocvecs(object):
    # Same concatenation wrapper as ConcatenatedDoc2Vec, but over docvecs lookups.
    def __init__(self, models):
        self.models = models

    def __getitem__(self, token):
        return np.concatenate([model[token] for model in self.models])


SentimentDocument = namedtuple('SentimentDocument', 'words tags split sentiment')


def read_su_sentiment_rotten_tomatoes(dirname, lowercase=True):
    """
    Read and return documents from the Stanford Sentiment Treebank
    corpus (Rotten Tomatoes reviews), from http://nlp.stanford.edu/sentiment/

    Initialize the corpus from a given directory, where
    http://nlp.stanford.edu/~socherr/stanfordSentimentTreebank.zip
    has been expanded. It's not too big, so compose entirely into memory.
    """
    logging.info("loading corpus from %s" % dirname)

    # many mangled chars in sentences (datasetSentences.txt)
    chars_sst_mangled = ['à', 'á', 'â', 'ã', 'æ', 'ç', 'è', 'é', 'í',
                         'í', 'ï', 'ñ', 'ó', 'ô', 'ö', 'û', 'ü']
    sentence_fixups = [(char.encode('utf-8').decode('latin1'), char) for char in chars_sst_mangled]
    # more junk, and the replace necessary for sentence-phrase consistency
    sentence_fixups.extend([
        ('Â', ''),
        ('\xa0', ' '),
        ('-LRB-', '('),
        ('-RRB-', ')'),
    ])
    # only this junk in phrases (dictionary.txt)
    phrase_fixups = [('\xa0', ' ')]

    # sentence_id and split are only positive for the full sentences

    # read sentences to temp {sentence -> (id,split) dict, to correlate with dictionary.txt
    info_by_sentence = {}
    with open(os.path.join(dirname, 'datasetSentences.txt'), 'r') as sentences:
        with open(os.path.join(dirname, 'datasetSplit.txt'), 'r') as splits:
            next(sentences)  # legend
            next(splits)  # legend
            for sentence_line, split_line in izip(sentences, splits):
                (id, text) = sentence_line.split('\t')
                id = int(id)
                text = text.rstrip()
                for junk, fix in sentence_fixups:
                    text = text.replace(junk, fix)
                (id2, split_i) = split_line.split(',')
                assert id == int(id2)
                if text not in info_by_sentence:  # discard duplicates
                    info_by_sentence[text] = (id, int(split_i))

    # read all phrase text
    phrases = [None] * 239232  # known size of phrases
    with open(os.path.join(dirname, 'dictionary.txt'), 'r') as phrase_lines:
        for line in phrase_lines:
            (text, id) = line.split('|')
            for junk, fix in phrase_fixups:
                text = text.replace(junk, fix)
            phrases[int(id)] = text.rstrip()  # for 1st pass just string

    SentimentPhrase = namedtuple('SentimentPhrase', SentimentDocument._fields + ('sentence_id',))
    # add sentiment labels, correlate with sentences
    with open(os.path.join(dirname, 'sentiment_labels.txt'), 'r') as sentiments:
        next(sentiments)  # legend
        for line in sentiments:
            (id, sentiment) = line.split('|')
            id = int(id)
            sentiment = float(sentiment)
            text = phrases[id]
            words = text.split()
            if lowercase:
                words = [word.lower() for word in words]
            # sentence_id/split only known for full sentences; phrases default to (None, 0)
            (sentence_id, split_i) = info_by_sentence.get(text, (None, 0))
            split = [None, 'train', 'test', 'dev'][split_i]
            phrases[id] = SentimentPhrase(words, [id], split, sentiment, sentence_id)

    assert len([phrase for phrase in phrases if phrase.sentence_id is not None]) == len(info_by_sentence)  # all
    # counts don't match 8544, 2210, 1101 because 13 TRAIN and 1 DEV sentences are duplicates
    assert len([phrase for phrase in phrases if phrase.split == 'train']) == 8531  # 'train'
    assert len([phrase for phrase in phrases if phrase.split == 'test']) == 2210  # 'test'
    assert len([phrase for phrase in phrases if phrase.split == 'dev']) == 1100  # 'dev'

    logging.info("loaded corpus with %i sentences and %i phrases from %s",
                 len(info_by_sentence), len(phrases), dirname)
    return phrases


if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    logging.info("using optimization %s", doc2vec.FAST_VERSION)
    unittest.main()
Hippu/pilttikronikka
refs/heads/master
kronikka/kronikka_conf/settings/local.py
1
# Local development settings: extends the shared base settings with a
# file-backed SQLite database and the Django Debug Toolbar.
from base import *

DEBUG = True

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',  # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': PROJECT_ROOT.ancestor(1).child("dev.sqlite"),  # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': '',
        'PASSWORD': '',
        'HOST': '',  # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'PORT': '',  # Set to empty string for default.
    }
}

# Debug toolbar only renders for requests originating from these addresses.
INTERNAL_IPS = ('127.0.0.1', )

INSTALLED_APPS += ('debug_toolbar', )

MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware', )
nop33/indico
refs/heads/master
indico/modules/events/models/settings.py
2
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.

from __future__ import unicode_literals

from sqlalchemy.ext.declarative import declared_attr

from indico.core.db import db
from indico.core.db.sqlalchemy.util.models import auto_table_args
from indico.core.settings.models.base import JSONSettingsBase, PrincipalSettingsBase
from indico.util.decorators import strict_classproperty
from indico.util.string import return_ascii


class EventSettingsMixin(object):
    """Mixin tying a settings row to an Event via ``event_id``/``event``."""

    # name of the backref created on Event; concrete subclasses must override
    settings_backref_name = None

    @strict_classproperty
    @staticmethod
    def __auto_table_args():
        # NOTE(review): double-underscore name is intentionally class-mangled so
        # each class in the hierarchy contributes its own table args, which
        # `auto_table_args` merges — confirm against auto_table_args docs.
        return (db.Index(None, 'event_id', 'module', 'name'),
                db.Index(None, 'event_id', 'module'),
                {'schema': 'events'})

    @declared_attr
    def event_id(cls):
        return db.Column(
            db.Integer,
            db.ForeignKey('events.events.id'),
            index=True,
            nullable=False
        )

    @declared_attr
    def event(cls):
        return db.relationship(
            'Event',
            lazy=True,
            backref=db.backref(
                cls.settings_backref_name,
                lazy='dynamic'
            )
        )


class EventSetting(JSONSettingsBase, EventSettingsMixin, db.Model):
    """Per-event JSON setting, unique per (event, module, name)."""

    settings_backref_name = 'settings'

    @strict_classproperty
    @staticmethod
    def __auto_table_args():
        return db.UniqueConstraint('event_id', 'module', 'name'),

    @declared_attr
    def __table_args__(cls):
        return auto_table_args(cls)

    @return_ascii
    def __repr__(self):
        return '<EventSetting({}, {}, {}, {!r})>'.format(self.event_id, self.module, self.name, self.value)


class EventSettingPrincipal(PrincipalSettingsBase, EventSettingsMixin, db.Model):
    """Per-event ACL-style setting mapping a principal to a setting name."""

    principal_backref_name = 'in_event_settings_acls'
    settings_backref_name = 'settings_principals'
    # event_id participates in the settings key (see PrincipalSettingsBase)
    extra_key_cols = ('event_id',)

    @declared_attr
    def __table_args__(cls):
        return auto_table_args(cls)

    @return_ascii
    def __repr__(self):
        return '<EventSettingPrincipal({}, {}, {}, {!r})>'.format(self.event_id, self.module, self.name,
                                                                  self.principal)
rherrick/XNATImageViewer
refs/heads/master
utility-scripts/python/_old/ImageGenerator/GenerateBySagittalReference.py
4
import sys
import os
from PIL import Image
import shutil


def _load_reference_pixels(ref_file_names):
    """Open each reference image once and return [(pixel_access, height), ...].

    Previously every reference file was re-opened and re-decoded for every
    blank output file — pure loop-invariant work. Loading them once up front
    turns O(files * refs) decodes into O(refs).
    """
    refs = []
    for name in ref_file_names:
        ref_img = Image.open(name)
        refs.append((ref_img.load(), ref_img.size[1]))
    return refs


def generateCoronal(blankDir, genSize, refFileNames):
    """Rebuild the coronal slice images under ``blankDir`` from sagittal references.

    For blank image i, column k is filled from reference image
    refFileNames[len(refFileNames) - k - 1] at column i (same row j).
    ``genSize`` is accepted for interface compatibility but unused.
    Images are modified in place and re-saved as JPEG.
    """
    print("Generating coronal images in %s" % (blankDir))
    # Inner loop consumes references in reverse order; preload them that way.
    refs = _load_reference_pixels(list(reversed(refFileNames)))
    for root, dirs, files in os.walk(blankDir):
        # NOTE(review): os.walk yields files in arbitrary order, and index i is
        # used as the slice coordinate — assumes the directory listing is
        # already ordered; confirm with the caller before sorting.
        for i in range(len(files)):  # i is 256
            fn = os.path.join(root, files[i])
            img = Image.open(fn)
            pix = img.load()
            for k, (ref_px, ref_height) in enumerate(refs):
                for j in range(ref_height):  # j is 256
                    pix[k, j] = ref_px[i, j]
            img.save(fn, "JPEG")


def generateAxial(blankDir, genSize, refFileNames):
    """Rebuild the axial slice images under ``blankDir`` from sagittal references.

    For blank image i, column k is filled from reference image refFileNames[k],
    reading row (height - i - 1) transposed into column j.
    ``genSize`` is accepted for interface compatibility but unused.
    Images are modified in place and re-saved as JPEG.
    """
    print("Generating axial images in %s" % (blankDir))
    refs = _load_reference_pixels(refFileNames)
    for root, dirs, files in os.walk(blankDir):
        for i in range(len(files)):  # i is 160
            fn = os.path.join(root, files[i])
            img = Image.open(fn)
            pix = img.load()
            for k, (ref_px, ref_height) in enumerate(refs):
                for j in range(ref_height):  # j is 256
                    pix[k, j] = ref_px[j, (ref_height - i - 1)]
            img.save(fn, "JPEG")
fpy171/django
refs/heads/master
tests/template_tests/syntax_tests/test_exceptions.py
513
"""Tests that template inheritance errors raise the expected exceptions."""
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.test import SimpleTestCase

from ..utils import setup
from .test_extends import inheritance_templates


class ExceptionsTests(SimpleTestCase):

    @setup({'exception01': "{% extends 'nonexistent' %}"})
    def test_exception01(self):
        """
        Raise exception for invalid template name
        """
        with self.assertRaises(TemplateDoesNotExist):
            self.engine.render_to_string('exception01')

    @setup({'exception02': '{% extends nonexistent %}'})
    def test_exception02(self):
        """
        Raise exception for invalid variable template name
        """
        # With string_if_invalid set, the unresolvable variable renders as that
        # string, so the failure happens at template-loading time instead.
        if self.engine.string_if_invalid:
            with self.assertRaises(TemplateDoesNotExist):
                self.engine.render_to_string('exception02')
        else:
            with self.assertRaises(TemplateSyntaxError):
                self.engine.render_to_string('exception02')

    @setup(
        {'exception03': "{% extends 'inheritance01' %}"
                        "{% block first %}2{% endblock %}{% extends 'inheritance16' %}"},
        inheritance_templates,
    )
    def test_exception03(self):
        """
        Raise exception for extra {% extends %} tags
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('exception03')

    @setup(
        {'exception04': "{% extends 'inheritance17' %}{% block first %}{% echo 400 %}5678{% endblock %}"},
        inheritance_templates,
    )
    def test_exception04(self):
        """
        Raise exception for custom tags used in child with {% load %} tag in parent, not in child
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('exception04')

    @setup({'exception05': '{% block first %}{{ block.super }}{% endblock %}'})
    def test_exception05(self):
        """
        Raise exception for block.super used in base template
        """
        with self.assertRaises(TemplateSyntaxError):
            self.engine.render_to_string('exception05')
williamsjj/shiji
refs/heads/master
shiji/stats/vendor/txstatsd/tests/test_router.py
3
# Copyright (C) 2011-2012 Canonical Services Ltd
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

from unittest import TestCase

from twisted.internet.protocol import DatagramProtocol, Factory
from twisted.protocols.basic import LineReceiver
from twisted.application.service import MultiService
from twisted.internet import reactor, defer
from twisted.trial.unittest import TestCase as TxTestCase

from txstatsd.server.processor import MessageProcessor
from txstatsd.server.router import Router


class TestMessageProcessor(object):
    # Minimal processor stub: records every routed message for assertions.

    def __init__(self):
        self.messages = []

    def process_message(self, *args):
        self.messages.append(args)

    def flush(self):
        pass


class RouteMessagesTest(TestCase):

    def setUp(self):
        self.processor = TestMessageProcessor()
        self.router = Router(self.processor, "")

    def update_rules(self, rules_config):
        # Replace the router's rule set from a rules-config string.
        self.router.rules = self.router.build_rules(rules_config)

    def test_message_processor_integration(self):
        """
        A message gets routed to the processor.
        """
        processor = MessageProcessor()
        router = Router(processor, "")
        router.process("gorets:1|c")
        self.assertEqual(len(processor.counter_metrics), 1)

    def test_receive_counter(self):
        self.router.process("gorets:1|c")
        self.assertEqual(len(self.processor.messages), 1)

    def test_any_and_drop(self):
        """
        Any message gets dropped with the drop rule.
        """
        self.update_rules("any => drop")
        self.router.process("gorets:1|c")
        self.assertEqual(len(self.processor.messages), 0)

    def test_metric_path_like(self):
        """
        path_like matches glob expressions.
        """
        self.update_rules("path_like goret* => drop")
        self.router.process("gorets:1|c")
        self.router.process("gorets:1|d")
        self.router.process("goret:1|d")
        self.router.process("nomatch:1|d")
        self.assertEqual(len(self.processor.messages), 1)
        self.assertEqual(self.processor.messages[0][2], "nomatch")

    def test_receive_two_rules_no_match(self):
        """
        Messages that do not match more than one rule are processed
        just fine.
        """
        self.update_rules("path_like goret* => drop\npath_like glork* => drop\n")
        self.router.process("nomatch:1|c")
        self.assertEqual(len(self.processor.messages), 1)

    def test_not(self):
        """
        Messages not matching the path_like expression get dropped.
        """
        self.update_rules("not path_like goret* => drop")
        self.router.process("gorets:1|c")
        self.router.process("nomatch:1|d")
        self.assertEqual(len(self.processor.messages), 1)
        self.assertEqual(self.processor.messages[0][2], "gorets")

    def test_rewrite(self):
        """
        Process all messages but only rewrite matching ones.
        """
        self.update_rules(r"any => rewrite (gorets) glork.\1")
        self.router.process("gorets:1|c")
        self.router.process("nomatch:1|d")
        self.assertEqual(len(self.processor.messages), 2)
        self.assertEqual(self.processor.messages[0][2], "glork.gorets")
        self.assertEqual(self.processor.messages[1][2], "nomatch")

    def test_rewrite_and_dup(self):
        """
        Process all messages but only rewrite matching ones.

        If dup flag is set then duplicate original message without
        rewriting it.
        """
        self.update_rules(r"any => rewrite (gorets) glork.\1 dup")
        self.router.process("gorets:1|c")
        self.router.process("nomatch:1|d")
        self.assertEqual(len(self.processor.messages), 3)
        self.assertEqual(self.processor.messages[0][2], "gorets")
        self.assertEqual(self.processor.messages[1][2], "glork.gorets")
        self.assertEqual(self.processor.messages[2][2], "nomatch")

    def test_rewrite_and_no_dup(self):
        """
        Process all messages but only rewrite matching ones. If dup flag is
        set to no-dup, then the original message is not duplicated.
        """
        self.update_rules(r"any => rewrite (gorets) glork.\1 no-dup")
        self.router.process("gorets:1|c")
        self.router.process("nomatch:1|d")
        self.assertEqual(len(self.processor.messages), 2)
        self.assertEqual(self.processor.messages[0][2], "glork.gorets")
        self.assertEqual(self.processor.messages[1][2], "nomatch")

    def test_set_metric_type(self):
        """
        Set metric type to something else.
        """
        self.update_rules(r"any => set_metric_type d")
        self.router.process("gorets:1|c")
        self.assertEqual(self.processor.messages[0][1], "d")
        self.assertEqual(self.processor.messages[0][2], "gorets")

    def test_set_metric_type_dup(self):
        """
        Set metric type to something else. If the dup flag is set,
        duplicate the original message.
        """
        self.update_rules(r"any => set_metric_type d dup")
        self.router.process("gorets:1|c")
        self.assertEqual(self.processor.messages[0][1], "c")
        self.assertEqual(self.processor.messages[0][2], "gorets")
        self.assertEqual(self.processor.messages[1][1], "d")
        self.assertEqual(self.processor.messages[1][2], "gorets")

    def test_set_metric_type_no_dup(self):
        """
        Set metric type to something else. If the dup flag is set to no-dup
        then do not duplicate the original message.
        """
        self.update_rules(r"any => set_metric_type d no-dup")
        self.router.process("gorets:1|c")
        self.assertEqual(self.processor.messages[0][1], "d")
        self.assertEqual(self.processor.messages[0][2], "gorets")


class TestUDPRedirect(TxTestCase):

    def setUp(self):
        self.service = MultiService()
        self.received = []

        class Collect(DatagramProtocol):
            # `cself` is the protocol instance; `self` (closure) is the test case.

            def datagramReceived(cself, data, host_port):
                self.got_data(data)

        self.port = reactor.listenUDP(0, Collect())

        self.processor = TestMessageProcessor()
        self.router = Router(self.processor,
                             r"any => redirect_udp 127.0.0.1 %s" %
                             (self.port.getHost().port,),
                             service=self.service)
        self.service.startService()
        return self.router.ready

    @defer.inlineCallbacks
    def tearDown(self):
        yield self.service.stopService()
        # NOTE(review): stopListening() returns a Deferred that is not yielded
        # here — the port may still be closing when teardown returns; confirm
        # whether trial's cleanup tolerates this.
        self.port.stopListening()

    def test_redirect(self):
        """
        A processed message is redirected to the configured collector endpoint.
        """
        message = "gorets:1|c"
        d = defer.Deferred()

        def got_data(data):
            self.assertEqual(data, message)
            d.callback(True)
        self.got_data = got_data
        self.router.process(message)
        return d


class TestTCPRedirect(TestUDPRedirect):
    # Reuses test_redirect from TestUDPRedirect, but over a TCP transport.

    def setUp(self):
        self.service = MultiService()
        self.received = []

        class Collect(LineReceiver):

            def lineReceived(cself, data):
                self.got_data(data)

        class CollectFactory(Factory):

            def buildProtocol(self, addr):
                return Collect()

        self.port = reactor.listenTCP(0, CollectFactory())

        self.processor = TestMessageProcessor()
        self.router = Router(self.processor,
                             r"any => redirect_tcp 127.0.0.1 %s" %
                             (self.port.getHost().port,),
                             service=self.service)
        self.service.startService()
        return self.router.ready
GustavoHennig/ansible
refs/heads/devel
lib/ansible/module_utils/azure_rm_common.py
48
# # Copyright (c) 2016 Matt Davis, <mdavis@ansible.com> # Chris Houseknecht, <house@redhat.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # import json import os import re import sys import copy import importlib import inspect from packaging.version import Version from os.path import expanduser from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.six.moves import configparser AZURE_COMMON_ARGS = dict( profile=dict(type='str'), subscription_id=dict(type='str', no_log=True), client_id=dict(type='str', no_log=True), secret=dict(type='str', no_log=True), tenant=dict(type='str', no_log=True), ad_user=dict(type='str', no_log=True), password=dict(type='str', no_log=True), # debug=dict(type='bool', default=False), ) AZURE_CREDENTIAL_ENV_MAPPING = dict( profile='AZURE_PROFILE', subscription_id='AZURE_SUBSCRIPTION_ID', client_id='AZURE_CLIENT_ID', secret='AZURE_SECRET', tenant='AZURE_TENANT', ad_user='AZURE_AD_USER', password='AZURE_PASSWORD' ) AZURE_TAG_ARGS = dict( tags=dict(type='dict'), append_tags=dict(type='bool', default=True), ) AZURE_COMMON_REQUIRED_IF = [ ('log_mode', 'file', ['log_path']) ] ANSIBLE_USER_AGENT = 'Ansible-Deploy' CIDR_PATTERN = re.compile("(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1" "[0-9]{2}|2[0-4][0-9]|25[0-5])(\/([0-9]|[1-2][0-9]|3[0-2]))") AZURE_SUCCESS_STATE = "Succeeded" 
AZURE_FAILED_STATE = "Failed" HAS_AZURE = True HAS_AZURE_EXC = None HAS_MSRESTAZURE = True HAS_MSRESTAZURE_EXC = None # NB: packaging issue sometimes cause msrestazure not to be installed, check it separately try: from msrest.serialization import Serializer except ImportError as exc: HAS_MSRESTAZURE_EXC = exc HAS_MSRESTAZURE = False try: from enum import Enum from msrestazure.azure_exceptions import CloudError from azure.mgmt.network.models import PublicIPAddress, NetworkSecurityGroup, SecurityRule, NetworkInterface, \ NetworkInterfaceIPConfiguration, Subnet from azure.common.credentials import ServicePrincipalCredentials, UserPassCredentials from azure.mgmt.network.version import VERSION as network_client_version from azure.mgmt.storage.version import VERSION as storage_client_version from azure.mgmt.compute.version import VERSION as compute_client_version from azure.mgmt.resource.version import VERSION as resource_client_version from azure.mgmt.network.network_management_client import NetworkManagementClient from azure.mgmt.resource.resources.resource_management_client import ResourceManagementClient from azure.mgmt.storage.storage_management_client import StorageManagementClient from azure.mgmt.compute.compute_management_client import ComputeManagementClient from azure.storage.cloudstorageaccount import CloudStorageAccount except ImportError as exc: HAS_AZURE_EXC = exc HAS_AZURE = False def azure_id_to_dict(id): pieces = re.sub(r'^\/', '', id).split('/') result = {} index = 0 while index < len(pieces) - 1: result[pieces[index]] = pieces[index + 1] index += 1 return result AZURE_EXPECTED_VERSIONS = dict( storage_client_version="0.30.0rc5", compute_client_version="0.30.0rc5", network_client_version="0.30.0rc5", resource_client_version="0.30.0rc5" ) AZURE_MIN_RELEASE = '2.0.0rc5' class AzureRMModuleBase(object): def __init__(self, derived_arg_spec, bypass_checks=False, no_log=False, check_invalid_arguments=True, mutually_exclusive=None, required_together=None, 
                 required_one_of=None, add_file_common_args=False, supports_check_mode=False,
                 required_if=None, supports_tags=True, facts_module=False):

        # Merge the Azure-common argument spec (and tag args, if supported)
        # with the module-specific spec supplied by the derived class.
        merged_arg_spec = dict()
        merged_arg_spec.update(AZURE_COMMON_ARGS)
        if supports_tags:
            merged_arg_spec.update(AZURE_TAG_ARGS)

        if derived_arg_spec:
            merged_arg_spec.update(derived_arg_spec)

        merged_required_if = list(AZURE_COMMON_REQUIRED_IF)
        if required_if:
            merged_required_if += required_if

        self.module = AnsibleModule(argument_spec=merged_arg_spec,
                                    bypass_checks=bypass_checks,
                                    no_log=no_log,
                                    check_invalid_arguments=check_invalid_arguments,
                                    mutually_exclusive=mutually_exclusive,
                                    required_together=required_together,
                                    required_one_of=required_one_of,
                                    add_file_common_args=add_file_common_args,
                                    supports_check_mode=supports_check_mode,
                                    required_if=merged_required_if)

        if not HAS_MSRESTAZURE:
            self.fail("Do you have msrestazure installed? Try `pip install msrestazure`"
                      "- {0}".format(HAS_MSRESTAZURE_EXC))

        if not HAS_AZURE:
            self.fail("Do you have azure>={1} installed? Try `pip install 'azure>={1}' --upgrade`"
                      "- {0}".format(HAS_AZURE_EXC, AZURE_MIN_RELEASE))

        # Lazily-created Azure SDK clients (populated by property accessors).
        self._network_client = None
        self._storage_client = None
        self._resource_client = None
        self._compute_client = None
        self.check_mode = self.module.check_mode
        self.facts_module = facts_module
        # self.debug = self.module.params.get('debug')

        # authenticate
        self.credentials = self._get_credentials(self.module.params)
        if not self.credentials:
            self.fail("Failed to get credentials. Either pass as parameters, set environment variables, "
                      "or define a profile in ~/.azure/credentials.")

        if self.credentials.get('subscription_id', None) is None:
            self.fail("Credentials did not include a subscription_id value.")
        self.log("setting subscription_id")
        self.subscription_id = self.credentials['subscription_id']

        # Prefer service-principal auth when the full triple is present;
        # fall back to user/password (AD) auth.
        if self.credentials.get('client_id') is not None and \
           self.credentials.get('secret') is not None and \
           self.credentials.get('tenant') is not None:
            self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'],
                                                                 secret=self.credentials['secret'],
                                                                 tenant=self.credentials['tenant'])
        elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None:
            self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], self.credentials['password'])
        else:
            self.fail("Failed to authenticate with provided credentials. Some attributes were missing. "
                      "Credentials must include client_id, secret and tenant or ad_user and password.")

        # common parameter validation
        if self.module.params.get('tags'):
            self.validate_tags(self.module.params['tags'])

        # Run the derived module's logic and exit the Ansible module with its result.
        res = self.exec_module(**self.module.params)
        self.module.exit_json(**res)

    def check_client_version(self, client_name, client_version, expected_version):
        # Ensure Azure modules are at least 2.0.0rc5.
        if Version(client_version) < Version(expected_version):
            self.fail("Installed {0} client version is {1}. The supported version is {2}. Try "
                      "`pip install azure>={3} --upgrade`".format(client_name, client_version, expected_version,
                                                                  AZURE_MIN_RELEASE))

    def exec_module(self, **kwargs):
        # Abstract hook: every derived module must override this.
        self.fail("Error: {0} failed to implement exec_module method.".format(self.__class__.__name__))

    def fail(self, msg, **kwargs):
        '''
        Shortcut for calling module.fail()

        :param msg: Error message text.
        :param kwargs: Any key=value pairs
        :return: None
        '''
        self.module.fail_json(msg=msg, **kwargs)

    def log(self, msg, pretty_print=False):
        # Intentionally a no-op in production; see commented code below.
        pass
        # Use only during module development
        # if self.debug:
        #     log_file = open('azure_rm.log', 'a')
        #     if pretty_print:
        #         log_file.write(json.dumps(msg, indent=4, sort_keys=True))
        #     else:
        #         log_file.write(msg + u'\n')

    def validate_tags(self, tags):
        '''
        Check if tags dictionary contains string:string pairs.

        :param tags: dictionary of string:string pairs
        :return: None
        '''
        # Facts modules skip validation (tags there are a filter, not input).
        if not self.facts_module:
            if not isinstance(tags, dict):
                self.fail("Tags must be a dictionary of string:string values.")
            for key, value in tags.items():
                if not isinstance(value, str):
                    self.fail("Tags values must be strings. Found {0}:{1}".format(str(key), str(value)))

    def update_tags(self, tags):
        '''
        Call from the module to update metadata tags. Returns tuple
        with bool indicating if there was a change and dict of new
        tags to assign to the object.

        :param tags: metadata tags from the object
        :return: bool, dict
        '''
        new_tags = copy.copy(tags) if isinstance(tags, dict) else dict()
        changed = False
        if isinstance(self.module.params.get('tags'), dict):
            # Add or overwrite tags requested via module parameters.
            for key, value in self.module.params['tags'].items():
                if not new_tags.get(key) or new_tags[key] != value:
                    changed = True
                    new_tags[key] = value
            # Remove existing tags that were not requested.
            # NOTE(review): whether this purge applies only when params['tags']
            # is a dict (nested as written here) matters for append_tags
            # semantics — confirm against callers.
            if isinstance(tags, dict):
                for key, value in tags.items():
                    if not self.module.params['tags'].get(key):
                        new_tags.pop(key)
                        changed = True
        return changed, new_tags

    def has_tags(self, obj_tags, tag_list):
        '''
        Used in fact modules to compare object tags to list of parameter tags. Return true if list of parameter tags
        exists in object tags.

        :param obj_tags: dictionary of tags from an Azure object.
        :param tag_list: list of tag keys or tag key:value pairs
        :return: bool
        '''

        if not obj_tags and tag_list:
            return False

        if not tag_list:
            return True

        matches = 0
        result = False
        for tag in tag_list:
            tag_key = tag
            tag_value = None
            if ':' in tag:
                # "key:value" entries must match both key and value
                tag_key, tag_value = tag.split(':')
            if tag_value and obj_tags.get(tag_key) == tag_value:
                matches += 1
            elif not tag_value and obj_tags.get(tag_key):
                matches += 1
        if matches == len(tag_list):
            result = True
        return result

    def get_resource_group(self, resource_group):
        '''
        Fetch a resource group.

        :param resource_group: name of a resource group
        :return: resource group object
        '''
        try:
            return self.rm_client.resource_groups.get(resource_group)
        except CloudError:
            self.fail("Parameter error: resource group {0} not found".format(resource_group))
        except Exception as exc:
            self.fail("Error retrieving resource group {0} - {1}".format(resource_group, str(exc)))

    def _get_profile(self, profile="default"):
        # Read a named credentials profile from ~/.azure/credentials (INI format).
        path = expanduser("~/.azure/credentials")
        try:
            config = configparser.ConfigParser()
            config.read(path)
        except Exception as exc:
            self.fail("Failed to access {0}. Check that the file exists and you have read "
                      "access. {1}".format(path, str(exc)))
        credentials = dict()
        for key in AZURE_CREDENTIAL_ENV_MAPPING:
            try:
                credentials[key] = config.get(profile, key, raw=True)
            except:
                # NOTE(review): bare except silently skips any missing/invalid
                # option; narrowing to configparser.Error would be safer.
                pass

        if credentials.get('subscription_id'):
            return credentials

        return None

    def _get_env_credentials(self):
        # Collect credentials from AZURE_* environment variables.
        env_credentials = dict()
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
            env_credentials[attribute] = os.environ.get(env_variable, None)

        # An AZURE_PROFILE env var defers to the named file-based profile.
        if env_credentials['profile']:
            credentials = self._get_profile(env_credentials['profile'])
            return credentials

        if env_credentials.get('subscription_id') is not None:
            return env_credentials

        return None

    def _get_credentials(self, params):
        # Get authentication credentials.
        # Precedence: module parameters -> environment variables -> default profile in ~/.azure/credentials.
        self.log('Getting credentials')

        arg_credentials = dict()
        for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items():
            arg_credentials[attribute] = params.get(attribute, None)

        # try module params
        if arg_credentials['profile'] is not None:
            self.log('Retrieving credentials with profile parameter.')
            credentials = self._get_profile(arg_credentials['profile'])
            return credentials

        if arg_credentials['subscription_id']:
            self.log('Received credentials from parameters.')
            return arg_credentials

        # try environment
        env_credentials = self._get_env_credentials()
        if env_credentials:
            self.log('Received credentials from env.')
            return env_credentials

        # try default profile from ~./azure/credentials
        default_credentials = self._get_profile()
        if default_credentials:
            self.log('Retrieved default profile credentials from ~/.azure/credentials.')
            return default_credentials

        return None

    def serialize_obj(self, obj, class_name, enum_modules=[]):
        '''
        Return a JSON representation of an Azure object.

        :param obj: Azure object
        :param class_name: Name of the object's class
        :param enum_modules: List of module names to build enum dependencies from.
        :return: serialized result
        '''
        # NOTE(review): mutable default argument (enum_modules=[]) — read-only
        # here, so harmless, but a None default would be the safe idiom.
        dependencies = dict()
        if enum_modules:
            for module_name in enum_modules:
                mod = importlib.import_module(module_name)
                for mod_class_name, mod_class_obj in inspect.getmembers(mod, predicate=inspect.isclass):
                    dependencies[mod_class_name] = mod_class_obj
            self.log("dependencies: ")
            self.log(str(dependencies))
        serializer = Serializer(classes=dependencies)
        return serializer.body(obj, class_name)

    def get_poller_result(self, poller, wait=5):
        '''
        Consistent method of waiting on and retrieving results from Azure's long poller

        :param poller Azure poller object
        :return object resulting from the original request
        '''
        try:
            delay = wait
            while not poller.done():
                self.log("Waiting for {0} sec".format(delay))
                poller.wait(timeout=delay)
            return poller.result()
        except Exception as exc:
            self.log(str(exc))
            raise

    def check_provisioning_state(self, azure_object, requested_state='present'):
        '''
        Check an Azure object's provisioning state. If something did not complete the provisioning
        process, then we cannot operate on it.

        :param azure_object An object such as a subnet, storageaccount, etc. Must have provisioning_state
                            and name attributes.
        :return None
        '''

        # Newer SDK objects nest provisioning_state under .properties; handle
        # that shape first, then fall back to a flat provisioning_state attr.
        if hasattr(azure_object, 'properties') and hasattr(azure_object.properties, 'provisioning_state') and \
           hasattr(azure_object, 'name'):
            # resource group object fits this model
            if isinstance(azure_object.properties.provisioning_state, Enum):
                if azure_object.properties.provisioning_state.value != AZURE_SUCCESS_STATE and \
                   requested_state != 'absent':
                    self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                        azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
                return
            if azure_object.properties.provisioning_state != AZURE_SUCCESS_STATE and \
               requested_state != 'absent':
                self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                    azure_object.name, azure_object.properties.provisioning_state, AZURE_SUCCESS_STATE))
            return

        if hasattr(azure_object, 'provisioning_state') or not hasattr(azure_object, 'name'):
            if isinstance(azure_object.provisioning_state, Enum):
                if azure_object.provisioning_state.value != AZURE_SUCCESS_STATE and requested_state != 'absent':
                    self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                        azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))
                return
            if azure_object.provisioning_state != AZURE_SUCCESS_STATE and requested_state != 'absent':
                self.fail("Error {0} has a provisioning state of {1}. Expecting state to be {2}.".format(
                    azure_object.name, azure_object.provisioning_state, AZURE_SUCCESS_STATE))

    def get_blob_client(self, resource_group_name, storage_account_name):
        # Build a block-blob service client for a storage account by first
        # fetching the account keys via the management API.
        keys = dict()
        try:
            # Get keys from the storage account
            self.log('Getting keys')
            account_keys = self.storage_client.storage_accounts.list_keys(resource_group_name, storage_account_name)
        except Exception as exc:
            self.fail("Error getting keys for account {0} - {1}".format(storage_account_name, str(exc)))
        try:
            self.log('Create blob service')
            return CloudStorageAccount(storage_account_name, account_keys.keys[0].value).create_block_blob_service()
        except Exception as exc:
            self.fail("Error creating blob service client for storage account {0} - {1}".format(storage_account_name,
                                                                                                str(exc)))

    def create_default_pip(self, resource_group, location, name, allocation_method='Dynamic'):
        '''
        Create a default public IP address <name>01 to associate with a network interface.
        If a PIP address matching <vm name>01 exists, return it. Otherwise, create one.
:param resource_group: name of an existing resource group :param location: a valid azure location :param name: base name to assign the public IP address :param allocation_method: one of 'Static' or 'Dynamic' :return: PIP object ''' public_ip_name = name + '01' pip = None self.log("Starting create_default_pip {0}".format(public_ip_name)) self.log("Check to see if public IP {0} exists".format(public_ip_name)) try: pip = self.network_client.public_ip_addresses.get(resource_group, public_ip_name) except CloudError: pass if pip: self.log("Public ip {0} found.".format(public_ip_name)) self.check_provisioning_state(pip) return pip params = PublicIPAddress( location=location, public_ip_allocation_method=allocation_method, ) self.log('Creating default public IP {0}'.format(public_ip_name)) try: poller = self.network_client.public_ip_addresses.create_or_update(resource_group, public_ip_name, params) except Exception as exc: self.fail("Error creating {0} - {1}".format(public_ip_name, str(exc))) return self.get_poller_result(poller) def create_default_securitygroup(self, resource_group, location, name, os_type, open_ports): ''' Create a default security group <name>01 to associate with a network interface. If a security group matching <name>01 exists, return it. Otherwise, create one. :param resource_group: Resource group name :param location: azure location name :param name: base name to use for the security group :param os_type: one of 'Windows' or 'Linux'. Determins any default rules added to the security group. :param ssh_port: for os_type 'Linux' port used in rule allowing SSH access. :param rdp_port: for os_type 'Windows' port used in rule allowing RDP access. 
:return: security_group object ''' security_group_name = name + '01' group = None self.log("Create security group {0}".format(security_group_name)) self.log("Check to see if security group {0} exists".format(security_group_name)) try: group = self.network_client.network_security_groups.get(resource_group, security_group_name) except CloudError: pass if group: self.log("Security group {0} found.".format(security_group_name)) self.check_provisioning_state(group) return group parameters = NetworkSecurityGroup() parameters.location = location if not open_ports: # Open default ports based on OS type if os_type == 'Linux': # add an inbound SSH rule parameters.security_rules = [ SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow SSH Access', source_port_range='*', destination_port_range='22', priority=100, name='SSH') ] parameters.location = location else: # for windows add inbound RDP and WinRM rules parameters.security_rules = [ SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow RDP port 3389', source_port_range='*', destination_port_range='3389', priority=100, name='RDP01'), SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', description='Allow WinRM HTTPS port 5986', source_port_range='*', destination_port_range='5986', priority=101, name='WinRM01'), ] else: # Open custom ports parameters.security_rules = [] priority = 100 for port in open_ports: priority += 1 rule_name = "Rule_{0}".format(priority) parameters.security_rules.append( SecurityRule('Tcp', '*', '*', 'Allow', 'Inbound', source_port_range='*', destination_port_range=str(port), priority=priority, name=rule_name) ) self.log('Creating default security group {0}'.format(security_group_name)) try: poller = self.network_client.network_security_groups.create_or_update(resource_group, security_group_name, parameters) except Exception as exc: self.fail("Error creating default security rule {0} - {1}".format(security_group_name, str(exc))) return self.get_poller_result(poller) def 
_register(self, key): try: # We have to perform the one-time registration here. Otherwise, we receive an error the first # time we attempt to use the requested client. resource_client = self.rm_client resource_client.providers.register(key) except Exception as exc: self.log("One-time registration of {0} failed - {1}".format(key, str(exc))) self.log("You might need to register {0} using an admin account".format(key)) self.log(("To register a provider using the Python CLI: " "https://docs.microsoft.com/azure/azure-resource-manager/" "resource-manager-common-deployment-errors#noregisteredproviderfound")) @property def storage_client(self): self.log('Getting storage client...') if not self._storage_client: self.check_client_version('storage', storage_client_version, AZURE_EXPECTED_VERSIONS['storage_client_version']) self._storage_client = StorageManagementClient(self.azure_credentials, self.subscription_id) self._register('Microsoft.Storage') return self._storage_client @property def network_client(self): self.log('Getting network client') if not self._network_client: self.check_client_version('network', network_client_version, AZURE_EXPECTED_VERSIONS['network_client_version']) self._network_client = NetworkManagementClient(self.azure_credentials, self.subscription_id) self._register('Microsoft.Network') return self._network_client @property def rm_client(self): self.log('Getting resource manager client') if not self._resource_client: self.check_client_version('resource', resource_client_version, AZURE_EXPECTED_VERSIONS['resource_client_version']) self._resource_client = ResourceManagementClient(self.azure_credentials, self.subscription_id) return self._resource_client @property def compute_client(self): self.log('Getting compute client') if not self._compute_client: self.check_client_version('compute', compute_client_version, AZURE_EXPECTED_VERSIONS['compute_client_version']) self._compute_client = ComputeManagementClient(self.azure_credentials, self.subscription_id) 
self._register('Microsoft.Compute') return self._compute_client
ales-erjavec/orange
refs/heads/master
Orange/OrangeCanvas/document/schemeedit.py
6
""" ==================== Scheme Editor Widget ==================== """ import sys import logging import itertools import unicodedata from operator import attrgetter from contextlib import nested from urllib import urlencode from PyQt4.QtGui import ( QWidget, QVBoxLayout, QInputDialog, QMenu, QAction, QActionGroup, QKeySequence, QUndoStack, QGraphicsItem, QGraphicsObject, QGraphicsTextItem, QCursor, QFont, QPainter, QPixmap, QColor, QIcon, QWhatsThisClickedEvent, QBrush ) from PyQt4.QtCore import ( Qt, QObject, QEvent, QSignalMapper, QRectF, QCoreApplication ) from PyQt4.QtCore import pyqtProperty as Property, pyqtSignal as Signal from ..registry.qt import whats_this_helper from ..gui.quickhelp import QuickHelpTipEvent from ..gui.utils import message_information, disabled from ..scheme import ( scheme, signalmanager, SchemeNode, SchemeLink, BaseSchemeAnnotation ) from ..scheme import widgetsscheme from ..canvas.scene import CanvasScene from ..canvas.view import CanvasView from ..canvas import items from . import interactions from . import commands from . import quickmenu log = logging.getLogger(__name__) # TODO: Should this be moved to CanvasScene? class GraphicsSceneFocusEventListener(QGraphicsObject): itemFocusedIn = Signal(QGraphicsItem) itemFocusedOut = Signal(QGraphicsItem) def __init__(self, parent=None): QGraphicsObject.__init__(self, parent) self.setFlag(QGraphicsItem.ItemHasNoContents) def sceneEventFilter(self, obj, event): if event.type() == QEvent.FocusIn and \ obj.flags() & QGraphicsItem.ItemIsFocusable: obj.focusInEvent(event) if obj.hasFocus(): self.itemFocusedIn.emit(obj) return True elif event.type() == QEvent.FocusOut: obj.focusOutEvent(event) if not obj.hasFocus(): self.itemFocusedOut.emit(obj) return True return QGraphicsObject.sceneEventFilter(self, obj, event) def boundingRect(self): return QRectF() class SchemeEditWidget(QWidget): """ A widget for editing a :class:`~.scheme.Scheme` instance. 
""" #: Undo command has become available/unavailable. undoAvailable = Signal(bool) #: Redo command has become available/unavailable. redoAvailable = Signal(bool) #: Document modified state has changed. modificationChanged = Signal(bool) #: Undo command was added to the undo stack. undoCommandAdded = Signal() #: Item selection has changed. selectionChanged = Signal() #: Document title has changed. titleChanged = Signal(unicode) #: Document path has changed. pathChanged = Signal(unicode) # Quick Menu triggers (NoTriggers, RightClicked, DoubleClicked, SpaceKey, AnyKey) = [0, 1, 2, 4, 8] def __init__(self, parent=None, ): QWidget.__init__(self, parent) self.__modified = False self.__registry = None self.__scheme = None self.__path = u"" self.__quickMenuTriggers = SchemeEditWidget.SpaceKey | \ SchemeEditWidget.DoubleClicked self.__emptyClickButtons = 0 self.__channelNamesVisible = True self.__nodeAnimationEnabled = True self.__possibleSelectionHandler = None self.__possibleMouseItemsMove = False self.__itemsMoving = {} self.__contextMenuTarget = None self.__quickMenu = None self.__quickTip = "" self.__undoStack = QUndoStack(self) self.__undoStack.cleanChanged[bool].connect(self.__onCleanChanged) # scheme node properties when set to a clean state self.__cleanProperties = [] self.__editFinishedMapper = QSignalMapper(self) self.__editFinishedMapper.mapped[QObject].connect( self.__onEditingFinished ) self.__annotationGeomChanged = QSignalMapper(self) self.__setupActions() self.__setupUi() self.__editMenu = QMenu(self.tr("&Edit"), self) self.__editMenu.addAction(self.__undoAction) self.__editMenu.addAction(self.__redoAction) self.__editMenu.addSeparator() self.__editMenu.addAction(self.__selectAllAction) self.__widgetMenu = QMenu(self.tr("&Widget"), self) self.__widgetMenu.addAction(self.__openSelectedAction) self.__widgetMenu.addSeparator() self.__widgetMenu.addAction(self.__renameAction) self.__widgetMenu.addAction(self.__removeSelectedAction) 
self.__widgetMenu.addSeparator() self.__widgetMenu.addAction(self.__helpAction) self.__linkMenu = QMenu(self.tr("Link"), self) self.__linkMenu.addAction(self.__linkEnableAction) self.__linkMenu.addSeparator() self.__linkMenu.addAction(self.__linkRemoveAction) self.__linkMenu.addAction(self.__linkResetAction) def __setupActions(self): self.__zoomAction = \ QAction(self.tr("Zoom"), self, objectName="zoom-action", checkable=True, shortcut=QKeySequence.ZoomIn, toolTip=self.tr("Zoom in the scheme."), toggled=self.toggleZoom, ) self.__cleanUpAction = \ QAction(self.tr("Clean Up"), self, objectName="cleanup-action", toolTip=self.tr("Align widget to a grid."), triggered=self.alignToGrid, ) self.__newTextAnnotationAction = \ QAction(self.tr("Text"), self, objectName="new-text-action", toolTip=self.tr("Add a text annotation to the scheme."), checkable=True, toggled=self.__toggleNewTextAnnotation, ) # Create a font size menu for the new annotation action. self.__fontMenu = QMenu("Font Size", self) self.__fontActionGroup = group = \ QActionGroup(self, exclusive=True, triggered=self.__onFontSizeTriggered) def font(size): f = QFont(self.font()) f.setPixelSize(size) return f for size in [12, 14, 16, 18, 20, 22, 24]: action = QAction("%ipx" % size, group, checkable=True, font=font(size)) self.__fontMenu.addAction(action) group.actions()[2].setChecked(True) self.__newTextAnnotationAction.setMenu(self.__fontMenu) self.__newArrowAnnotationAction = \ QAction(self.tr("Arrow"), self, objectName="new-arrow-action", toolTip=self.tr("Add a arrow annotation to the scheme."), checkable=True, toggled=self.__toggleNewArrowAnnotation, ) # Create a color menu for the arrow annotation action self.__arrowColorMenu = QMenu("Arrow Color",) self.__arrowColorActionGroup = group = \ QActionGroup(self, exclusive=True, triggered=self.__onArrowColorTriggered) def color_icon(color): icon = QIcon() for size in [16, 24, 32]: pixmap = QPixmap(size, size) pixmap.fill(QColor(0, 0, 0, 0)) p = QPainter(pixmap) 
p.setRenderHint(QPainter.Antialiasing) p.setBrush(color) p.setPen(Qt.NoPen) p.drawEllipse(1, 1, size - 2, size - 2) p.end() icon.addPixmap(pixmap) return icon for color in ["#000", "#C1272D", "#662D91", "#1F9CDF", "#39B54A"]: icon = color_icon(QColor(color)) action = QAction(group, icon=icon, checkable=True, iconVisibleInMenu=True) action.setData(color) self.__arrowColorMenu.addAction(action) group.actions()[1].setChecked(True) self.__newArrowAnnotationAction.setMenu(self.__arrowColorMenu) self.__undoAction = self.__undoStack.createUndoAction(self) self.__undoAction.setShortcut(QKeySequence.Undo) self.__undoAction.setObjectName("undo-action") self.__redoAction = self.__undoStack.createRedoAction(self) self.__redoAction.setShortcut(QKeySequence.Redo) self.__redoAction.setObjectName("redo-action") self.__selectAllAction = \ QAction(self.tr("Select all"), self, objectName="select-all-action", toolTip=self.tr("Select all items."), triggered=self.selectAll, shortcut=QKeySequence.SelectAll ) self.__openSelectedAction = \ QAction(self.tr("Open"), self, objectName="open-action", toolTip=self.tr("Open selected widget"), triggered=self.openSelected, enabled=False) self.__removeSelectedAction = \ QAction(self.tr("Remove"), self, objectName="remove-selected", toolTip=self.tr("Remove selected items"), triggered=self.removeSelected, enabled=False ) shortcuts = [Qt.Key_Delete, Qt.ControlModifier + Qt.Key_Backspace] if sys.platform == "darwin": # Command Backspace should be the first # (visible shortcut in the menu) shortcuts.reverse() self.__removeSelectedAction.setShortcuts(shortcuts) self.__renameAction = \ QAction(self.tr("Rename"), self, objectName="rename-action", toolTip=self.tr("Rename selected widget"), triggered=self.__onRenameAction, shortcut=QKeySequence(Qt.Key_F2), enabled=False) self.__helpAction = \ QAction(self.tr("Help"), self, objectName="help-action", toolTip=self.tr("Show widget help"), triggered=self.__onHelpAction, shortcut=QKeySequence("F1"), enabled=False, 
) self.__linkEnableAction = \ QAction(self.tr("Enabled"), self, objectName="link-enable-action", triggered=self.__toggleLinkEnabled, checkable=True, ) self.__linkRemoveAction = \ QAction(self.tr("Remove"), self, objectName="link-remove-action", triggered=self.__linkRemove, toolTip=self.tr("Remove link."), ) self.__linkResetAction = \ QAction(self.tr("Reset Signals"), self, objectName="link-reset-action", triggered=self.__linkReset, ) self.addActions([self.__newTextAnnotationAction, self.__newArrowAnnotationAction, self.__linkEnableAction, self.__linkRemoveAction, self.__linkResetAction]) # Actions which should be disabled while a multistep # interaction is in progress. self.__disruptiveActions = \ [self.__undoAction, self.__redoAction, self.__removeSelectedAction, self.__selectAllAction] def __setupUi(self): layout = QVBoxLayout() layout.setContentsMargins(0, 0, 0, 0) layout.setSpacing(0) scene = CanvasScene() self.__setupScene(scene) view = CanvasView(scene) view.setFrameStyle(CanvasView.NoFrame) view.setRenderHint(QPainter.Antialiasing) view.setContextMenuPolicy(Qt.CustomContextMenu) view.customContextMenuRequested.connect( self.__onCustomContextMenuRequested ) self.__view = view self.__scene = scene layout.addWidget(view) self.setLayout(layout) def __setupScene(self, scene): """ Set up a :class:`CanvasScene` instance for use by the editor. .. 
note:: If an existing scene is in use it must be teared down using __teardownScene """ scene.set_channel_names_visible(self.__channelNamesVisible) scene.set_node_animation_enabled( self.__nodeAnimationEnabled ) scene.setFont(self.font()) scene.installEventFilter(self) scene.set_registry(self.__registry) # Focus listener self.__focusListener = GraphicsSceneFocusEventListener() self.__focusListener.itemFocusedIn.connect( self.__onItemFocusedIn ) self.__focusListener.itemFocusedOut.connect( self.__onItemFocusedOut ) scene.addItem(self.__focusListener) scene.selectionChanged.connect( self.__onSelectionChanged ) scene.node_item_activated.connect( self.__onNodeActivate ) scene.annotation_added.connect( self.__onAnnotationAdded ) scene.annotation_removed.connect( self.__onAnnotationRemoved ) self.__annotationGeomChanged = QSignalMapper(self) def __teardownScene(self, scene): """ Tear down an instance of :class:`CanvasScene` that was used by the editor. """ # Clear the current item selection in the scene so edit action # states are updated accordingly. scene.clearSelection() # Clear focus from any item. scene.setFocusItem(None) # Clear the annotation mapper self.__annotationGeomChanged.deleteLater() self.__annotationGeomChanged = None self.__focusListener.itemFocusedIn.disconnect( self.__onItemFocusedIn ) self.__focusListener.itemFocusedOut.disconnect( self.__onItemFocusedOut ) scene.selectionChanged.disconnect( self.__onSelectionChanged ) scene.removeEventFilter(self) # Clear all items from the scene scene.blockSignals(True) scene.clear_scene() def toolbarActions(self): """ Return a list of actions that can be inserted into a toolbar. 
At the moment these are: - 'Zoom' action - 'Clean up' action (align to grid) - 'New text annotation' action (with a size menu) - 'New arrow annotation' action (with a color menu) """ return [self.__zoomAction, self.__cleanUpAction, self.__newTextAnnotationAction, self.__newArrowAnnotationAction] def menuBarActions(self): """ Return a list of actions that can be inserted into a `QMenuBar`. """ return [self.__editMenu.menuAction(), self.__widgetMenu.menuAction()] def isModified(self): """ Is the document is a modified state. """ return self.__modified or not self.__undoStack.isClean() def setModified(self, modified): """ Set the document modified state. """ if self.__modified != modified: self.__modified = modified if not modified: self.__cleanProperties = node_properties(self.__scheme) self.__undoStack.setClean() else: self.__cleanProperties = [] modified = Property(bool, fget=isModified, fset=setModified) def isModifiedStrict(self): """ Is the document modified. Run a strict check against all node properties as they were at the time when the last call to `setModified(True)` was made. """ propertiesChanged = self.__cleanProperties != \ node_properties(self.__scheme) log.debug("Modified strict check (modified flag: %s, " "undo stack clean: %s, properties: %s)", self.__modified, self.__undoStack.isClean(), propertiesChanged) return self.isModified() or propertiesChanged def setQuickMenuTriggers(self, triggers): """ Set quick menu trigger flags. Flags can be a bitwise `or` of: - `SchemeEditWidget.NoTrigeres` - `SchemeEditWidget.RightClicked` - `SchemeEditWidget.DoubleClicked` - `SchemeEditWidget.SpaceKey` - `SchemeEditWidget.AnyKey` """ if self.__quickMenuTriggers != triggers: self.__quickMenuTriggers = triggers def quickMenuTriggers(self): """ Return quick menu trigger flags. """ return self.__quickMenuTriggers def setChannelNamesVisible(self, visible): """ Set channel names visibility state. 
When enabled the links in the view will have a source/sink channel names displayed over them. """ if self.__channelNamesVisible != visible: self.__channelNamesVisible = visible self.__scene.set_channel_names_visible(visible) def channelNamesVisible(self): """ Return the channel name visibility state. """ return self.__channelNamesVisible def setNodeAnimationEnabled(self, enabled): """ Set the node item animation enabled state. """ if self.__nodeAnimationEnabled != enabled: self.__nodeAnimationEnabled = enabled self.__scene.set_node_animation_enabled(enabled) def nodeAnimationEnabled(self): """ Return the node item animation enabled state. """ return self.__nodeAnimationEnabled def undoStack(self): """ Return the undo stack. """ return self.__undoStack def setPath(self, path): """ Set the path associated with the current scheme. .. note:: Calling `setScheme` will invalidate the path (i.e. set it to an empty string) """ if self.__path != path: self.__path = unicode(path) self.pathChanged.emit(self.__path) def path(self): """ Return the path associated with the scheme """ return self.__path def setScheme(self, scheme): """ Set the :class:`~.scheme.Scheme` instance to display/edit. 
""" if self.__scheme is not scheme: if self.__scheme: self.__scheme.title_changed.disconnect(self.titleChanged) self.__scheme.removeEventFilter(self) sm = self.__scheme.findChild(signalmanager.SignalManager) if sm: sm.stateChanged.disconnect( self.__signalManagerStateChanged) self.__scheme = scheme self.setPath("") if self.__scheme: self.__scheme.title_changed.connect(self.titleChanged) self.titleChanged.emit(scheme.title) self.__cleanProperties = node_properties(scheme) sm = scheme.findChild(signalmanager.SignalManager) if sm: sm.stateChanged.connect(self.__signalManagerStateChanged) else: self.__cleanProperties = [] self.__teardownScene(self.__scene) self.__scene.deleteLater() self.__undoStack.clear() self.__scene = CanvasScene() self.__setupScene(self.__scene) self.__view.setScene(self.__scene) self.__scene.set_scheme(scheme) if self.__scheme: self.__scheme.installEventFilter(self) def scheme(self): """ Return the :class:`~.scheme.Scheme` edited by the widget. """ return self.__scheme def scene(self): """ Return the :class:`QGraphicsScene` instance used to display the current scheme. """ return self.__scene def view(self): """ Return the :class:`QGraphicsView` instance used to display the current scene. """ return self.__view def setRegistry(self, registry): # Is this method necessary? # It should be removed when the scene (items) is fixed # so all information regarding the visual appearance is # included in the node/widget description. self.__registry = registry if self.__scene: self.__scene.set_registry(registry) self.__quickMenu = None def quickMenu(self): """ Return a :class:`~.quickmenu.QuickMenu` popup menu instance for new node creation. """ if self.__quickMenu is None: menu = quickmenu.QuickMenu(self) if self.__registry is not None: menu.setModel(self.__registry.model()) self.__quickMenu = menu return self.__quickMenu def setTitle(self, title): """ Set the scheme title. 
""" self.__undoStack.push( commands.SetAttrCommand(self.__scheme, "title", title) ) def setDescription(self, description): """ Set the scheme description string. """ self.__undoStack.push( commands.SetAttrCommand(self.__scheme, "description", description) ) def addNode(self, node): """ Add a new node (:class:`.SchemeNode`) to the document. """ command = commands.AddNodeCommand(self.__scheme, node) self.__undoStack.push(command) def createNewNode(self, description, title=None, position=None): """ Create a new :class:`.SchemeNode` and add it to the document. The new node is constructed using :func:`newNodeHelper` method. """ node = self.newNodeHelper(description, title, position) self.addNode(node) return node def newNodeHelper(self, description, title=None, position=None): """ Return a new initialized :class:`.SchemeNode`. If `title` and `position` are not supplied they are initialized to sensible defaults. """ if title is None: title = self.enumerateTitle(description.name) if position is None: position = self.nextPosition() return SchemeNode(description, title=title, position=position) def enumerateTitle(self, title): """ Enumerate a `title` string (i.e. add a number in parentheses) so it is not equal to any node title in the current scheme. """ curr_titles = set([node.title for node in self.scheme().nodes]) template = title + " ({0})" enumerated = itertools.imap(template.format, itertools.count(1)) candidates = itertools.chain([title], enumerated) seq = itertools.dropwhile(curr_titles.__contains__, candidates) return next(seq) def nextPosition(self): """ Return the next default node position as a (x, y) tuple. This is a position left of the last added node. 
""" nodes = self.scheme().nodes if nodes: x, y = nodes[-1].position position = (x + 150, y) else: position = (150, 150) return position def removeNode(self, node): """ Remove a `node` (:class:`.SchemeNode`) from the scheme """ command = commands.RemoveNodeCommand(self.__scheme, node) self.__undoStack.push(command) def renameNode(self, node, title): """ Rename a `node` (:class:`.SchemeNode`) to `title`. """ command = commands.RenameNodeCommand(self.__scheme, node, title) self.__undoStack.push(command) def addLink(self, link): """ Add a `link` (:class:`.SchemeLink`) to the scheme. """ command = commands.AddLinkCommand(self.__scheme, link) self.__undoStack.push(command) def removeLink(self, link): """ Remove a link (:class:`.SchemeLink`) from the scheme. """ command = commands.RemoveLinkCommand(self.__scheme, link) self.__undoStack.push(command) def addAnnotation(self, annotation): """ Add `annotation` (:class:`.BaseSchemeAnnotation`) to the scheme """ command = commands.AddAnnotationCommand(self.__scheme, annotation) self.__undoStack.push(command) def removeAnnotation(self, annotation): """ Remove `annotation` (:class:`.BaseSchemeAnnotation`) from the scheme. """ command = commands.RemoveAnnotationCommand(self.__scheme, annotation) self.__undoStack.push(command) def removeSelected(self): """ Remove all selected items in the scheme. """ selected = self.scene().selectedItems() if not selected: return self.__undoStack.beginMacro(self.tr("Remove")) for item in selected: if isinstance(item, items.NodeItem): node = self.scene().node_for_item(item) self.__undoStack.push( commands.RemoveNodeCommand(self.__scheme, node) ) elif isinstance(item, items.annotationitem.Annotation): annot = self.scene().annotation_for_item(item) self.__undoStack.push( commands.RemoveAnnotationCommand(self.__scheme, annot) ) self.__undoStack.endMacro() def selectAll(self): """ Select all selectable items in the scheme. 
""" for item in self.__scene.items(): if item.flags() & QGraphicsItem.ItemIsSelectable: item.setSelected(True) def toggleZoom(self, zoom): """ Toggle view zoom. If `zoom` is True the scheme is displayed scaled to 150%. """ view = self.view() if zoom: view.scale(1.5, 1.5) else: view.resetTransform() def alignToGrid(self): """ Align nodes to a grid. """ # TODO: The the current layout implementation is BAD (fix is urgent). tile_size = 150 tiles = {} nodes = sorted(self.scheme().nodes, key=attrgetter("position")) if nodes: self.__undoStack.beginMacro(self.tr("Align To Grid")) for node in nodes: x, y = node.position x = int(round(float(x) / tile_size) * tile_size) y = int(round(float(y) / tile_size) * tile_size) while (x, y) in tiles: x += tile_size self.__undoStack.push( commands.MoveNodeCommand(self.scheme(), node, node.position, (x, y)) ) tiles[x, y] = node self.__scene.item_for_node(node).setPos(x, y) self.__undoStack.endMacro() def focusNode(self): """ Return the current focused :class:`.SchemeNode` or ``None`` if no node has focus. """ focus = self.__scene.focusItem() node = None if isinstance(focus, items.NodeItem): try: node = self.__scene.node_for_item(focus) except KeyError: # in case the node has been removed but the scene was not # yet fully updated. node = None return node def selectedNodes(self): """ Return all selected :class:`.SchemeNode` items. """ return map(self.scene().node_for_item, self.scene().selected_node_items()) def selectedAnnotations(self): """ Return all selected :class:`.BaseSchemeAnnotation` items. """ return map(self.scene().annotation_for_item, self.scene().selected_annotation_items()) def openSelected(self): """ Open (show and raise) all widgets for the current selected nodes. """ selected = self.scene().selected_node_items() for item in selected: self.__onNodeActivate(item) def editNodeTitle(self, node): """ Edit (rename) the `node`'s title. Opens an input dialog. 
""" name, ok = QInputDialog.getText( self, self.tr("Rename"), unicode(self.tr("Enter a new name for the '%s' widget")) \ % node.title, text=node.title ) if ok: self.__undoStack.push( commands.RenameNodeCommand(self.__scheme, node, node.title, unicode(name)) ) def __onCleanChanged(self, clean): if self.isWindowModified() != (not clean): self.setWindowModified(not clean) self.modificationChanged.emit(not clean) def changeEvent(self, event): if event.type() == QEvent.FontChange: self.__updateFont() QWidget.changeEvent(self, event) def eventFilter(self, obj, event): # Filter the scene's drag/drop events. if obj is self.scene(): etype = event.type() if etype == QEvent.GraphicsSceneDragEnter or \ etype == QEvent.GraphicsSceneDragMove: mime_data = event.mimeData() if mime_data.hasFormat( "application/vnv.orange-canvas.registry.qualified-name" ): event.acceptProposedAction() else: event.ignore() return True elif etype == QEvent.GraphicsSceneDrop: data = event.mimeData() qname = data.data( "application/vnv.orange-canvas.registry.qualified-name" ) try: desc = self.__registry.widget(unicode(qname)) except KeyError: log.error("Unknown qualified name '%s'", qname) else: pos = event.scenePos() self.createNewNode(desc, position=(pos.x(), pos.y())) return True elif etype == QEvent.GraphicsSceneMousePress: return self.sceneMousePressEvent(event) elif etype == QEvent.GraphicsSceneMouseMove: return self.sceneMouseMoveEvent(event) elif etype == QEvent.GraphicsSceneMouseRelease: return self.sceneMouseReleaseEvent(event) elif etype == QEvent.GraphicsSceneMouseDoubleClick: return self.sceneMouseDoubleClickEvent(event) elif etype == QEvent.KeyPress: return self.sceneKeyPressEvent(event) elif etype == QEvent.KeyRelease: return self.sceneKeyReleaseEvent(event) elif etype == QEvent.GraphicsSceneContextMenu: return self.sceneContextMenuEvent(event) elif obj is self.__scheme: if event.type() == QEvent.WhatsThisClicked: # Re post the event self.__showHelpFor(event.href()) elif event.type() == \ 
widgetsscheme.ActivateParentEvent.ActivateParent: self.window().activateWindow() self.window().raise_() return QWidget.eventFilter(self, obj, event) def sceneMousePressEvent(self, event): scene = self.__scene if scene.user_interaction_handler: return False pos = event.scenePos() anchor_item = scene.item_at(pos, items.NodeAnchorItem, buttons=Qt.LeftButton) if anchor_item and event.button() == Qt.LeftButton: # Start a new link starting at item scene.clearSelection() handler = interactions.NewLinkAction(self) self._setUserInteractionHandler(handler) return handler.mousePressEvent(event) any_item = scene.item_at(pos) if not any_item: self.__emptyClickButtons |= event.button() if not any_item and event.button() == Qt.LeftButton: # Create a RectangleSelectionAction but do not set in on the scene # just yet (instead wait for the mouse move event). handler = interactions.RectangleSelectionAction(self) rval = handler.mousePressEvent(event) if rval == True: self.__possibleSelectionHandler = handler return rval if any_item and event.button() == Qt.LeftButton: self.__possibleMouseItemsMove = True self.__itemsMoving.clear() self.__scene.node_item_position_changed.connect( self.__onNodePositionChanged ) self.__annotationGeomChanged.mapped[QObject].connect( self.__onAnnotationGeometryChanged ) set_enabled_all(self.__disruptiveActions, False) return False def sceneMouseMoveEvent(self, event): scene = self.__scene if scene.user_interaction_handler: return False if self.__emptyClickButtons & Qt.LeftButton and \ event.buttons() & Qt.LeftButton and \ self.__possibleSelectionHandler: # Set the RectangleSelection (initialized in mousePressEvent) # on the scene handler = self.__possibleSelectionHandler self._setUserInteractionHandler(handler) self.__possibleSelectionHandler = None return handler.mouseMoveEvent(event) return False def sceneMouseReleaseEvent(self, event): scene = self.__scene if scene.user_interaction_handler: return False if event.button() == Qt.LeftButton and 
self.__possibleMouseItemsMove: self.__possibleMouseItemsMove = False self.__scene.node_item_position_changed.disconnect( self.__onNodePositionChanged ) self.__annotationGeomChanged.mapped[QObject].disconnect( self.__onAnnotationGeometryChanged ) set_enabled_all(self.__disruptiveActions, True) if self.__itemsMoving: self.__scene.mouseReleaseEvent(event) stack = self.undoStack() stack.beginMacro(self.tr("Move")) for scheme_item, (old, new) in self.__itemsMoving.items(): if isinstance(scheme_item, SchemeNode): command = commands.MoveNodeCommand( self.scheme(), scheme_item, old, new ) elif isinstance(scheme_item, BaseSchemeAnnotation): command = commands.AnnotationGeometryChange( self.scheme(), scheme_item, old, new ) else: continue stack.push(command) stack.endMacro() self.__itemsMoving.clear() return True elif event.button() == Qt.LeftButton: self.__possibleSelectionHandler = None return False def sceneMouseDoubleClickEvent(self, event): scene = self.__scene if scene.user_interaction_handler: return False item = scene.item_at(event.scenePos()) if not item and self.__quickMenuTriggers & \ SchemeEditWidget.DoubleClicked: # Double click on an empty spot # Create a new node using QuickMenu action = interactions.NewNodeAction(self) with nested(disabled(self.__undoAction), disabled(self.__redoAction)): action.create_new(event.screenPos()) event.accept() return True item = scene.item_at(event.scenePos(), items.LinkItem, buttons=Qt.LeftButton) if item is not None and event.button() == Qt.LeftButton: link = self.scene().link_for_item(item) action = interactions.EditNodeLinksAction(self, link.source_node, link.sink_node) action.edit_links() event.accept() return True return False def sceneKeyPressEvent(self, event): scene = self.__scene if scene.user_interaction_handler: return False # If a QGraphicsItem is in text editing mode, don't interrupt it focusItem = scene.focusItem() if focusItem and isinstance(focusItem, QGraphicsTextItem) and \ focusItem.textInteractionFlags() & 
Qt.TextEditable: return False # If the mouse is not over out view if not self.view().underMouse(): return False handler = None searchText = "" if (event.key() == Qt.Key_Space and \ self.__quickMenuTriggers & SchemeEditWidget.SpaceKey): handler = interactions.NewNodeAction(self) elif len(event.text()) and \ self.__quickMenuTriggers & SchemeEditWidget.AnyKey and \ is_printable(unicode(event.text())[0]): handler = interactions.NewNodeAction(self) searchText = unicode(event.text()) # TODO: set the search text to event.text() and set focus on the # search line if handler is not None: # Control + Backspace (remove widget action on Mac OSX) conflicts # with the 'Clear text' action in the search widget (there might # be selected items in the canvas), so we disable the # remove widget action so the text editing follows standard # 'look and feel' with nested(disabled(self.__removeSelectedAction), disabled(self.__undoAction), disabled(self.__redoAction)): handler.create_new(QCursor.pos(), searchText) event.accept() return True return False def sceneKeyReleaseEvent(self, event): return False def sceneContextMenuEvent(self, event): return False def _setUserInteractionHandler(self, handler): """ Helper method for setting the user interaction handlers. 
""" if self.__scene.user_interaction_handler: if isinstance(self.__scene.user_interaction_handler, (interactions.ResizeArrowAnnotation, interactions.ResizeTextAnnotation)): self.__scene.user_interaction_handler.commit() self.__scene.user_interaction_handler.ended.disconnect( self.__onInteractionEnded ) if handler: handler.ended.connect(self.__onInteractionEnded) # Disable actions which could change the model set_enabled_all(self.__disruptiveActions, False) self.__scene.set_user_interaction_handler(handler) def __onInteractionEnded(self): self.sender().ended.disconnect(self.__onInteractionEnded) set_enabled_all(self.__disruptiveActions, True) def __onSelectionChanged(self): nodes = self.selectedNodes() annotations = self.selectedAnnotations() self.__openSelectedAction.setEnabled(bool(nodes)) self.__removeSelectedAction.setEnabled( bool(nodes) or bool(annotations) ) self.__helpAction.setEnabled(len(nodes) == 1) self.__renameAction.setEnabled(len(nodes) == 1) if len(nodes) > 1: self.__openSelectedAction.setText(self.tr("Open All")) else: self.__openSelectedAction.setText(self.tr("Open")) if len(nodes) + len(annotations) > 1: self.__removeSelectedAction.setText(self.tr("Remove All")) else: self.__removeSelectedAction.setText(self.tr("Remove")) if len(nodes) == 0: self.__openSelectedAction.setText(self.tr("Open")) self.__removeSelectedAction.setText(self.tr("Remove")) focus = self.focusNode() if focus is not None: desc = focus.description tip = whats_this_helper(desc, include_more_link=True) else: tip = "" if tip != self.__quickTip: self.__quickTip = tip ev = QuickHelpTipEvent("", self.__quickTip, priority=QuickHelpTipEvent.Permanent) QCoreApplication.sendEvent(self, ev) def __onNodeActivate(self, item): node = self.__scene.node_for_item(item) widget = self.scheme().widget_for_node(node) widget.show() widget.raise_() widget.activateWindow() def __onNodePositionChanged(self, item, pos): node = self.__scene.node_for_item(item) new = (pos.x(), pos.y()) if node not in 
self.__itemsMoving: self.__itemsMoving[node] = (node.position, new) else: old, _ = self.__itemsMoving[node] self.__itemsMoving[node] = (old, new) def __onAnnotationGeometryChanged(self, item): annot = self.scene().annotation_for_item(item) if annot not in self.__itemsMoving: self.__itemsMoving[annot] = (annot.geometry, geometry_from_annotation_item(item)) else: old, _ = self.__itemsMoving[annot] self.__itemsMoving[annot] = (old, geometry_from_annotation_item(item)) def __onAnnotationAdded(self, item): log.debug("Annotation added (%r)", item) item.setFlag(QGraphicsItem.ItemIsSelectable) item.setFlag(QGraphicsItem.ItemIsMovable) item.setFlag(QGraphicsItem.ItemIsFocusable) item.installSceneEventFilter(self.__focusListener) if isinstance(item, items.ArrowAnnotation): pass elif isinstance(item, items.TextAnnotation): # Make the annotation editable. item.setTextInteractionFlags(Qt.TextEditorInteraction) self.__editFinishedMapper.setMapping(item, item) item.editingFinished.connect( self.__editFinishedMapper.map ) self.__annotationGeomChanged.setMapping(item, item) item.geometryChanged.connect( self.__annotationGeomChanged.map ) def __onAnnotationRemoved(self, item): log.debug("Annotation removed (%r)", item) if isinstance(item, items.ArrowAnnotation): pass elif isinstance(item, items.TextAnnotation): item.editingFinished.disconnect( self.__editFinishedMapper.map ) item.removeSceneEventFilter(self.__focusListener) self.__annotationGeomChanged.removeMappings(item) item.geometryChanged.disconnect( self.__annotationGeomChanged.map ) def __onItemFocusedIn(self, item): """ Annotation item has gained focus. """ if not self.__scene.user_interaction_handler: self.__startControlPointEdit(item) def __onItemFocusedOut(self, item): """ Annotation item lost focus. """ self.__endControlPointEdit() def __onEditingFinished(self, item): """ Text annotation editing has finished. 
""" annot = self.__scene.annotation_for_item(item) text = unicode(item.toPlainText()) if annot.text != text: self.__undoStack.push( commands.TextChangeCommand(self.scheme(), annot, annot.text, text) ) def __toggleNewArrowAnnotation(self, checked): if self.__newTextAnnotationAction.isChecked(): # Uncheck the text annotation action if needed. self.__newTextAnnotationAction.setChecked(not checked) action = self.__newArrowAnnotationAction if not checked: # The action was unchecked (canceled by the user) handler = self.__scene.user_interaction_handler if isinstance(handler, interactions.NewArrowAnnotation): # Cancel the interaction and restore the state handler.ended.disconnect(action.toggle) handler.cancel(interactions.UserInteraction.UserCancelReason) log.info("Canceled new arrow annotation") else: handler = interactions.NewArrowAnnotation(self) checked = self.__arrowColorActionGroup.checkedAction() handler.setColor(checked.data().toPyObject()) handler.ended.connect(action.toggle) self._setUserInteractionHandler(handler) def __onFontSizeTriggered(self, action): if not self.__newTextAnnotationAction.isChecked(): # When selecting from the (font size) menu the 'Text' # action does not get triggered automatically. self.__newTextAnnotationAction.trigger() else: # Update the preferred font on the interaction handler. handler = self.__scene.user_interaction_handler if isinstance(handler, interactions.NewTextAnnotation): handler.setFont(action.font()) def __toggleNewTextAnnotation(self, checked): if self.__newArrowAnnotationAction.isChecked(): # Uncheck the arrow annotation if needed. 
self.__newArrowAnnotationAction.setChecked(not checked) action = self.__newTextAnnotationAction if not checked: # The action was unchecked (canceled by the user) handler = self.__scene.user_interaction_handler if isinstance(handler, interactions.NewTextAnnotation): # cancel the interaction and restore the state handler.ended.disconnect(action.toggle) handler.cancel(interactions.UserInteraction.UserCancelReason) log.info("Canceled new text annotation") else: handler = interactions.NewTextAnnotation(self) checked = self.__fontActionGroup.checkedAction() handler.setFont(checked.font()) handler.ended.connect(action.toggle) self._setUserInteractionHandler(handler) def __onArrowColorTriggered(self, action): if not self.__newArrowAnnotationAction.isChecked(): # When selecting from the (color) menu the 'Arrow' # action does not get triggered automatically. self.__newArrowAnnotationAction.trigger() else: # Update the preferred color on the interaction handler handler = self.__scene.user_interaction_handler if isinstance(handler, interactions.NewArrowAnnotation): handler.setColor(action.data().toPyObject()) def __onCustomContextMenuRequested(self, pos): scenePos = self.view().mapToScene(pos) globalPos = self.view().mapToGlobal(pos) item = self.scene().item_at(scenePos, items.NodeItem) if item is not None: self.__widgetMenu.popup(globalPos) return item = self.scene().item_at(scenePos, items.LinkItem, buttons=Qt.RightButton) if item is not None: link = self.scene().link_for_item(item) self.__linkEnableAction.setChecked(link.enabled) self.__contextMenuTarget = link self.__linkMenu.popup(globalPos) return item = self.scene().item_at(scenePos) if not item and \ self.__quickMenuTriggers & SchemeEditWidget.RightClicked: action = interactions.NewNodeAction(self) with nested(disabled(self.__undoAction), disabled(self.__redoAction)): action.create_new(globalPos) return def __onRenameAction(self): """ Rename was requested for the selected widget. 
""" selected = self.selectedNodes() if len(selected) == 1: self.editNodeTitle(selected[0]) def __onHelpAction(self): """ Help was requested for the selected widget. """ nodes = self.selectedNodes() help_url = None if len(nodes) == 1: node = nodes[0] desc = node.description help_url = "help://search?" + urlencode({"id": desc.id}) self.__showHelpFor(help_url) def __showHelpFor(self, help_url): """ Show help for an "help" url. """ # Notify the parent chain and let them respond ev = QWhatsThisClickedEvent(help_url) handled = QCoreApplication.sendEvent(self, ev) if not handled: message_information( self.tr("Sorry there is no documentation available for " "this widget."), parent=self) def __toggleLinkEnabled(self, enabled): """ Link 'enabled' state was toggled in the context menu. """ if self.__contextMenuTarget: link = self.__contextMenuTarget command = commands.SetAttrCommand( link, "enabled", enabled, name=self.tr("Set enabled"), ) self.__undoStack.push(command) def __linkRemove(self): """ Remove link was requested from the context menu. """ if self.__contextMenuTarget: self.removeLink(self.__contextMenuTarget) def __linkReset(self): """ Link reset from the context menu was requested. """ if self.__contextMenuTarget: link = self.__contextMenuTarget action = interactions.EditNodeLinksAction( self, link.source_node, link.sink_node ) action.edit_links() def __startControlPointEdit(self, item): """ Start a control point edit interaction for `item`. """ if isinstance(item, items.ArrowAnnotation): handler = interactions.ResizeArrowAnnotation(self) elif isinstance(item, items.TextAnnotation): handler = interactions.ResizeTextAnnotation(self) else: log.warning("Unknown annotation item type %r" % item) return handler.editItem(item) self._setUserInteractionHandler(handler) log.info("Control point editing started (%r)." % item) def __endControlPointEdit(self): """ End the current control point edit interaction. 
""" handler = self.__scene.user_interaction_handler if isinstance(handler, (interactions.ResizeArrowAnnotation, interactions.ResizeTextAnnotation)) and \ not handler.isFinished() and not handler.isCanceled(): handler.commit() handler.end() log.info("Control point editing finished.") def __updateFont(self): """ Update the font for the "Text size' menu and the default font used in the `CanvasScene`. """ actions = self.__fontActionGroup.actions() font = self.font() for action in actions: size = action.font().pixelSize() action_font = QFont(font) action_font.setPixelSize(size) action.setFont(action_font) if self.__scene: self.__scene.setFont(font) def __signalManagerStateChanged(self, state): if state == signalmanager.SignalManager.Running: self.__view.setBackgroundBrush(QBrush(Qt.NoBrush)) # self.__view.setBackgroundIcon(QIcon()) elif state == signalmanager.SignalManager.Paused: self.__view.setBackgroundBrush(QBrush(QColor(235, 235, 235))) # self.__view.setBackgroundIcon(QIcon("canvas_icons:Pause.svg")) def geometry_from_annotation_item(item): if isinstance(item, items.ArrowAnnotation): line = item.line() p1 = item.mapToScene(line.p1()) p2 = item.mapToScene(line.p2()) return ((p1.x(), p1.y()), (p2.x(), p2.y())) elif isinstance(item, items.TextAnnotation): geom = item.geometry() return (geom.x(), geom.y(), geom.width(), geom.height()) def mouse_drag_distance(event, button=Qt.LeftButton): """ Return the (manhattan) distance between the mouse position when the `button` was pressed and the current mouse position. """ diff = (event.buttonDownScreenPos(button) - event.screenPos()) return diff.manhattanLength() def set_enabled_all(objects, enable): """ Set `enabled` properties on all objects (objects with `setEnabled` method). """ for obj in objects: obj.setEnabled(enable) # All control character categories. _control = set(["Cc", "Cf", "Cs", "Co", "Cn"]) def is_printable(unichar): """ Return True if the unicode character `unichar` is a printable character. 
""" return unicodedata.category(unichar) not in _control def node_properties(scheme): scheme.sync_node_properties() return [dict(node.properties) for node in scheme.nodes]
mjbrewer/testindex
refs/heads/master
magnum/tests/unit/common/test_clients.py
6
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from barbicanclient import client as barbicanclient
from glanceclient.v2 import client as glanceclient
from heatclient.v1 import client as heatclient
import mock
from oslo_config import cfg

from magnum.common import clients
from magnum.common import exception
from magnum.tests import base


class ClientsTest(base.BaseTestCase):
    """Tests for magnum.common.clients.OpenStackClients.

    Each service client (heat, glance, barbican) gets the same checks:
    construction arguments, region override via oslo.config, failure
    without auth, and caching of the constructed client.

    NOTE: mock.patch.object decorators apply bottom-up, so the mock
    arguments of each test arrive in the reverse order of the decorators.
    """

    @mock.patch.object(clients.OpenStackClients, 'keystone')
    def test_url_for(self, mock_keystone):
        # url_for() must delegate to the keystone service catalog with the
        # same keyword arguments it was given.
        obj = clients.OpenStackClients(None)
        obj.url_for(service_type='fake_service',
                    endpoint_type='fake_endpoint')

        mock_cat = mock_keystone.return_value.client.service_catalog
        mock_cat.url_for.assert_called_once_with(
            service_type='fake_service',
            endpoint_type='fake_endpoint')

    @mock.patch.object(clients.OpenStackClients, 'keystone')
    def test_magnum_url(self, mock_keystone):
        # Region and endpoint type for magnum_url() come from the
        # [magnum_client] config group.
        fake_region = 'fake_region'
        fake_endpoint = 'fake_endpoint'
        cfg.CONF.set_override('region_name', fake_region,
                              group='magnum_client')
        cfg.CONF.set_override('endpoint_type', fake_endpoint,
                              group='magnum_client')
        obj = clients.OpenStackClients(None)
        obj.magnum_url()

        mock_cat = mock_keystone.return_value.client.service_catalog
        mock_cat.url_for.assert_called_once_with(
            region_name=fake_region,
            service_type='container',
            endpoint_type=fake_endpoint)

    @mock.patch.object(heatclient, 'Client')
    @mock.patch.object(clients.OpenStackClients, 'url_for')
    @mock.patch.object(clients.OpenStackClients, 'auth_url')
    def _test_clients_heat(self, expected_region_name, mock_auth,
                           mock_url, mock_call):
        """Shared body for the heat client tests.

        Verifies heatclient.Client is built with the token/endpoint from
        the (mocked) context and that url_for is asked for the
        'orchestration' endpoint in ``expected_region_name``.
        """
        mock_auth.__get__ = mock.Mock(return_value="keystone_url")
        con = mock.MagicMock()
        con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
        con.auth_url = "keystone_url"
        mock_url.return_value = "url_from_keystone"

        obj = clients.OpenStackClients(con)
        obj._heat = None
        obj.heat()
        mock_call.assert_called_once_with(
            endpoint='url_from_keystone',
            username=None,
            cert_file=None,
            token='3bcc3d3a03f44e3d8377f9247b0ad155',
            auth_url='keystone_url',
            ca_file=None,
            key_file=None,
            password=None,
            insecure=False)
        mock_url.assert_called_once_with(service_type='orchestration',
                                         endpoint_type='publicURL',
                                         region_name=expected_region_name)

    def test_clients_heat(self):
        self._test_clients_heat(None)

    def test_clients_heat_region(self):
        cfg.CONF.set_override('region_name', 'myregion',
                              group='heat_client')
        self._test_clients_heat('myregion')

    def test_clients_heat_noauth(self):
        # Without an auth token heat() must raise AuthorizationFailure.
        con = mock.MagicMock()
        con.auth_token = None
        con.auth_token_info = None
        auth_url = mock.PropertyMock(name="auth_url",
                                     return_value="keystone_url")
        type(con).auth_url = auth_url
        con.get_url_for = mock.Mock(name="get_url_for")
        con.get_url_for.return_value = "url_from_keystone"
        obj = clients.OpenStackClients(con)
        obj._heat = None
        self.assertRaises(exception.AuthorizationFailure, obj.heat)

    @mock.patch.object(clients.OpenStackClients, 'url_for')
    @mock.patch.object(clients.OpenStackClients, 'auth_url')
    def test_clients_heat_cached(self, mock_auth, mock_url):
        # A second heat() call must reuse the already-built client.
        mock_auth.__get__ = mock.Mock(return_value="keystone_url")
        con = mock.MagicMock()
        con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
        con.auth_url = "keystone_url"
        mock_url.return_value = "url_from_keystone"

        obj = clients.OpenStackClients(con)
        obj._heat = None
        heat = obj.heat()
        heat_cached = obj.heat()
        self.assertEqual(heat, heat_cached)

    @mock.patch.object(glanceclient, 'Client')
    @mock.patch.object(clients.OpenStackClients, 'url_for')
    @mock.patch.object(clients.OpenStackClients, 'auth_url')
    def _test_clients_glance(self, expected_region_name, mock_auth,
                             mock_url, mock_call):
        """Shared body for the glance client tests (see _test_clients_heat)."""
        mock_auth.__get__ = mock.Mock(return_value="keystone_url")
        con = mock.MagicMock()
        con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
        con.auth_url = "keystone_url"
        mock_url.return_value = "url_from_keystone"

        obj = clients.OpenStackClients(con)
        obj._glance = None
        obj.glance()
        mock_call.assert_called_once_with(
            endpoint='url_from_keystone',
            username=None,
            token='3bcc3d3a03f44e3d8377f9247b0ad155',
            auth_url='keystone_url',
            password=None)
        mock_url.assert_called_once_with(service_type='image',
                                         endpoint_type='publicURL',
                                         region_name=expected_region_name)

    def test_clients_glance(self):
        self._test_clients_glance(None)

    def test_clients_glance_region(self):
        cfg.CONF.set_override('region_name', 'myregion',
                              group='glance_client')
        self._test_clients_glance('myregion')

    def test_clients_glance_noauth(self):
        # Without an auth token glance() must raise AuthorizationFailure.
        con = mock.MagicMock()
        con.auth_token = None
        con.auth_token_info = None
        auth_url = mock.PropertyMock(name="auth_url",
                                     return_value="keystone_url")
        type(con).auth_url = auth_url
        con.get_url_for = mock.Mock(name="get_url_for")
        con.get_url_for.return_value = "url_from_keystone"
        obj = clients.OpenStackClients(con)
        obj._glance = None
        self.assertRaises(exception.AuthorizationFailure, obj.glance)

    @mock.patch.object(clients.OpenStackClients, 'url_for')
    @mock.patch.object(clients.OpenStackClients, 'auth_url')
    def test_clients_glance_cached(self, mock_auth, mock_url):
        # A second glance() call must reuse the already-built client.
        mock_auth.__get__ = mock.Mock(return_value="keystone_url")
        con = mock.MagicMock()
        con.auth_token = "3bcc3d3a03f44e3d8377f9247b0ad155"
        con.auth_url = "keystone_url"
        mock_url.return_value = "url_from_keystone"

        obj = clients.OpenStackClients(con)
        obj._glance = None
        glance = obj.glance()
        glance_cached = obj.glance()
        self.assertEqual(glance, glance_cached)

    @mock.patch.object(clients.OpenStackClients, 'keystone')
    @mock.patch.object(barbicanclient, 'Client')
    @mock.patch.object(clients.OpenStackClients, 'url_for')
    def _test_clients_barbican(self, expected_region_name, mock_url,
                               mock_call, mock_keystone):
        """Shared body for the barbican client tests.

        Unlike heat/glance, barbican is built from the keystone session
        rather than from a raw token.
        """
        con = mock.MagicMock()
        con.auth_url = "keystone_url"
        mock_url.return_value = "url_from_keystone"
        keystone = mock.MagicMock()
        keystone.client.session = mock.MagicMock()
        mock_keystone.return_value = keystone

        obj = clients.OpenStackClients(con)
        obj._barbican = None
        obj.barbican()
        mock_call.assert_called_once_with(
            endpoint='url_from_keystone',
            session=keystone.client.session)
        mock_keystone.assert_called_once_with()
        mock_url.assert_called_once_with(service_type='key-manager',
                                         endpoint_type='publicURL',
                                         region_name=expected_region_name)

    def test_clients_barbican(self):
        self._test_clients_barbican(None)

    def test_clients_barbican_region(self):
        cfg.CONF.set_override('region_name', 'myregion',
                              group='barbican_client')
        self._test_clients_barbican('myregion')

    def test_clients_barbican_noauth(self):
        # Without an auth token barbican() must raise AuthorizationFailure.
        con = mock.MagicMock()
        con.auth_token = None
        con.auth_token_info = None
        auth_url = mock.PropertyMock(name="auth_url",
                                     return_value="keystone_url")
        type(con).auth_url = auth_url
        con.get_url_for = mock.Mock(name="get_url_for")
        con.get_url_for.return_value = "url_from_keystone"
        obj = clients.OpenStackClients(con)
        obj._barbican = None
        self.assertRaises(exception.AuthorizationFailure, obj.barbican)

    @mock.patch.object(clients.OpenStackClients, 'keystone')
    @mock.patch.object(clients.OpenStackClients, 'url_for')
    def test_clients_barbican_cached(self, mock_url, mock_keystone):
        # A second barbican() call must reuse the already-built client.
        con = mock.MagicMock()
        con.auth_url = "keystone_url"
        mock_url.return_value = "url_from_keystone"
        keystone = mock.MagicMock()
        keystone.client.session = mock.MagicMock()
        mock_keystone.return_value = keystone

        obj = clients.OpenStackClients(con)
        obj._barbican = None
        barbican = obj.barbican()
        barbican_cached = obj.barbican()
        self.assertEqual(barbican, barbican_cached)
chrisfranzen/django
refs/heads/master
tests/regressiontests/mail/__init__.py
45382
alexlo03/ansible
refs/heads/devel
lib/ansible/modules/network/nxos/_nxos_mtu.py
71
#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['removed'], 'supported_by': 'network'} from ansible.module_utils.common.removed import removed_module if __name__ == '__main__': removed_module(removed_in="2.5")
CaringCaribou/caringcaribou
refs/heads/master
tool/tests/test_iso_15765_2.py
1
from __future__ import print_function
from tests.mock.mock_ecu_iso_tp import MockEcuIsoTp
from lib import iso15765_2
from lib.can_actions import DEFAULT_INTERFACE
import can
import unittest


class IsoTpTestCase(unittest.TestCase):
    """Integration tests for the ISO-15765-2 (ISO-TP) transport layer.

    Each test talks to a MockEcuIsoTp server over a CAN bus opened on
    DEFAULT_INTERFACE (a virtual bus, per setUp), exercising single-frame
    and multi-frame segmentation against actual CAN traffic.
    """

    # Arbitration IDs for the request/response channel pair shared by the
    # mock ECU and the ISO-TP layer under test.
    ARB_ID_REQUEST = 0x100A
    ARB_ID_RESPONSE = 0x100B

    def setUp(self):
        # Initialize mock ECU
        self.ecu = MockEcuIsoTp(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE)
        self.ecu.start_server()
        # Initialize virtual CAN bus
        can_bus = can.Bus(DEFAULT_INTERFACE)
        # Setup ISO-TP layer
        self.tp = iso15765_2.IsoTp(self.ARB_ID_REQUEST, self.ARB_ID_RESPONSE, bus=can_bus)

    def tearDown(self):
        # Both objects are context managers; invoke __exit__ directly since
        # setUp/tearDown cannot use a ``with`` block spanning the test.
        if isinstance(self.ecu, MockEcuIsoTp):
            self.ecu.__exit__(None, None, None)
        if isinstance(self.tp, iso15765_2.IsoTp):
            self.tp.__exit__(None, None, None)

    def test_create_iso_15765_2(self):
        """The ISO-TP layer constructed in setUp is a valid IsoTp instance."""
        self.assertIsInstance(self.tp, iso15765_2.IsoTp, "Failed to initialize ISO-TP")

    def test_single_frame(self):
        """A single-frame request gets the expected single-frame response."""
        # Send request
        self.tp.send_request(MockEcuIsoTp.MOCK_SINGLE_FRAME_REQUEST)
        # Receive response
        response = self.tp.indication()
        # Validate response
        self.assertIsInstance(response, list, "No SF response received")
        self.assertEqual(response, MockEcuIsoTp.MOCK_SINGLE_FRAME_RESPONSE)

    def test_multi_frame_two_frames(self):
        """A request whose response spans two frames is reassembled correctly."""
        # Send request
        self.tp.send_request(MockEcuIsoTp.MOCK_MULTI_FRAME_TWO_MESSAGES_REQUEST)
        # Receive response
        response = self.tp.indication()
        # Validate response
        self.assertIsInstance(response, list, "No multi-frame response received")
        self.assertEqual(response, MockEcuIsoTp.MOCK_MULTI_FRAME_TWO_MESSAGES_RESPONSE)

    def test_multi_frame_long_message(self):
        """A long multi-frame response is reassembled correctly."""
        # Send request
        self.tp.send_request(MockEcuIsoTp.MOCK_MULTI_FRAME_LONG_MESSAGE_REQUEST)
        # Receive response
        response = self.tp.indication()
        # Validate response
        self.assertIsInstance(response, list, "No multi-frame response received")
        self.assertEqual(response, MockEcuIsoTp.MOCK_MULTI_FRAME_LONG_MESSAGE_RESPONSE)

    def test_fail_too_long_message(self):
        """Sending one byte more than MAX_MESSAGE_LENGTH raises ValueError."""
        # NOTE(review): only send_request is expected to raise; the list
        # construction inside the assertRaises block is assumed harmless —
        # TODO confirm against IsoTp.send_request.
        with self.assertRaises(ValueError):
            max_allowed_length = iso15765_2.IsoTp.MAX_MESSAGE_LENGTH
            too_long_message = [0x0] * (max_allowed_length + 1)
            self.tp.send_request(too_long_message)
jabez1314/youtube-dl
refs/heads/master
youtube_dl/extractor/dhm.py
110
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import parse_duration


class DHMIE(InfoExtractor):
    """Extractor for the film archive of the Deutsches Historisches Museum.

    Archive pages embed an XSPF playlist whose URL appears in the page
    source; title, description and duration are scraped from the HTML and
    merged into the first playlist entry.
    """
    IE_DESC = 'Filmarchiv - Deutsches Historisches Museum'
    _VALID_URL = r'https?://(?:www\.)?dhm\.de/filmarchiv/(?:[^/]+/)+(?P<id>[^/]+)'

    _TESTS = [{
        'url': 'http://www.dhm.de/filmarchiv/die-filme/the-marshallplan-at-work-in-west-germany/',
        'md5': '11c475f670209bf6acca0b2b7ef51827',
        'info_dict': {
            'id': 'the-marshallplan-at-work-in-west-germany',
            'ext': 'flv',
            'title': 'MARSHALL PLAN AT WORK IN WESTERN GERMANY, THE',
            'description': 'md5:1fabd480c153f97b07add61c44407c82',
            'duration': 660,
            # Raw string: the regex contains ``\.``, which is not a valid
            # Python string escape (DeprecationWarning in a plain literal).
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }, {
        'url': 'http://www.dhm.de/filmarchiv/02-mapping-the-wall/peter-g/rolle-1/',
        'md5': '09890226332476a3e3f6f2cb74734aa5',
        'info_dict': {
            'id': 'rolle-1',
            'ext': 'flv',
            'title': 'ROLLE 1',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
    }]

    def _real_extract(self, url):
        """Return a playlist result with page metadata on the first entry."""
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(url, playlist_id)

        # The embedded player is configured with an XSPF playlist URL.
        playlist_url = self._search_regex(
            r"file\s*:\s*'([^']+)'", webpage, 'playlist url')

        entries = self._extract_xspf_playlist(playlist_url, playlist_id)

        title = self._search_regex(
            [r'dc:title="([^"]+)"', r'<title> &raquo;([^<]+)</title>'],
            webpage, 'title').strip()
        description = self._html_search_regex(
            r'<p><strong>Description:</strong>(.+?)</p>',
            webpage, 'description', default=None)
        duration = parse_duration(self._search_regex(
            r'<em>Length\s*</em>\s*:\s*</strong>([^<]+)',
            webpage, 'duration', default=None))

        # Page-level metadata applies to the first (usually only) entry.
        entries[0].update({
            'title': title,
            'description': description,
            'duration': duration,
        })

        return self.playlist_result(entries, playlist_id)
jamesorr/f90wrap
refs/heads/master
examples/example-arrays/tests.py
2
# -*- coding: utf-8 -*- """ Created on Tue Jul 28 15:19:03 2015 @author: David Verelst """ from __future__ import print_function import unittest import numpy as np import ExampleArray as lib class TestExample(unittest.TestCase): def setUp(self): pass def do_array_stuff(self, ndata): x = np.arange(ndata) y = np.arange(ndata) br = np.zeros((ndata,), order='F') co = np.zeros((4, ndata), order='F') lib.library.do_array_stuff(n=ndata, x=x, y=y, br=br, co=co) for k in range(4): np.testing.assert_allclose(x*y + x, co[k,:]) np.testing.assert_allclose(x/(y+1.0), br) def test_basic(self): self.do_array_stuff(1e3) def test_verybig_array(self): self.do_array_stuff(1e6) def test_square(self): n = 1e5 x = np.arange(n) y = np.arange(n) br = np.zeros((n,), order='F') co = np.zeros((4, n), order='F') lib.library.do_array_stuff(n=n, x=x, y=y, br=br, co=co) lib.library.only_manipulate(n=n, array=co) for k in range(4): np.testing.assert_allclose((x*y + x)**2, co[k,:]) def test_return_array(self): m, n = 10, 4 arr = np.ndarray((m,n), order='F', dtype=np.int32) lib.library.return_array(m, n, arr) ii, jj = np.mgrid[0:m,0:n] ii += 1 jj += 1 np.testing.assert_equal(ii*jj + jj, arr) if __name__ == '__main__': unittest.main()
kwantam/rust
refs/heads/master
src/etc/lldb_rust_formatters.py
15
# Copyright 2014 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.

import lldb
import re


def print_val(val, internal_dict):
    '''Prints the given value with Rust syntax'''
    type_class = val.GetType().GetTypeClass()

    if type_class == lldb.eTypeClassStruct:
        return print_struct_val(val, internal_dict)

    if type_class == lldb.eTypeClassUnion:
        return print_enum_val(val, internal_dict)

    if type_class == lldb.eTypeClassPointer:
        return print_pointer_val(val, internal_dict)

    if type_class == lldb.eTypeClassArray:
        return print_fixed_size_vec_val(val, internal_dict)

    return val.GetValue()


#=--------------------------------------------------------------------------------------------------
# Type-Specialized Printing Functions
#=--------------------------------------------------------------------------------------------------

def print_struct_val(val, internal_dict):
    '''Prints a struct, tuple, or tuple struct value with Rust syntax'''
    assert val.GetType().GetTypeClass() == lldb.eTypeClassStruct

    if is_vec_slice(val):
        return print_vec_slice_val(val, internal_dict)
    elif is_std_vec(val):
        return print_std_vec_val(val, internal_dict)
    else:
        return print_struct_val_starting_from(0, val, internal_dict)


def print_struct_val_starting_from(field_start_index, val, internal_dict):
    '''
    Prints a struct, tuple, or tuple struct value with Rust syntax.
    Ignores any fields before field_start_index.
    '''
    assert val.GetType().GetTypeClass() == lldb.eTypeClassStruct

    t = val.GetType()
    type_name = extract_type_name(t.GetName())
    num_children = val.num_children

    if (num_children - field_start_index) == 0:
        # The only field of this struct is the enum discriminant
        return type_name

    is_tuple_like = type_is_tuple_like(t)

    if is_tuple_like:
        template = "%(type_name)s(%(body)s)"
        separator = ", "
    else:
        template = "%(type_name)s {\n%(body)s\n}"
        separator = ", \n"

    if type_name.startswith("("):
        # this is a tuple, so don't print the type name
        type_name = ""

    def render_child(child_index):
        this = ""
        if not is_tuple_like:
            field_name = t.GetFieldAtIndex(child_index).GetName()
            this += field_name + ": "

        field_val = val.GetChildAtIndex(child_index)

        if not field_val.IsValid():
            field = t.GetFieldAtIndex(child_index)
            # LLDB is not good at handling zero-sized values, so we have to help
            # it a little
            if field.GetType().GetByteSize() == 0:
                return this + extract_type_name(field.GetType().GetName())
            else:
                return this + "<invalid value>"

        return this + print_val(field_val, internal_dict)

    body = separator.join([render_child(idx) for idx in
                           range(field_start_index, num_children)])

    return template % {"type_name": type_name,
                       "body": body}


def print_enum_val(val, internal_dict):
    '''Prints an enum value with Rust syntax'''
    assert val.GetType().GetTypeClass() == lldb.eTypeClassUnion

    if val.num_children == 1:
        # This is either an enum with just one variant, or it is an Option-like
        # enum where the discriminant is encoded in a non-nullable pointer
        # field. We find out which one it is by looking at the member name of
        # the sole union variant. If it starts with "RUST$ENCODED$ENUM$" then
        # we have an Option-like enum.
        first_variant_name = val.GetChildAtIndex(0).GetName()
        if first_variant_name and first_variant_name.startswith("RUST$ENCODED$ENUM$"):
            # This is an Option-like enum. The position of the discriminator
            # field is encoded in the name which has the format:
            #  RUST$ENCODED$ENUM$<index of discriminator field>$<name of null variant>
            last_separator_index = first_variant_name.rfind("$")
            if last_separator_index == -1:
                return "<invalid enum encoding: %s>" % first_variant_name

            start_index = len("RUST$ENCODED$ENUM$")

            # Extract indices of the discriminator field
            try:
                disr_field_indices = first_variant_name[start_index:last_separator_index].split("$")
                disr_field_indices = [int(index) for index in disr_field_indices]
            except ValueError:
                # The encoded indices were not numeric; only int() can raise
                # here (the bare `except:` this replaces also swallowed
                # KeyboardInterrupt and friends).
                return "<invalid enum encoding: %s>" % first_variant_name

            # Read the discriminant
            disr_val = val.GetChildAtIndex(0)
            for index in disr_field_indices:
                disr_val = disr_val.GetChildAtIndex(index)

            # If the discriminant field is a fat pointer we have to consider the
            # first word as the true discriminant
            if disr_val.GetType().GetTypeClass() == lldb.eTypeClassStruct:
                disr_val = disr_val.GetChildAtIndex(0)

            if disr_val.GetValueAsUnsigned() == 0:
                # Null case: Print the name of the null-variant
                null_variant_name = first_variant_name[last_separator_index + 1:]
                return null_variant_name
            else:
                # Non-null case: Interpret the data as a value of the non-null variant type
                return print_struct_val_starting_from(0, val.GetChildAtIndex(0), internal_dict)
        else:
            # This is just a regular uni-variant enum without discriminator field
            return print_struct_val_starting_from(0, val.GetChildAtIndex(0), internal_dict)

    # If we are here, this is a regular enum with more than one variant
    disr_val = val.GetChildAtIndex(0).GetChildMemberWithName("RUST$ENUM$DISR")
    disr_type = disr_val.GetType()

    if disr_type.GetTypeClass() != lldb.eTypeClassEnumeration:
        return "<Invalid enum value encountered: Discriminator is not an enum>"

    variant_index = disr_val.GetValueAsUnsigned()
    return print_struct_val_starting_from(1, val.GetChildAtIndex(variant_index), internal_dict)


def print_pointer_val(val, internal_dict):
    '''Prints a pointer value with Rust syntax'''
    assert val.GetType().IsPointerType()
    sigil = "&"
    type_name = extract_type_name(val.GetType().GetName())
    if type_name and type_name[0:1] in ["&", "~", "*"]:
        sigil = type_name[0:1]

    return sigil + hex(val.GetValueAsUnsigned())  # print_val(val.Dereference(), internal_dict)


def print_fixed_size_vec_val(val, internal_dict):
    '''Prints a fixed-size array ([T; n]) as "[elem, elem, ...]".'''
    assert val.GetType().GetTypeClass() == lldb.eTypeClassArray

    output = "["

    for i in range(val.num_children):
        output += print_val(val.GetChildAtIndex(i), internal_dict)
        if i != val.num_children - 1:
            output += ", "

    output += "]"
    return output


def print_vec_slice_val(val, internal_dict):
    '''Prints a &[T] slice as "&[elem, elem, ...]".'''
    # Slice layout: child 0 is data_ptr, child 1 is length.
    length = val.GetChildAtIndex(1).GetValueAsUnsigned()
    data_ptr_val = val.GetChildAtIndex(0)
    return "&[%s]" % print_array_of_values(val.GetName(),
                                           data_ptr_val,
                                           length,
                                           internal_dict)


def print_std_vec_val(val, internal_dict):
    '''Prints a Vec<T> as "vec![elem, elem, ...]".'''
    length = val.GetChildAtIndex(1).GetValueAsUnsigned()
    # Vec<> -> Unique<> -> NonZero<> -> *T
    data_ptr_val = val.GetChildAtIndex(0).GetChildAtIndex(0).GetChildAtIndex(0)
    return "vec![%s]" % print_array_of_values(val.GetName(),
                                              data_ptr_val,
                                              length,
                                              internal_dict)


#=--------------------------------------------------------------------------------------------------
# Helper Functions
#=--------------------------------------------------------------------------------------------------

unqualified_type_markers = frozenset(["(", "[", "&", "*"])


def extract_type_name(qualified_type_name):
    '''Extracts the type name from a fully qualified path'''
    if qualified_type_name[0] in unqualified_type_markers:
        return qualified_type_name

    end_of_search = qualified_type_name.find("<")
    if end_of_search < 0:
        end_of_search = len(qualified_type_name)

    index = qualified_type_name.rfind("::", 0, end_of_search)
    if index < 0:
        return qualified_type_name
    else:
        return qualified_type_name[index + 2:]


def type_is_tuple_like(ty):
    '''Returns True if all of this type's fields carry tuple-style names
    (__0, __1, ...), i.e. the type is a tuple, a tuple struct, or a
    tuple-like enum variant. (The previous docstring stated the inverse.)'''
    for field in ty.fields:
        if field.GetName() == "RUST$ENUM$DISR":
            # Ignore the enum discriminant field if there is one.
            continue
        if (field.GetName() is None) or (re.match(r"__\d+$", field.GetName()) is None):
            return False
    return True


def is_vec_slice(val):
    '''Heuristically detects the &[T] slice layout: a 2-field struct
    {data_ptr, length} whose type name renders as "&[...]".'''
    ty = val.GetType()
    if ty.GetTypeClass() != lldb.eTypeClassStruct:
        return False

    if ty.GetNumberOfFields() != 2:
        return False

    if ty.GetFieldAtIndex(0).GetName() != "data_ptr":
        return False

    if ty.GetFieldAtIndex(1).GetName() != "length":
        return False

    type_name = extract_type_name(ty.GetName()).replace("&'static", "&").replace(" ", "")
    return type_name.startswith("&[") and type_name.endswith("]")


def is_std_vec(val):
    '''Heuristically detects collections::vec::Vec<T>: a 3-field struct
    {ptr, len, cap} with the expected qualified type name.'''
    ty = val.GetType()
    if ty.GetTypeClass() != lldb.eTypeClassStruct:
        return False

    if ty.GetNumberOfFields() != 3:
        return False

    if ty.GetFieldAtIndex(0).GetName() != "ptr":
        return False

    if ty.GetFieldAtIndex(1).GetName() != "len":
        return False

    if ty.GetFieldAtIndex(2).GetName() != "cap":
        return False

    return ty.GetName().startswith("collections::vec::Vec<")


def print_array_of_values(array_name, data_ptr_val, length, internal_dict):
    '''Prints a contiguous memory range, interpreting it as values of the
    pointee-type of data_ptr_val.'''
    data_ptr_type = data_ptr_val.GetType()
    assert data_ptr_type.IsPointerType()

    element_type = data_ptr_type.GetPointeeType()
    element_type_size = element_type.GetByteSize()

    start_address = data_ptr_val.GetValueAsUnsigned()

    def render_element(i):
        address = start_address + i * element_type_size
        element_val = data_ptr_val.CreateValueFromAddress(array_name + ("[%s]" % i),
                                                          address,
                                                          element_type)
        return print_val(element_val, internal_dict)

    return ', '.join([render_element(i) for i in range(length)])
ksachs/invenio
refs/heads/prod
modules/bibauthorid/lib/bibauthorid_personid_maintenance.py
5
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

"""
aidPersonID maintenance algorithms.
"""

from invenio.bibauthorid_name_utils import split_name_parts
from invenio.bibauthorid_backinterface import get_name_by_bibref
from invenio.bibauthorid_backinterface import back_up_author_paper_associations # emitting
#pylint: disable-msg=W0611
from invenio.bibauthorid_backinterface import compare_personid_tables # emitting
#pylint: disable-msg=W0611
from invenio.bibauthorid_backinterface import group_personid
from invenio.bibauthorid_backinterface import check_author_paper_associations # emitting
#pylint: disable-msg=W0611
from invenio.bibauthorid_backinterface import repair_author_paper_associations # emitting
#pylint: disable-msg=W0611
from invenio.bibauthorid_backinterface import duplicated_tortoise_results_exist # emitting
#pylint: disable-msg=W0611
from invenio.bibauthorid_backinterface import merger_errors_exist # emitting
#pylint: disable-msg=W0611
from invenio.bibauthorid_backinterface import restore_author_paper_associations # emitting
#pylint: disable-msg=W0611
from invenio.bibauthorid_backinterface import get_all_author_paper_associations # emitting
#pylint: disable-msg=W0611
from invenio.bibauthorid_backinterface import get_clusters # emitting
#pylint: disable-msg=W0611
from invenio.bibauthorid_backinterface import get_papers_affected_since as get_recids_affected_since # emitting
#pylint: disable-msg=W0611


def convert_personid():
    """Migrate rows from the legacy aidPERSONID table into the new
    aidPERSONIDPAPERS / aidPERSONIDDATA tables, inserting in chunks.

    'paper' rows become aidPERSONIDPAPERS entries (resolving the bibref to
    a name); 'gathered_name' rows are dropped; everything else goes to
    aidPERSONIDDATA unchanged.
    """
    from invenio.dbquery import run_sql # oh come on, the whole function will be removed soon
    from itertools import repeat

    # Flush the accumulated VALUES once more than `chunk` scalar arguments
    # have been collected, to keep each INSERT statement bounded.
    chunk = 1000

    old_personid = run_sql("SELECT `personid`, `tag`, `data`, `flag`, `lcul` FROM `aidPERSONID`")

    def flush_papers(args):
        # args holds 7 scalars per row, matching the 7 inserted columns.
        run_sql("INSERT INTO `aidPERSONIDPAPERS` "
                "(`personid`, "
                " `bibref_table`, "
                " `bibref_value`, "
                " `bibrec`, "
                " `name`, "
                " `flag`, "
                " `lcul`) "
                "VALUES " + " , ".join(repeat("(%s, %s, %s, %s, %s, %s, %s)",
                                              len(args) / 7)),
                tuple(args))

    def flush_data(args):
        # args holds 5 scalars per row, matching the 5 inserted columns.
        run_sql("INSERT INTO `aidPERSONIDDATA` "
                "(`personid`, "
                " `tag`, "
                " `data`, "
                " `opt1`, "
                " `opt2`) "
                "VALUES " + " , ".join(repeat("(%s, %s, %s, %s, %s)",
                                              len(args) / 5)),
                tuple(args))

    paper_args = []
    data_args = []

    for row in old_personid:
        if row[1] == 'paper':
            bibref, rec = row[2].split(',')
            tab, ref = bibref.split(':')
            try:
                name = get_name_by_bibref((int(tab), int(ref), int(rec)))
            except Exception:
                # Skip rows whose bibref cannot be resolved to a name.
                # (Was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit.)
                continue
            paper_args += [row[0], tab, ref, rec, name, row[3], row[4]]
            if len(paper_args) > chunk:
                flush_papers(paper_args)
                paper_args = []

        elif row[1] == 'gathered_name':
            continue
        else:
            data_args += list(row)
            if len(data_args) > chunk:
                flush_data(data_args)
                data_args = []

    # Flush whatever is left after the loop.
    if paper_args:
        flush_papers(paper_args)

    if data_args:
        flush_data(data_args)


def compare_personids(path):
    '''
    Use this function with back_up_author_paper_associations() to diff
    personids. Writes the diff report to `path`.
    '''
    # `with` guarantees the report file is closed even if the comparison
    # raises (the original leaked the handle on error).
    with open(path, "w") as fp:
        pid1_p, pid1_d = group_personid("aidPERSONIDPAPERS_copy", "aidPERSONIDDATA_copy")
        pid2_p, pid2_d = group_personid("aidPERSONIDPAPERS", "aidPERSONIDDATA")
        compare_personid_tables(pid1_p, pid1_d, pid2_p, pid2_d, fp)
GrognardsFromHell/TemplePlus
refs/heads/master
tpdatasrc/co8infra/scr/Spell139 - Dominate Animal.py
2
from toee import * def OnBeginSpellCast( spell ): print "Dominate Animal OnBeginSpellCast" print "spell.target_list=", spell.target_list print "spell.caster=", spell.caster, " caster.level= ", spell.caster_level game.particles( "sp-enchantment-conjure", spell.caster ) def OnSpellEffect( spell ): print "Dominate Animal OnSpellEffect" spell.duration = 1 * spell.caster_level target_item = spell.target_list[0] if not target_item.obj.is_friendly( spell.caster ): if target_item.obj.is_category_type( mc_type_animal ): if not target_item.obj.saving_throw_spell( spell.dc, D20_Save_Will, D20STD_F_NONE, spell.caster, spell.id ): # saving throw unsuccessful target_item.obj.float_mesfile_line( 'mes\\spell.mes', 30002 ) target_item.obj.condition_add_with_args( 'sp-Dominate Animal', spell.id, spell.duration, target_item.obj.hit_dice_num ) target_item.partsys_id = game.particles( 'sp-Dominate Animal', target_item.obj ) # add target to initiative, just in case target_item.obj.add_to_initiative() game.update_combat_ui() else: # saving throw successful target_item.obj.float_mesfile_line( 'mes\\spell.mes', 30001 ) game.particles( 'Fizzle', target_item.obj ) spell.target_list.remove_target( target_item.obj ) else: # not an animal target_item.obj.float_mesfile_line( 'mes\\spell.mes', 30000 ) target_item.obj.float_mesfile_line( 'mes\\spell.mes', 31002 ) game.particles( 'Fizzle', target_item.obj ) spell.target_list.remove_target( target_item.obj ) else: # can't target friendlies game.particles( 'Fizzle', target_item.obj ) spell.target_list.remove_target( target_item.obj ) spell.spell_end( spell.id ) def OnBeginRound( spell ): print "Dominate Animal OnBeginRound" def OnEndSpellCast( spell ): print "Dominate Animal OnEndSpellCast"
jaredculp/faker
refs/heads/master
faker/providers/company/pt_BR/__init__.py
16
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as CompanyProvider


class Provider(CompanyProvider):
    """Brazilian Portuguese (pt_BR) company names and catch phrases."""

    # Company-name templates. The single-surname pattern appears twice so a
    # random draw picks it more often than the two-surname variants.
    formats = (
        '{{last_name}} {{company_suffix}}',
        '{{last_name}} {{last_name}} {{company_suffix}}',
        '{{last_name}}',
        '{{last_name}}',
    )

    catch_phrase_formats = (
        '{{catch_phrase_noun}} {{catch_phrase_verb}} {{catch_phrase_attribute}}',
    )

    # Building blocks for catch phrases: "<noun> <verb> <attribute>".
    nouns = (
        'a segurança', 'o prazer', 'o conforto', 'a simplicidade',
        'a certeza', 'a arte', 'o poder', 'o direito', 'a possibilidade',
        'a vantagem', 'a liberdade',
    )

    verbs = (
        'de conseguir', 'de avançar', 'de evoluir', 'de mudar',
        'de inovar', 'de ganhar', 'de atingir seus objetivos',
        'de concretizar seus projetos', 'de realizar seus sonhos',
    )

    attributes = (
        'de maneira eficaz', 'mais rapidamente', 'mais facilmente',
        'simplesmente', 'com toda a tranquilidade', 'antes de tudo',
        'naturalmente', 'sem preocupação', 'em estado puro',
        'com força total', 'direto da fonte', 'com confiança',
    )

    company_suffixes = ('S/A', 'S.A.', 'Ltda.', '- ME', '- EI', 'e Filhos')

    @classmethod
    def catch_phrase_noun(cls):
        """Return a random catch phrase noun."""
        return cls.random_element(cls.nouns)

    @classmethod
    def catch_phrase_attribute(cls):
        """Return a random catch phrase attribute."""
        return cls.random_element(cls.attributes)

    @classmethod
    def catch_phrase_verb(cls):
        """Return a random catch phrase verb."""
        return cls.random_element(cls.verbs)

    def catch_phrase(self):
        """Build a full catch phrase, capitalizing the first character.

        :example 'a segurança de evoluir sem preocupação'
        """
        template = self.random_element(self.catch_phrase_formats)
        phrase = self.generator.parse(template)
        # Upper-case only the first character; the rest keeps its casing.
        return phrase[0].upper() + phrase[1:]
chrishas35/django-travis-ci
refs/heads/master
tests/regressiontests/templates/nodelist.py
122
from django.template import VariableNode, Context
from django.template.loader import get_template_from_string
from django.utils.unittest import TestCase
from django.test.utils import override_settings


class NodelistTest(TestCase):
    """get_nodes_by_type should find VariableNodes nested inside block tags."""

    def _assert_one_variable_node(self, source):
        # Parse ``source`` and check exactly one VariableNode is discovered.
        # (Shared helper; the original repeated this in every test and
        # named the list ``vars``, shadowing the builtin.)
        template = get_template_from_string(source)
        variable_nodes = template.nodelist.get_nodes_by_type(VariableNode)
        self.assertEqual(len(variable_nodes), 1)

    def test_for(self):
        self._assert_one_variable_node('{% for i in 1 %}{{ a }}{% endfor %}')

    def test_if(self):
        self._assert_one_variable_node('{% if x %}{{ a }}{% endif %}')

    def test_ifequal(self):
        self._assert_one_variable_node('{% ifequal x y %}{{ a }}{% endifequal %}')

    def test_ifchanged(self):
        self._assert_one_variable_node('{% ifchanged x %}{{ a }}{% endifchanged %}')


class ErrorIndexTest(TestCase):
    """
    Checks whether index of error is calculated correctly in template debugger
    in for loops.

    Refs ticket #5831
    """
    @override_settings(DEBUG=True, TEMPLATE_DEBUG=True)
    def test_correct_exception_index(self):
        # Each entry pairs a template source with the (start, end) character
        # offsets the debugger should report for the failing node.
        tests = [
            ('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% endfor %}',
             (38, 56)),
            ('{% load bad_tag %}{% for i in range %}{% for j in range %}{% badsimpletag %}{% endfor %}{% endfor %}',
             (58, 76)),
            ('{% load bad_tag %}{% for i in range %}{% badsimpletag %}{% for j in range %}Hello{% endfor %}{% endfor %}',
             (38, 56)),
            ('{% load bad_tag %}{% for i in range %}{% for j in five %}{% badsimpletag %}{% endfor %}{% endfor %}',
             (38, 57)),
            ('{% load bad_tag %}{% for j in five %}{% badsimpletag %}{% endfor %}',
             (18, 37)),
        ]
        context = Context({
            'range': range(5),
            'five': 5,
        })
        for source, expected_error_source_index in tests:
            template = get_template_from_string(source)
            try:
                template.render(context)
            except (RuntimeError, TypeError) as e:
                error_source_index = e.django_template_source[1]
                self.assertEqual(error_source_index,
                                 expected_error_source_index)
            else:
                # Rendering must fail; the original silently passed when no
                # exception was raised, proving nothing.
                self.fail('Expected rendering of %r to raise' % source)
swcloud/api-client-staging
refs/heads/master
generated/python/googleapis-common-protos/google/rpc/code_pb2.py
17
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/rpc/code.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='google/rpc/code.proto', package='google.rpc', syntax='proto3', serialized_pb=_b('\n\x15google/rpc/code.proto\x12\ngoogle.rpc*\xb7\x02\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\r\n\tCANCELLED\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x14\n\x10INVALID_ARGUMENT\x10\x03\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x04\x12\r\n\tNOT_FOUND\x10\x05\x12\x12\n\x0e\x41LREADY_EXISTS\x10\x06\x12\x15\n\x11PERMISSION_DENIED\x10\x07\x12\x13\n\x0fUNAUTHENTICATED\x10\x10\x12\x16\n\x12RESOURCE_EXHAUSTED\x10\x08\x12\x17\n\x13\x46\x41ILED_PRECONDITION\x10\t\x12\x0b\n\x07\x41\x42ORTED\x10\n\x12\x10\n\x0cOUT_OF_RANGE\x10\x0b\x12\x11\n\rUNIMPLEMENTED\x10\x0c\x12\x0c\n\x08INTERNAL\x10\r\x12\x0f\n\x0bUNAVAILABLE\x10\x0e\x12\r\n\tDATA_LOSS\x10\x0f\x42\x1d\n\x0e\x63om.google.rpcB\tCodeProtoP\x01\x62\x06proto3') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _CODE = _descriptor.EnumDescriptor( name='Code', full_name='google.rpc.Code', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='OK', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='CANCELLED', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='UNKNOWN', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='INVALID_ARGUMENT', index=3, number=3, options=None, type=None), _descriptor.EnumValueDescriptor( 
name='DEADLINE_EXCEEDED', index=4, number=4, options=None, type=None), _descriptor.EnumValueDescriptor( name='NOT_FOUND', index=5, number=5, options=None, type=None), _descriptor.EnumValueDescriptor( name='ALREADY_EXISTS', index=6, number=6, options=None, type=None), _descriptor.EnumValueDescriptor( name='PERMISSION_DENIED', index=7, number=7, options=None, type=None), _descriptor.EnumValueDescriptor( name='UNAUTHENTICATED', index=8, number=16, options=None, type=None), _descriptor.EnumValueDescriptor( name='RESOURCE_EXHAUSTED', index=9, number=8, options=None, type=None), _descriptor.EnumValueDescriptor( name='FAILED_PRECONDITION', index=10, number=9, options=None, type=None), _descriptor.EnumValueDescriptor( name='ABORTED', index=11, number=10, options=None, type=None), _descriptor.EnumValueDescriptor( name='OUT_OF_RANGE', index=12, number=11, options=None, type=None), _descriptor.EnumValueDescriptor( name='UNIMPLEMENTED', index=13, number=12, options=None, type=None), _descriptor.EnumValueDescriptor( name='INTERNAL', index=14, number=13, options=None, type=None), _descriptor.EnumValueDescriptor( name='UNAVAILABLE', index=15, number=14, options=None, type=None), _descriptor.EnumValueDescriptor( name='DATA_LOSS', index=16, number=15, options=None, type=None), ], containing_type=None, options=None, serialized_start=38, serialized_end=349, ) _sym_db.RegisterEnumDescriptor(_CODE) Code = enum_type_wrapper.EnumTypeWrapper(_CODE) OK = 0 CANCELLED = 1 UNKNOWN = 2 INVALID_ARGUMENT = 3 DEADLINE_EXCEEDED = 4 NOT_FOUND = 5 ALREADY_EXISTS = 6 PERMISSION_DENIED = 7 UNAUTHENTICATED = 16 RESOURCE_EXHAUSTED = 8 FAILED_PRECONDITION = 9 ABORTED = 10 OUT_OF_RANGE = 11 UNIMPLEMENTED = 12 INTERNAL = 13 UNAVAILABLE = 14 DATA_LOSS = 15 DESCRIPTOR.enum_types_by_name['Code'] = _CODE DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\016com.google.rpcB\tCodeProtoP\001')) # @@protoc_insertion_point(module_scope)
ftl-toolbox/lib_openshift
refs/heads/master
lib_openshift/models/v1beta1_rollback_config.py
2
# coding: utf-8

"""
    OpenAPI spec version:

    Generated by: https://github.com/swagger-api/swagger-codegen.git

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
"""

from pprint import pformat
import re


class V1beta1RollbackConfig(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    operations = [
    ]

    # The key is attribute name
    # and the value is attribute type.
    swagger_types = {
        'revision': 'int'
    }

    # The key is attribute name
    # and the value is json key in definition.
    attribute_map = {
        'revision': 'revision'
    }

    def __init__(self, revision=None):
        """
        V1beta1RollbackConfig - a model defined in Swagger
        """
        self._revision = revision

    @property
    def revision(self):
        """
        Gets the revision of this V1beta1RollbackConfig.
        The revision to rollback to. If set to 0, rollback to the last revision.

        :return: The revision of this V1beta1RollbackConfig.
        :rtype: int
        """
        return self._revision

    @revision.setter
    def revision(self, revision):
        """
        Sets the revision of this V1beta1RollbackConfig.
        The revision to rollback to. If set to 0, rollback to the last revision.

        :param revision: The revision of this V1beta1RollbackConfig.
        :type: int
        """
        self._revision = revision

    def to_dict(self):
        """
        Returns the model properties as a dict.
        """
        result = {}

        # Walk the declared attributes and recursively serialize each value.
        # dict.items() works on both Python 2 and 3, removing the file's
        # only use of six.iteritems.
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict((k, v.to_dict() if hasattr(v, "to_dict") else v)
                                    for k, v in value.items())
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model.
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`.
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Comparing against a non-model object returns False instead of
        raising AttributeError on ``other.__dict__`` as the generated
        version did.
        """
        if not isinstance(other, V1beta1RollbackConfig):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal.
        """
        return not self == other
julianprabhakar/eden_car
refs/heads/master
languages/en-gb.py
2
# -*- coding: utf-8 -*- { "A volunteer is defined as active if they've participated in an average of 8 or more hours of Program work or Trainings per month in the last year": "A volunteer is defined as active if they've participated in an average of 8 or more hours of Programme work or Trainings per month in the last year", 'Ability to customize the list of details tracked at a Shelter': 'Ability to customise the list of details tracked at a Shelter', 'Ability to customize the list of human resource tracked at a Shelter': 'Ability to customise the list of human resource tracked at a Shelter', 'Ability to customize the list of important facilities needed at a Shelter': 'Ability to customise the list of important facilities needed at a Shelter', "Acronym of the organization's name, eg. IFRC.": "Acronym of the organisation's name, eg. IFRC.", 'Add a new program to the catalog.': 'Add a new programme to the catalog.', 'Add all organizations which are involved in different roles in this project': 'Add all organisations which are involved in different roles in this project', 'Add Branch Organization': 'Add New Branch Organisation', 'Add Item to Catalog': 'Add Item to Catalogue', 'Add New Program': 'Add New Programme', 'Add Organization Domain': 'Add Organisation Domain', 'Add Organization to Project': 'Add Organisation to Project', 'Add Program Hours': 'Add Programme Hours', 'Canceled': 'Cancelled', 'Cannot make an Organization a branch of itself!': 'Cannot make an Organisation a branch of itself!', 'Capturing the projects each organization is providing and where': 'Capturing the projects each organisation is providing and where', 'Catalog': 'Catalogue', 'Catalog added': 'Catalogue added', 'Catalog deleted': 'Catalogue deleted', 'Catalog Details': 'Catalogue Details', 'Catalog Item added': 'Catalogue Item added', 'Catalog Item deleted': 'Catalogue Item deleted', 'Catalog Item updated': 'Catalogue Item updated', 'Catalog Items': 'Catalogue Items', 'Catalog updated': 
'Catalogue updated', 'Catalogs': 'Catalogues', 'Certificate Catalog': 'Certificate Catalogue', 'Certifying Organization': 'Certifying Organisation', 'Commitment Canceled': 'Commitment Cancelled', 'Community Organization': 'Community Organisation', 'Competency Rating Catalog': 'Competency Rating Catalogue', 'Configure resources to synchronize, update methods and policies': 'Configure resources to synchronise, update methods and policies', 'Configure/Monitor Synchronization': 'Configure/Monitor Synchronisation', 'Course Catalog': 'Course Catalogue', 'Create Catalog': 'Create Catalogue', 'Create Catalog Item': 'Create Catalogue Item', 'Create Organization': 'Create Organisation', 'Create Organization Type': 'Create Organisation Type', 'Create Partner Organization': 'Create Partner Organisation', 'Create Program': 'Create Programme', 'Credentialling Organization': 'Credentialling Organisation', 'Current Owned By (Organization/Branch)': 'Current Owned By (Organisation/Branch)', 'Currently no programs registered': 'Currently no programmes registered', 'Delete Catalog': 'Delete Catalogue', 'Delete Catalog Item': 'Delete Catalogue Item', 'Delete Organization': 'Delete Organisation', 'Delete Organization Domain': 'Delete Organisation Domain', 'Delete Organization Type': 'Delete Organisation Type', 'Delete Partner Organization': 'Delete Partner Organisation', 'Delete Program': 'Delete Programme', 'Department Catalog': 'Department Catalogue', 'Donating Organization': 'Donating Organisation', 'Edit Catalog': 'Edit Catalogue', 'Edit Catalog Item': 'Edit Catalogue Item', 'Edit Organization': 'Edit Organisation', 'Edit Organization Domain': 'Edit Organisation Domain', 'Edit Organization Type': 'Edit Organisation Type', 'Edit Partner Organization': 'Edit Partner Organisation', 'Edit Program': 'Edit Programme', 'Edit Project Organization': 'Edit Project Organisation', 'Edit Synchronization Settings': 'Edit Synchronisation Settings', 'Enter your organization': 'Enter your 
organisation', 'Filter': 'Filter', 'From Organization': 'From Organisation', 'Fulfill Requests': 'Fulfil Requests', 'Funding Organization': 'Funding Organisation', 'Funds Contributed by this Organization': 'Funds Contributed by this Organisation', 'Hair Color': 'Hair Colour', 'Hours by Program Report': 'Hours by Programme Report', 'Identifier which the repository identifies itself with when sending synchronization requests.': 'Identifier which the repository identifies itself with when sending synchronisation requests.', "If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": "If this field is populated then a user who specifies this Organisation when signing up will be assigned as a Staff of this Organisation unless their domain doesn't match the domain field.", 'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organisation', "If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": "If you don't see the Organisation in the list, you can add a new one by clicking link 'Add Organisation'.", 'Import Organizations': 'Import Organisations', 'Import Partner Organizations': 'Import Partner Organisations', 'Import Project Organizations': 'Import Project Organisations', 'In Catalogs': 'In Catalogues', 'Intergovernmental Organization': 'Intergovernmental Organisation', 'International Organization': 'International Organisation', 'Item Catalog Details': 'Item Catalogue Details', 'Item Catalogs': 'Item Catalogues', 'Job Role Catalog': 'Job Role Catalogue', 'Job Title Catalog': 'Job Title Catalogue', 'Kit canceled': 'Kit cancelled', 'Last Synchronization': 'Last Synchronisation', 'Last synchronized 
on': 'Last synchronised on', 'Lead Organization': 'Lead Organisation', 'List All Organization Approvers & Whitelists': 'List All Organisation Approvers & Whitelists', 'List Organization Domains': 'List Organisation Domains', 'List Organization Types': 'List Organisation Types', 'List Organizations': 'List Organisations', 'List Partner Organizations': 'List Partner Organisations', 'List Programs': 'List Programmes', 'List Project Organizations': 'List Project Organisations', 'Logo of the organization. This should be a png or jpeg file and it should be no larger than 400x400': 'Logo of the organisation. This should be a png or jpeg file and it should be no larger than 400x400', 'Manage Organization Contacts': 'Manage Organisation Contacts', 'Manage Organizations': 'Manage Organisations', 'Manual Synchronization': 'Manual Synchronisation', 'Matching Catalog Items': 'Matching Catalogue Items', 'Monetization': 'Monetisation', 'Monetization Report': 'Monetisation Report', 'No Catalog Items currently registered': 'No Catalogue Items currently registered', 'No Catalogs currently registered': 'No Catalogues currently registered', 'No Matching Catalog Items': 'No Matching Catalogue Items', 'No Organization Domains currently registered': 'No Organisation Domains currently registered', 'No Organization Types currently registered': 'No Organisation Types currently registered', 'No Organizations currently registered': 'No Organisations currently registered', 'No Organizations for this Project': 'No Organisations for this Project', 'No Partner Organizations currently registered': 'No Partner Organisations currently registered', 'Office/Center': 'Office/Centre', 'Order canceled': 'Order cancelled', 'Organization': 'Organisation', 'Organization added': 'Organisation added', 'Organization added to Project': 'Organisation added to Project', 'Organization deleted': 'Organisation deleted', 'Organization Details': 'Organisation Details', 'Organization Domain added': 'Organisation Domain 
added', 'Organization Domain deleted': 'Organisation Domain deleted', 'Organization Domain Details': 'Organisation Domain Details', 'Organization Domain updated': 'Organisation Domain updated', 'Organization Domains': 'Organisation Domains', 'Organization Registry': 'Organisation Registry', 'Organization removed from Project': 'Organisation removed from Project', 'Organization Type': 'Organisation Type', 'Organization Type added': 'Organisation Type added', 'Organization Type deleted': 'Organisation Type deleted', 'Organization Type Details': 'Organisation Type Details', 'Organization Type updated': 'Organisation Type updated', 'Organization Types': 'Organisation Types', 'Organization Units': 'Organisation Units', 'Organization updated': 'Organisation updated', 'Organization(s)': 'Organisation(s)', 'Organization/Branch': 'Organisation/Branch', 'Organization/Supplier': 'Organisation/Supplier', 'Organizational Development': 'Organisational Development', 'Organizations': 'Organisations', 'Organized By': 'Organised By', 'Owned By (Organization/Branch)': 'Owned By (Organisation/Branch)', 'Owning Organization': 'Owning Organisation', 'Participating Organizations': 'Participating Organisations', 'Partner Organization': 'Partner Organisation', 'Partner Organization added': 'Partner Organisation added', 'Partner Organization deleted': 'Partner Organisation deleted', 'Partner Organization Details': 'Partner Organisation Details', 'Partner Organization updated': 'Partner Organisation updated', 'Partner Organizations': 'Partner Organisations', "Phone number to donate to this organization's relief efforts.": "Phone number to donate to this organisation's relief efforts.", 'Please enter a %(site)s OR an Organization': 'Please enter a %(site)s OR an Organisation', 'Please enter an Organization/Supplier': 'Please enter an Organisation/Supplier', 'Please use this field to record any additional information, including a history of the record if it is updated.': 'Please use this field 
to record any additional information, including a history of the record if it is updated.', 'Position Catalog': 'Position Catalogue', 'Program': 'Programme', 'Program added': 'Programme added', 'Program deleted': 'Programme deleted', 'Program Details': 'Programme Details', 'Program Hours': 'Programme Hours', 'Program Hours (Month)': 'Programme Hours (Month)', 'Program Hours (Year)': 'Programme Hours (Year)', 'Program updated': 'Programme updated', 'Programs': 'Programmes', 'Project Details including organizations': 'Project Details including organisations', 'Project Details including organizations and communities': 'Project Details including organisations and communities', 'Project Organization Details': 'Project Organisation Details', 'Project Organization updated': 'Project Organisation updated', 'Project Organizations': 'Project Organisations', 'Received Shipment canceled': 'Received Shipment cancelled', 'Request Canceled': 'Request Cancelled', 'Request for Donations Canceled': 'Request for Donations Cancelled', 'Request for Volunteers Canceled': 'Request for Volunteers Cancelled', 'Resource Mobilization': 'Resource Mobilisation', 'Schedule synchronization jobs': 'Schedule synchronisation jobs', 'Search by organization.': 'Search by organisation.', 'Search for an Organization by name or acronym': 'Search for an Organisation by name or acronym', 'Search for an Organization by name or acronym.': 'Search for an Organisation by name or acronym.', 'Search for office by organization or branch.': 'Search for office by organisation or branch.', 'Search for warehouse by organization.': 'Search for warehouse by organisation.', 'Search Organization Domains': 'Search Organisation Domains', 'Search Organization Types': 'Search Organisation Types', 'Search Organizations': 'Search Organisations', 'Search Partner Organizations': 'Search Partner Organisations', 'Search Programs': 'Search Programmes', 'Search Project Organizations': 'Search Project Organisations', 'Sent Shipment 
canceled': 'Sent Shipment cancelled', 'Sent Shipment canceled and items returned to Warehouse': 'Sent Shipment cancelled and items returned to Warehouse', 'Shipping Organization': 'Shipping Organisation', 'Social Mobilization': 'Social Mobilisation', 'Specialized Hospital': 'Specialised Hospital', 'Synchronization': 'Synchronisation', 'Synchronization Job': 'Synchronisation Job', 'Synchronization Log': 'Synchronisation Log', 'Synchronization mode': 'Synchronisation mode', 'Synchronization Schedule': 'Synchronisation Schedule', 'Synchronization Settings': 'Synchronisation Settings', 'Synchronization settings updated': 'Synchronisation settings updated', 'Synchronize now': 'Synchronise now', 'The default Organization for whom this person is acting.': 'The default Organisation for whom this person is acting.', 'The default Organization for whom you are acting.': 'The default Organisation for whom you are acting.', 'The Organization Registry keeps track of all the relief organizations working in the area.': 'The Organisation Registry keeps track of all the relief organisations working in the area.', 'The synchronization module allows the synchronization of data resources between Sahana Eden instances.': 'The synchronisation module allows the synchronisation of data resources between Sahana Eden instances.', 'This shipment has already been received & subsequently canceled.': 'This shipment has already been received & subsequently cancelled.', 'This shipment has not been received - it has NOT been canceled because it can still be edited.': 'This shipment has not been received - it has NOT been cancelled because it can still be edited.', 'This shipment has not been sent - it has NOT been canceled because it can still be edited.': 'This shipment has not been sent - it has NOT been cancelled because it can still be edited.', 'To Organization': 'To Organisation', 'Training Course Catalog': 'Training Course Catalogue', 'Transfer Ownership To (Organization/Branch)': 'Transfer 
Ownership To (Organisation/Branch)', "Type the name of an existing catalog item OR Click 'Create Item' to add an item which is not in the catalog.": "Type the name of an existing catalogue item OR Click 'Create Item' to add an item which is not in the catalogue.", 'Under which condition a local record shall be updated if it also has been modified locally since the last synchronization': 'Under which condition a local record shall be updated if it also has been modified locally since the last synchronisation', 'Unique identifier which THIS repository identifies itself with when sending synchronization requests.': 'Unique identifier which THIS repository identifies itself with when sending synchronisation requests.', 'User Guidelines Synchronization': 'User Guidelines Synchronisation', 'Utilization Report': 'Utilisation Report', 'Volunteer Role Catalog': 'Volunteer Role Catalogue', 'Work on Program': 'Work on Programme', 'Year that the organization was founded': 'Year that the organisation was founded', }